aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-12-12 21:07:07 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-12-12 21:07:07 -0500
commit6be35c700f742e911ecedd07fcc43d4439922334 (patch)
treeca9f37214d204465fcc2d79c82efd291e357c53c /drivers
parente37aa63e87bd581f9be5555ed0ba83f5295c92fc (diff)
parent520dfe3a3645257bf83660f672c47f8558f3d4c4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller: 1) Allow to dump, monitor, and change the bridge multicast database using netlink. From Cong Wang. 2) RFC 5961 TCP blind data injection attack mitigation, from Eric Dumazet. 3) Networking user namespace support from Eric W. Biederman. 4) tuntap/virtio-net multiqueue support by Jason Wang. 5) Support for checksum offload of encapsulated packets (basically, tunneled traffic can still be checksummed by HW). From Joseph Gasparakis. 6) Allow BPF filter access to VLAN tags, from Eric Dumazet and Daniel Borkmann. 7) Bridge port parameters over netlink and BPDU blocking support from Stephen Hemminger. 8) Improve data access patterns during inet socket demux by rearranging socket layout, from Eric Dumazet. 9) TIPC protocol updates and cleanups from Ying Xue, Paul Gortmaker, and Jon Maloy. 10) Update TCP socket hash sizing to be more in line with current day realities. The existing heuristics were chosen a decade ago. From Eric Dumazet. 11) Fix races, queue bloat, and excessive wakeups in ATM and associated drivers, from Krzysztof Mazur and David Woodhouse. 12) Support DOVE (Distributed Overlay Virtual Ethernet) extensions in VXLAN driver, from David Stevens. 13) Add "oops_only" mode to netconsole, from Amerigo Wang. 14) Support set and query of VEB/VEPA bridge mode via PF_BRIDGE, also allow DCB netlink to work on namespaces other than the initial namespace. From John Fastabend. 15) Support PTP in the Tigon3 driver, from Matt Carlson. 16) tun/vhost zero copy fixes and improvements, plus turn it on by default, from Michael S. Tsirkin. 17) Support per-association statistics in SCTP, from Michele Baldessari. And many, many, driver updates, cleanups, and improvements. Too numerous to mention individually. * git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1722 commits) net/mlx4_en: Add support for destination MAC in steering rules net/mlx4_en: Use generic etherdevice.h functions. 
net: ethtool: Add destination MAC address to flow steering API bridge: add support of adding and deleting mdb entries bridge: notify mdb changes via netlink ndisc: Unexport ndisc_{build,send}_skb(). uapi: add missing netconf.h to export list pkt_sched: avoid requeues if possible solos-pci: fix double-free of TX skb in DMA mode bnx2: Fix accidental reversions. bna: Driver Version Updated to 3.1.2.1 bna: Firmware update bna: Add RX State bna: Rx Page Based Allocation bna: TX Intr Coalescing Fix bna: Tx and Rx Optimizations bna: Code Cleanup and Enhancements ath9k: check pdata variable before dereferencing it ath5k: RX timestamp is reported at end of frame ath9k_htc: RX timestamp is reported at end of frame ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/solos-pci.c88
-rw-r--r--drivers/bcma/bcma_private.h6
-rw-r--r--drivers/bcma/driver_chipcommon.c135
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c3
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c44
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c35
-rw-r--r--drivers/bcma/driver_mips.c52
-rw-r--r--drivers/bcma/driver_pci_host.c24
-rw-r--r--drivers/bcma/host_pci.c6
-rw-r--r--drivers/bcma/main.c62
-rw-r--r--drivers/bcma/sprom.c5
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c28
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/dma/ioat/dca.c23
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c2
-rw-r--r--drivers/isdn/hisax/callc.c2
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c5
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/isdn/pcbit/layer2.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c6
-rw-r--r--drivers/net/bonding/bond_alb.c197
-rw-r--r--drivers/net/bonding/bond_alb.h28
-rw-r--r--drivers/net/bonding/bond_debugfs.c5
-rw-r--r--drivers/net/bonding/bond_main.c19
-rw-r--r--drivers/net/bonding/bonding.h13
-rw-r--r--drivers/net/can/Kconfig9
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c9
-rw-r--r--drivers/net/can/bfin_can.c7
-rw-r--r--drivers/net/can/c_can/c_can.c12
-rw-r--r--drivers/net/can/c_can/c_can.h3
-rw-r--r--drivers/net/can/c_can/c_can_pci.c8
-rw-r--r--drivers/net/can/c_can/c_can_platform.c36
-rw-r--r--drivers/net/can/cc770/cc770_isa.c18
-rw-r--r--drivers/net/can/cc770/cc770_platform.c18
-rw-r--r--drivers/net/can/dev.c3
-rw-r--r--drivers/net/can/flexcan.c12
-rw-r--r--drivers/net/can/grcan.c1756
-rw-r--r--drivers/net/can/janz-ican3.c28
-rw-r--r--drivers/net/can/mcp251x.c6
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c37
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/mscan/mscan.h1
-rw-r--r--drivers/net/can/pch_can.c6
-rw-r--r--drivers/net/can/sja1000/Kconfig3
-rw-r--r--drivers/net/can/sja1000/ems_pci.c4
-rw-r--r--drivers/net/can/sja1000/ems_pcmcia.c5
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c8
-rw-r--r--drivers/net/can/sja1000/peak_pci.c7
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c39
-rw-r--r--drivers/net/can/sja1000/sja1000.c8
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c16
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c14
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c1
-rw-r--r--drivers/net/can/sja1000/tscan1.c8
-rw-r--r--drivers/net/can/softing/softing_cs.c11
-rw-r--r--drivers/net/can/softing/softing_main.c14
-rw-r--r--drivers/net/can/ti_hecc.c5
-rw-r--r--drivers/net/can/usb/Kconfig29
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c7
-rw-r--r--drivers/net/can/usb/esd_usb2.c45
-rw-r--r--drivers/net/can/usb/kvaser_usb.c1627
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c5
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h1
-rw-r--r--drivers/net/dsa/Kconfig5
-rw-r--r--drivers/net/ethernet/3com/3c509.c29
-rw-r--r--drivers/net/ethernet/3com/3c59x.c21
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/3com/typhoon.c10
-rw-r--r--drivers/net/ethernet/8390/ax88796.c16
-rw-r--r--drivers/net/ethernet/8390/etherh.c14
-rw-r--r--drivers/net/ethernet/8390/hydra.c18
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c12
-rw-r--r--drivers/net/ethernet/8390/ne3210.c4
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c17
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c12
-rw-r--r--drivers/net/ethernet/adi/Kconfig2
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c271
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.h13
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/alteon/acenic.c31
-rw-r--r--drivers/net/ethernet/amd/a2065.c16
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c8
-rw-r--r--drivers/net/ethernet/amd/ariadne.c10
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c6
-rw-r--r--drivers/net/ethernet/amd/declance.c8
-rw-r--r--drivers/net/ethernet/amd/depca.c10
-rw-r--r--drivers/net/ethernet/amd/hplance.c17
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c12
-rw-r--r--drivers/net/ethernet/apple/bmac.c4
-rw-r--r--drivers/net/ethernet/apple/mace.c4
-rw-r--r--drivers/net/ethernet/apple/macmace.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c11
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c7
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c17
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c10
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1022
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h134
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h141
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c528
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h91
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c41
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h29
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c991
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1186
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h3
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c189
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h30
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h7
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c726
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h68
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c138
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h9
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c943
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/Kconfig9
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c1283
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h112
-rw-r--r--drivers/net/ethernet/cadence/macb.c564
-rw-r--r--drivers/net/ethernet/cadence/macb.h67
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c59
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/tp.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c25
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c22
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c7
-rw-r--r--drivers/net/ethernet/cisco/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c7
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c18
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c3
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c16
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c18
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c10
-rw-r--r--drivers/net/ethernet/dec/tulip/media.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c12
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c6
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c24
-rw-r--r--drivers/net/ethernet/dlink/sundance.c95
-rw-r--r--drivers/net/ethernet/dnet.c11
-rw-r--r--drivers/net/ethernet/emulex/Kconfig2
-rw-r--r--drivers/net/ethernet/emulex/benet/Kconfig2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h51
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c409
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h177
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c80
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c849
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c5
-rw-r--r--drivers/net/ethernet/ethoc.c8
-rw-r--r--drivers/net/ethernet/fealnx.c12
-rw-r--r--drivers/net/ethernet/freescale/Kconfig9
-rw-r--r--drivers/net/ethernet/freescale/Makefile1
-rw-r--r--drivers/net/ethernet/freescale/fec.c171
-rw-r--r--drivers/net/ethernet/freescale/fec.h119
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c383
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c5
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c14
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c4
-rw-r--r--drivers/net/ethernet/hp/hp100.c18
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c20
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c2
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c8
-rw-r--r--drivers/net/ethernet/ibm/Kconfig5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c16
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c24
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c13
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c7
-rw-r--r--drivers/net/ethernet/icplus/ipg.c7
-rw-r--r--drivers/net/ethernet/intel/Kconfig30
-rw-r--r--drivers/net/ethernet/intel/e100.c7
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c17
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c13
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c14
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c66
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c115
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h27
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h17
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c69
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c243
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c135
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c9
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c331
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c141
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c77
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c369
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h17
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c128
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c99
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h16
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c49
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h112
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c406
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c1572
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c102
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c41
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c160
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c115
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c109
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c418
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c164
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c470
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c403
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c61
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h2
-rw-r--r--drivers/net/ethernet/jme.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/skge.c9
-rw-r--r--drivers/net/ethernet/marvell/sky2.c35
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c155
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h27
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c13
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c6
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c18
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c6
-rw-r--r--drivers/net/ethernet/myricom/Kconfig1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c285
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c4
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c8
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c21
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c13
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c8
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/neterion/Kconfig2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.h5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c27
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c6
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c6
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig17
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c14
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c12
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c12
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c93
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h521
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c114
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c73
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h56
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c749
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c852
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c1309
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1992
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c629
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c960
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c12
-rw-r--r--drivers/net/ethernet/rdc/r6040.c9
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c75
-rw-r--r--drivers/net/ethernet/realtek/8139too.c14
-rw-r--r--drivers/net/ethernet/realtek/atp.c58
-rw-r--r--drivers/net/ethernet/realtek/atp.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c170
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/s6gmac.c6
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig10
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/efx.c16
-rw-r--r--drivers/net/ethernet/sfc/efx.h13
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c25
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/io.h43
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c23
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h5
-rw-r--r--drivers/net/ethernet/sfc/nic.c81
-rw-r--r--drivers/net/ethernet/sfc/nic.h28
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c17
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c8
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c11
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c7
-rw-r--r--drivers/net/ethernet/sis/sis190.c27
-rw-r--r--drivers/net/ethernet/sis/sis900.c29
-rw-r--r--drivers/net/ethernet/smsc/epic100.c13
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c20
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h16
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c22
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h20
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c32
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c261
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c134
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h46
-rw-r--r--drivers/net/ethernet/sun/cassini.c13
-rw-r--r--drivers/net/ethernet/sun/niu.c121
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c10
-rw-r--r--drivers/net/ethernet/sun/sungem.c7
-rw-r--r--drivers/net/ethernet/sun/sunhme.c20
-rw-r--r--drivers/net/ethernet/sun/sunqe.c12
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c15
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig9
-rw-r--r--drivers/net/ethernet/ti/Makefile2
-rw-r--r--drivers/net/ethernet/ti/cpmac.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw.c635
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c31
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h1
-rw-r--r--drivers/net/ethernet/ti/cpts.c427
-rw-r--r--drivers/net/ethernet/ti/cpts.h146
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c11
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c22
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_wireless.c10
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c18
-rw-r--r--drivers/net/ethernet/via/via-rhine.c13
-rw-r--r--drivers/net/ethernet/via/via-velocity.c28
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c10
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c4
-rw-r--r--drivers/net/fddi/defxx.c46
-rw-r--r--drivers/net/fddi/skfp/skfddi.c4
-rw-r--r--drivers/net/hippi/rrunner.c11
-rw-r--r--drivers/net/hyperv/rndis_filter.c10
-rw-r--r--drivers/net/ieee802154/at86rf230.c6
-rw-r--r--drivers/net/ieee802154/fakehard.c6
-rw-r--r--drivers/net/ieee802154/fakelb.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c6
-rw-r--r--drivers/net/irda/au1k_ir.c8
-rw-r--r--drivers/net/irda/bfin_sir.c8
-rw-r--r--drivers/net/irda/ep7211-sir.c73
-rw-r--r--drivers/net/irda/sh_irda.c10
-rw-r--r--drivers/net/irda/sh_sir.c6
-rw-r--r--drivers/net/irda/smsc-ircc2.c6
-rw-r--r--drivers/net/irda/via-ircc.c15
-rw-r--r--drivers/net/irda/vlsi_ir.c6
-rw-r--r--drivers/net/netconsole.c6
-rw-r--r--drivers/net/phy/davicom.c6
-rw-r--r--drivers/net/phy/dp83640.c78
-rw-r--r--drivers/net/phy/mdio-gpio.c14
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c6
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c6
-rw-r--r--drivers/net/phy/mdio-octeon.c6
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/micrel.c44
-rw-r--r--drivers/net/phy/smsc.c95
-rw-r--r--drivers/net/phy/spi_ks8995.c6
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/tun.c875
-rw-r--r--drivers/net/usb/Kconfig22
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix_common.c117
-rw-r--r--drivers/net/usb/asix_devices.c19
-rw-r--r--drivers/net/usb/cdc_mbim.c412
-rw-r--r--drivers/net/usb/cdc_ncm.c645
-rw-r--r--drivers/net/usb/dm9601.c107
-rw-r--r--drivers/net/usb/int51x1.c52
-rw-r--r--drivers/net/usb/mcs7830.c85
-rw-r--r--drivers/net/usb/net1080.c110
-rw-r--r--drivers/net/usb/plusb.c11
-rw-r--r--drivers/net/usb/sierra_net.c47
-rw-r--r--drivers/net/usb/smsc75xx.c1406
-rw-r--r--drivers/net/usb/smsc95xx.c1088
-rw-r--r--drivers/net/usb/smsc95xx.h25
-rw-r--r--drivers/net/usb/usbnet.c196
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c742
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c42
-rw-r--r--drivers/net/vxlan.c286
-rw-r--r--drivers/net/wan/Makefile4
-rw-r--r--drivers/net/wan/dscc4.c7
-rw-r--r--drivers/net/wan/farsync.c10
-rw-r--r--drivers/net/wan/hd64570.c5
-rw-r--r--drivers/net/wan/hd64572.c5
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c7
-rw-r--r--drivers/net/wan/pc300too.c4
-rw-r--r--drivers/net/wan/pci200syn.c4
-rw-r--r--drivers/net/wan/wanxl.c4
-rw-r--r--drivers/net/wan/wanxlfw.S1
-rw-r--r--drivers/net/wireless/adm8211.c6
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/at76c50x-usb.c85
-rw-r--r--drivers/net/wireless/ath/Kconfig8
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ar5523/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c1798
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.h152
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523_hw.h431
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c15
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c31
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c410
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h69
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c12
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c13
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c14
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c92
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c55
-rw-r--r--drivers/net/wireless/ath/ath6kl/recovery.c160
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c27
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c47
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c32
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c284
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h172
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c86
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h76
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h338
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c439
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_debug.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c203
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c211
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c21
-rw-r--r--drivers/net/wireless/ath/carl9170/Kconfig1
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c21
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c7
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c7
-rw-r--r--drivers/net/wireless/ath/hw.c20
-rw-r--r--drivers/net/wireless/atmel_pci.c6
-rw-r--r--drivers/net/wireless/b43/dma.c7
-rw-r--r--drivers/net/wireless/b43/main.c14
-rw-r--r--drivers/net/wireless/b43/pcmcia.c6
-rw-r--r--drivers/net/wireless/b43/pio.c4
-rw-r--r--drivers/net/wireless/b43/sdio.c6
-rw-r--r--drivers/net/wireless/b43/xmit.c2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h5
-rw-r--r--drivers/net/wireless/b43legacy/main.c37
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c2
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c58
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c182
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h256
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h98
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c893
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h46
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c716
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c579
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c447
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.h215
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c344
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c14
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c404
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c2944
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h328
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c723
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/antsel.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h175
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c156
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.h75
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c345
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c157
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c1261
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h48
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c471
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c64
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h42
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/stf.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h3
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c9
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c52
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h2
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_geo.c3
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c8
-rw-r--r--drivers/net/wireless/iwlegacy/common.h5
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c49
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c28
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c44
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c12
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c76
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c86
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h57
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c16
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c12
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c28
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c16
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h117
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c413
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c1064
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c1233
-rw-r--r--drivers/net/wireless/libertas/cfg.c33
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c39
-rw-r--r--drivers/net/wireless/libertas/if_spi.c6
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c585
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c8
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c8
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c89
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c24
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c10
-rw-r--r--drivers/net/wireless/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/mwifiex/init.c39
-rw-r--r--drivers/net/wireless/mwifiex/join.c6
-rw-r--r--drivers/net/wireless/mwifiex/main.c94
-rw-r--r--drivers/net/wireless/mwifiex/main.h19
-rw-r--r--drivers/net/wireless/mwifiex/scan.c55
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c39
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c49
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c26
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c38
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c11
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c7
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c17
-rw-r--r--drivers/net/wireless/mwifiex/usb.c4
-rw-r--r--drivers/net/wireless/mwifiex/util.c19
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c12
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h2
-rw-r--r--drivers/net/wireless/mwl8k.c71
-rw-r--r--drivers/net/wireless/orinoco/cfg.c11
-rw-r--r--drivers/net/wireless/orinoco/main.h2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c4
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c9
-rw-r--r--drivers/net/wireless/p54/eeprom.c5
-rw-r--r--drivers/net/wireless/p54/p54pci.c19
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/p54/p54usb.c6
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c245
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c36
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c8
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile4
-rw-r--r--drivers/net/wireless/rtlwifi/base.c24
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c9
-rw-r--r--drivers/net/wireless/rtlwifi/core.c5
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c24
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c227
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c88
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c93
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c15
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c48
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/dm.c30
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c25
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/rf.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c95
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c65
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/rf.c18
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c97
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.h2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c64
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/rf.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c20
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/Makefile22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/btc.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/def.h163
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.c920
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/dm.h149
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.c745
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/fw.h101
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c542
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h160
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c1786
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h151
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.c2380
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/hw.h73
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/led.c151
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/led.h39
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.c2044
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/phy.h224
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c109
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h322
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c129
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h98
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/reg.h2097
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.c505
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/rf.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.c380
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/sw.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/table.c738
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/table.h50
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c670
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.h725
-rw-r--r--drivers/net/wireless/rtlwifi/stats.c268
-rw-r--r--drivers/net/wireless/rtlwifi/stats.h46
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h161
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c4
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c6
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c6
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/xen-netfront.c10
-rw-r--r--drivers/nfc/Makefile2
-rw-r--r--drivers/nfc/pn533.c18
-rw-r--r--drivers/nfc/pn544/Makefile7
-rw-r--r--drivers/nfc/pn544/i2c.c500
-rw-r--r--drivers/nfc/pn544/pn544.c (renamed from drivers/nfc/pn544_hci.c)679
-rw-r--r--drivers/nfc/pn544/pn544.h32
-rw-r--r--drivers/pps/Kconfig1
-rw-r--r--drivers/ptp/Kconfig19
-rw-r--r--drivers/ptp/ptp_chardev.c61
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.c3
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c49
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/driver_chipcommon.c100
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c30
-rw-r--r--drivers/ssb/driver_extif.c24
-rw-r--r--drivers/ssb/driver_mipscore.c30
-rw-r--r--drivers/ssb/embedded.c35
-rw-r--r--drivers/ssb/main.c11
-rw-r--r--drivers/ssb/ssb_private.h31
-rw-r--r--drivers/vhost/net.c147
-rw-r--r--drivers/vhost/tcm_vhost.c9
-rw-r--r--drivers/vhost/vhost.c59
-rw-r--r--drivers/vhost/vhost.h14
837 files changed, 58002 insertions, 26440 deletions
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 98510931c815..c909b7b7d5f1 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -164,7 +164,6 @@ static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
164static uint32_t fpga_tx(struct solos_card *); 164static uint32_t fpga_tx(struct solos_card *);
165static irqreturn_t solos_irq(int irq, void *dev_id); 165static irqreturn_t solos_irq(int irq, void *dev_id);
166static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); 166static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci);
167static int list_vccs(int vci);
168static int atm_init(struct solos_card *, struct device *); 167static int atm_init(struct solos_card *, struct device *);
169static void atm_remove(struct solos_card *); 168static void atm_remove(struct solos_card *);
170static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); 169static int send_command(struct solos_card *card, int dev, const char *buf, size_t size);
@@ -710,7 +709,8 @@ void solos_bh(unsigned long card_arg)
710 dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n", 709 dev_warn(&card->dev->dev, "Received packet for unknown VPI.VCI %d.%d on port %d\n",
711 le16_to_cpu(header->vpi), le16_to_cpu(header->vci), 710 le16_to_cpu(header->vpi), le16_to_cpu(header->vci),
712 port); 711 port);
713 continue; 712 dev_kfree_skb_any(skb);
713 break;
714 } 714 }
715 atm_charge(vcc, skb->truesize); 715 atm_charge(vcc, skb->truesize);
716 vcc->push(vcc, skb); 716 vcc->push(vcc, skb);
@@ -790,44 +790,6 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci)
790 return vcc; 790 return vcc;
791} 791}
792 792
793static int list_vccs(int vci)
794{
795 struct hlist_head *head;
796 struct atm_vcc *vcc;
797 struct hlist_node *node;
798 struct sock *s;
799 int num_found = 0;
800 int i;
801
802 read_lock(&vcc_sklist_lock);
803 if (vci != 0){
804 head = &vcc_hash[vci & (VCC_HTABLE_SIZE -1)];
805 sk_for_each(s, node, head) {
806 num_found ++;
807 vcc = atm_sk(s);
808 printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
809 vcc->dev->number,
810 vcc->vpi,
811 vcc->vci);
812 }
813 } else {
814 for(i = 0; i < VCC_HTABLE_SIZE; i++){
815 head = &vcc_hash[i];
816 sk_for_each(s, node, head) {
817 num_found ++;
818 vcc = atm_sk(s);
819 printk(KERN_DEBUG "Device: %d Vpi: %d Vci: %d\n",
820 vcc->dev->number,
821 vcc->vpi,
822 vcc->vci);
823 }
824 }
825 }
826 read_unlock(&vcc_sklist_lock);
827 return num_found;
828}
829
830
831static int popen(struct atm_vcc *vcc) 793static int popen(struct atm_vcc *vcc)
832{ 794{
833 struct solos_card *card = vcc->dev->dev_data; 795 struct solos_card *card = vcc->dev->dev_data;
@@ -840,7 +802,7 @@ static int popen(struct atm_vcc *vcc)
840 return -EINVAL; 802 return -EINVAL;
841 } 803 }
842 804
843 skb = alloc_skb(sizeof(*header), GFP_ATOMIC); 805 skb = alloc_skb(sizeof(*header), GFP_KERNEL);
844 if (!skb) { 806 if (!skb) {
845 if (net_ratelimit()) 807 if (net_ratelimit())
846 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); 808 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
@@ -857,8 +819,6 @@ static int popen(struct atm_vcc *vcc)
857 819
858 set_bit(ATM_VF_ADDR, &vcc->flags); 820 set_bit(ATM_VF_ADDR, &vcc->flags);
859 set_bit(ATM_VF_READY, &vcc->flags); 821 set_bit(ATM_VF_READY, &vcc->flags);
860 list_vccs(0);
861
862 822
863 return 0; 823 return 0;
864} 824}
@@ -866,10 +826,21 @@ static int popen(struct atm_vcc *vcc)
866static void pclose(struct atm_vcc *vcc) 826static void pclose(struct atm_vcc *vcc)
867{ 827{
868 struct solos_card *card = vcc->dev->dev_data; 828 struct solos_card *card = vcc->dev->dev_data;
869 struct sk_buff *skb; 829 unsigned char port = SOLOS_CHAN(vcc->dev);
830 struct sk_buff *skb, *tmpskb;
870 struct pkt_hdr *header; 831 struct pkt_hdr *header;
871 832
872 skb = alloc_skb(sizeof(*header), GFP_ATOMIC); 833 /* Remove any yet-to-be-transmitted packets from the pending queue */
834 spin_lock(&card->tx_queue_lock);
835 skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) {
836 if (SKB_CB(skb)->vcc == vcc) {
837 skb_unlink(skb, &card->tx_queue[port]);
838 solos_pop(vcc, skb);
839 }
840 }
841 spin_unlock(&card->tx_queue_lock);
842
843 skb = alloc_skb(sizeof(*header), GFP_KERNEL);
873 if (!skb) { 844 if (!skb) {
874 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n"); 845 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in pclose()\n");
875 return; 846 return;
@@ -881,15 +852,22 @@ static void pclose(struct atm_vcc *vcc)
881 header->vci = cpu_to_le16(vcc->vci); 852 header->vci = cpu_to_le16(vcc->vci);
882 header->type = cpu_to_le16(PKT_PCLOSE); 853 header->type = cpu_to_le16(PKT_PCLOSE);
883 854
884 fpga_queue(card, SOLOS_CHAN(vcc->dev), skb, NULL); 855 skb_get(skb);
856 fpga_queue(card, port, skb, NULL);
885 857
886 clear_bit(ATM_VF_ADDR, &vcc->flags); 858 if (!wait_event_timeout(card->param_wq, !skb_shared(skb), 5 * HZ))
887 clear_bit(ATM_VF_READY, &vcc->flags); 859 dev_warn(&card->dev->dev,
860 "Timeout waiting for VCC close on port %d\n", port);
861
862 dev_kfree_skb(skb);
888 863
889 /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the 864 /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the
890 tasklet has finished processing any incoming packets (and, more to 865 tasklet has finished processing any incoming packets (and, more to
891 the point, using the vcc pointer). */ 866 the point, using the vcc pointer). */
892 tasklet_unlock_wait(&card->tlet); 867 tasklet_unlock_wait(&card->tlet);
868
869 clear_bit(ATM_VF_ADDR, &vcc->flags);
870
893 return; 871 return;
894} 872}
895 873
@@ -967,10 +945,11 @@ static uint32_t fpga_tx(struct solos_card *card)
967 for (port = 0; tx_pending; tx_pending >>= 1, port++) { 945 for (port = 0; tx_pending; tx_pending >>= 1, port++) {
968 if (tx_pending & 1) { 946 if (tx_pending & 1) {
969 struct sk_buff *oldskb = card->tx_skb[port]; 947 struct sk_buff *oldskb = card->tx_skb[port];
970 if (oldskb) 948 if (oldskb) {
971 pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr, 949 pci_unmap_single(card->dev, SKB_CB(oldskb)->dma_addr,
972 oldskb->len, PCI_DMA_TODEVICE); 950 oldskb->len, PCI_DMA_TODEVICE);
973 951 card->tx_skb[port] = NULL;
952 }
974 spin_lock(&card->tx_queue_lock); 953 spin_lock(&card->tx_queue_lock);
975 skb = skb_dequeue(&card->tx_queue[port]); 954 skb = skb_dequeue(&card->tx_queue[port]);
976 if (!skb) 955 if (!skb)
@@ -1011,9 +990,10 @@ static uint32_t fpga_tx(struct solos_card *card)
1011 if (vcc) { 990 if (vcc) {
1012 atomic_inc(&vcc->stats->tx); 991 atomic_inc(&vcc->stats->tx);
1013 solos_pop(vcc, oldskb); 992 solos_pop(vcc, oldskb);
1014 } else 993 } else {
1015 dev_kfree_skb_irq(oldskb); 994 dev_kfree_skb_irq(oldskb);
1016 995 wake_up(&card->param_wq);
996 }
1017 } 997 }
1018 } 998 }
1019 /* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */ 999 /* For non-DMA TX, write the 'TX start' bit for all four ports simultaneously */
@@ -1248,7 +1228,7 @@ static int atm_init(struct solos_card *card, struct device *parent)
1248 card->atmdev[i]->phy_data = (void *)(unsigned long)i; 1228 card->atmdev[i]->phy_data = (void *)(unsigned long)i;
1249 atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND); 1229 atm_dev_signal_change(card->atmdev[i], ATM_PHY_SIG_FOUND);
1250 1230
1251 skb = alloc_skb(sizeof(*header), GFP_ATOMIC); 1231 skb = alloc_skb(sizeof(*header), GFP_KERNEL);
1252 if (!skb) { 1232 if (!skb) {
1253 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n"); 1233 dev_warn(&card->dev->dev, "Failed to allocate sk_buff in atm_init()\n");
1254 continue; 1234 continue;
@@ -1345,6 +1325,8 @@ static struct pci_driver fpga_driver = {
1345 1325
1346static int __init solos_pci_init(void) 1326static int __init solos_pci_init(void)
1347{ 1327{
1328 BUILD_BUG_ON(sizeof(struct solos_skb_cb) > sizeof(((struct sk_buff *)0)->cb));
1329
1348 printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION); 1330 printk(KERN_INFO "Solos PCI Driver Version %s\n", VERSION);
1349 return pci_register_driver(&fpga_driver); 1331 return pci_register_driver(&fpga_driver);
1350} 1332}
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 169fc58427d3..537ae53231cd 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -48,8 +48,8 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
48#endif /* CONFIG_BCMA_DRIVER_MIPS */ 48#endif /* CONFIG_BCMA_DRIVER_MIPS */
49 49
50/* driver_chipcommon_pmu.c */ 50/* driver_chipcommon_pmu.c */
51u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc); 51u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
52u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc); 52u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
53 53
54#ifdef CONFIG_BCMA_SFLASH 54#ifdef CONFIG_BCMA_SFLASH
55/* driver_chipcommon_sflash.c */ 55/* driver_chipcommon_sflash.c */
@@ -84,6 +84,8 @@ extern void __exit bcma_host_pci_exit(void);
84/* driver_pci.c */ 84/* driver_pci.c */
85u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); 85u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
86 86
87extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
88
87#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE 89#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
88bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc); 90bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
89void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc); 91void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index a4c3ebcc4c86..dc96dd8ebff2 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -4,12 +4,15 @@
4 * 4 *
5 * Copyright 2005, Broadcom Corporation 5 * Copyright 2005, Broadcom Corporation
6 * Copyright 2006, 2007, Michael Buesch <m@bues.ch> 6 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
7 * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
7 * 8 *
8 * Licensed under the GNU/GPL. See COPYING for details. 9 * Licensed under the GNU/GPL. See COPYING for details.
9 */ 10 */
10 11
11#include "bcma_private.h" 12#include "bcma_private.h"
13#include <linux/bcm47xx_wdt.h>
12#include <linux/export.h> 14#include <linux/export.h>
15#include <linux/platform_device.h>
13#include <linux/bcma/bcma.h> 16#include <linux/bcma/bcma.h>
14 17
15static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, 18static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
@@ -22,12 +25,93 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
22 return value; 25 return value;
23} 26}
24 27
25void bcma_core_chipcommon_init(struct bcma_drv_cc *cc) 28static u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
26{ 29{
27 u32 leddc_on = 10; 30 if (cc->capabilities & BCMA_CC_CAP_PMU)
28 u32 leddc_off = 90; 31 return bcma_pmu_get_alp_clock(cc);
29 32
30 if (cc->setup_done) 33 return 20000000;
34}
35
36static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
37{
38 struct bcma_bus *bus = cc->core->bus;
39 u32 nb;
40
41 if (cc->capabilities & BCMA_CC_CAP_PMU) {
42 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
43 nb = 32;
44 else if (cc->core->id.rev < 26)
45 nb = 16;
46 else
47 nb = (cc->core->id.rev >= 37) ? 32 : 24;
48 } else {
49 nb = 28;
50 }
51 if (nb == 32)
52 return 0xffffffff;
53 else
54 return (1 << nb) - 1;
55}
56
57static u32 bcma_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
58 u32 ticks)
59{
60 struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
61
62 return bcma_chipco_watchdog_timer_set(cc, ticks);
63}
64
65static u32 bcma_chipco_watchdog_timer_set_ms_wdt(struct bcm47xx_wdt *wdt,
66 u32 ms)
67{
68 struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
69 u32 ticks;
70
71 ticks = bcma_chipco_watchdog_timer_set(cc, cc->ticks_per_ms * ms);
72 return ticks / cc->ticks_per_ms;
73}
74
75static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
76{
77 struct bcma_bus *bus = cc->core->bus;
78
79 if (cc->capabilities & BCMA_CC_CAP_PMU) {
80 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
81 /* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP clock */
82 return bcma_chipco_get_alp_clock(cc) / 4000;
83 else
84 /* based on 32KHz ILP clock */
85 return 32;
86 } else {
87 return bcma_chipco_get_alp_clock(cc) / 1000;
88 }
89}
90
91int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
92{
93 struct bcm47xx_wdt wdt = {};
94 struct platform_device *pdev;
95
96 wdt.driver_data = cc;
97 wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
98 wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
99 wdt.max_timer_ms = bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
100
101 pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
102 cc->core->bus->num, &wdt,
103 sizeof(wdt));
104 if (IS_ERR(pdev))
105 return PTR_ERR(pdev);
106
107 cc->watchdog = pdev;
108
109 return 0;
110}
111
112void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
113{
114 if (cc->early_setup_done)
31 return; 115 return;
32 116
33 if (cc->core->id.rev >= 11) 117 if (cc->core->id.rev >= 11)
@@ -36,6 +120,22 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
36 if (cc->core->id.rev >= 35) 120 if (cc->core->id.rev >= 35)
37 cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT); 121 cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
38 122
123 if (cc->capabilities & BCMA_CC_CAP_PMU)
124 bcma_pmu_early_init(cc);
125
126 cc->early_setup_done = true;
127}
128
129void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
130{
131 u32 leddc_on = 10;
132 u32 leddc_off = 90;
133
134 if (cc->setup_done)
135 return;
136
137 bcma_core_chipcommon_early_init(cc);
138
39 if (cc->core->id.rev >= 20) { 139 if (cc->core->id.rev >= 20) {
40 bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0); 140 bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, 0);
41 bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0); 141 bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, 0);
@@ -56,15 +156,33 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
56 ((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) | 156 ((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
57 (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT))); 157 (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
58 } 158 }
159 cc->ticks_per_ms = bcma_chipco_watchdog_ticks_per_ms(cc);
59 160
60 cc->setup_done = true; 161 cc->setup_done = true;
61} 162}
62 163
63/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */ 164/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
64void bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks) 165u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
65{ 166{
66 /* instant NMI */ 167 u32 maxt;
67 bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks); 168 enum bcma_clkmode clkmode;
169
170 maxt = bcma_chipco_watchdog_get_max_timer(cc);
171 if (cc->capabilities & BCMA_CC_CAP_PMU) {
172 if (ticks == 1)
173 ticks = 2;
174 else if (ticks > maxt)
175 ticks = maxt;
176 bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
177 } else {
178 clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC;
179 bcma_core_set_clockmode(cc->core, clkmode);
180 if (ticks > maxt)
181 ticks = maxt;
182 /* instant NMI */
183 bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
184 }
185 return ticks;
68} 186}
69 187
70void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value) 188void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
@@ -118,8 +236,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
118 struct bcma_serial_port *ports = cc->serial_ports; 236 struct bcma_serial_port *ports = cc->serial_ports;
119 237
120 if (ccrev >= 11 && ccrev != 15) { 238 if (ccrev >= 11 && ccrev != 15) {
121 /* Fixed ALP clock */ 239 baud_base = bcma_chipco_get_alp_clock(cc);
122 baud_base = bcma_pmu_alp_clock(cc);
123 if (ccrev >= 21) { 240 if (ccrev >= 21) {
124 /* Turn off UART clock before switching clocksource. */ 241 /* Turn off UART clock before switching clocksource. */
125 bcma_cc_write32(cc, BCMA_CC_CORECTL, 242 bcma_cc_write32(cc, BCMA_CC_CORECTL,
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index 9042781edec3..dbda91e4dff5 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -32,6 +32,9 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
32 } 32 }
33 33
34 cc->nflash.present = true; 34 cc->nflash.present = true;
35 if (cc->core->id.rev == 38 &&
36 (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT))
37 cc->nflash.boot = true;
35 38
36 /* Prepare platform device, but don't register it yet. It's too early, 39 /* Prepare platform device, but don't register it yet. It's too early,
37 * malloc (required by device_private_init) is not available yet. */ 40 * malloc (required by device_private_init) is not available yet. */
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 201faf106b3f..e162999bf916 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -144,7 +144,7 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
144 } 144 }
145} 145}
146 146
147void bcma_pmu_init(struct bcma_drv_cc *cc) 147void bcma_pmu_early_init(struct bcma_drv_cc *cc)
148{ 148{
149 u32 pmucap; 149 u32 pmucap;
150 150
@@ -153,7 +153,10 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
153 153
154 bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n", 154 bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
155 cc->pmu.rev, pmucap); 155 cc->pmu.rev, pmucap);
156}
156 157
158void bcma_pmu_init(struct bcma_drv_cc *cc)
159{
157 if (cc->pmu.rev == 1) 160 if (cc->pmu.rev == 1)
158 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL, 161 bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
159 ~BCMA_CC_PMU_CTL_NOILPONW); 162 ~BCMA_CC_PMU_CTL_NOILPONW);
@@ -165,7 +168,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
165 bcma_pmu_workarounds(cc); 168 bcma_pmu_workarounds(cc);
166} 169}
167 170
168u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc) 171u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc)
169{ 172{
170 struct bcma_bus *bus = cc->core->bus; 173 struct bcma_bus *bus = cc->core->bus;
171 174
@@ -193,7 +196,7 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
193/* Find the output of the "m" pll divider given pll controls that start with 196/* Find the output of the "m" pll divider given pll controls that start with
194 * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc. 197 * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
195 */ 198 */
196static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m) 199static u32 bcma_pmu_pll_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
197{ 200{
198 u32 tmp, div, ndiv, p1, p2, fc; 201 u32 tmp, div, ndiv, p1, p2, fc;
199 struct bcma_bus *bus = cc->core->bus; 202 struct bcma_bus *bus = cc->core->bus;
@@ -222,14 +225,14 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
222 ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT; 225 ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
223 226
224 /* Do calculation in Mhz */ 227 /* Do calculation in Mhz */
225 fc = bcma_pmu_alp_clock(cc) / 1000000; 228 fc = bcma_pmu_get_alp_clock(cc) / 1000000;
226 fc = (p1 * ndiv * fc) / p2; 229 fc = (p1 * ndiv * fc) / p2;
227 230
228 /* Return clock in Hertz */ 231 /* Return clock in Hertz */
229 return (fc / div) * 1000000; 232 return (fc / div) * 1000000;
230} 233}
231 234
232static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m) 235static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
233{ 236{
234 u32 tmp, ndiv, p1div, p2div; 237 u32 tmp, ndiv, p1div, p2div;
235 u32 clock; 238 u32 clock;
@@ -260,7 +263,7 @@ static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
260} 263}
261 264
262/* query bus clock frequency for PMU-enabled chipcommon */ 265/* query bus clock frequency for PMU-enabled chipcommon */
263static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc) 266static u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
264{ 267{
265 struct bcma_bus *bus = cc->core->bus; 268 struct bcma_bus *bus = cc->core->bus;
266 269
@@ -268,40 +271,42 @@ static u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
268 case BCMA_CHIP_ID_BCM4716: 271 case BCMA_CHIP_ID_BCM4716:
269 case BCMA_CHIP_ID_BCM4748: 272 case BCMA_CHIP_ID_BCM4748:
270 case BCMA_CHIP_ID_BCM47162: 273 case BCMA_CHIP_ID_BCM47162:
271 return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0, 274 return bcma_pmu_pll_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
272 BCMA_CC_PMU5_MAINPLL_SSB); 275 BCMA_CC_PMU5_MAINPLL_SSB);
273 case BCMA_CHIP_ID_BCM5356: 276 case BCMA_CHIP_ID_BCM5356:
274 return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0, 277 return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
275 BCMA_CC_PMU5_MAINPLL_SSB); 278 BCMA_CC_PMU5_MAINPLL_SSB);
276 case BCMA_CHIP_ID_BCM5357: 279 case BCMA_CHIP_ID_BCM5357:
277 case BCMA_CHIP_ID_BCM4749: 280 case BCMA_CHIP_ID_BCM4749:
278 return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0, 281 return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
279 BCMA_CC_PMU5_MAINPLL_SSB); 282 BCMA_CC_PMU5_MAINPLL_SSB);
280 case BCMA_CHIP_ID_BCM4706: 283 case BCMA_CHIP_ID_BCM4706:
281 return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0, 284 return bcma_pmu_pll_clock_bcm4706(cc,
282 BCMA_CC_PMU5_MAINPLL_SSB); 285 BCMA_CC_PMU4706_MAINPLL_PLL0,
286 BCMA_CC_PMU5_MAINPLL_SSB);
283 case BCMA_CHIP_ID_BCM53572: 287 case BCMA_CHIP_ID_BCM53572:
284 return 75000000; 288 return 75000000;
285 default: 289 default:
286 bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n", 290 bcma_warn(bus, "No bus clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
287 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK); 291 bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
288 } 292 }
289 return BCMA_CC_PMU_HT_CLOCK; 293 return BCMA_CC_PMU_HT_CLOCK;
290} 294}
291 295
292/* query cpu clock frequency for PMU-enabled chipcommon */ 296/* query cpu clock frequency for PMU-enabled chipcommon */
293u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc) 297u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
294{ 298{
295 struct bcma_bus *bus = cc->core->bus; 299 struct bcma_bus *bus = cc->core->bus;
296 300
297 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) 301 if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
298 return 300000000; 302 return 300000000;
299 303
304 /* New PMUs can have different clock for bus and CPU */
300 if (cc->pmu.rev >= 5) { 305 if (cc->pmu.rev >= 5) {
301 u32 pll; 306 u32 pll;
302 switch (bus->chipinfo.id) { 307 switch (bus->chipinfo.id) {
303 case BCMA_CHIP_ID_BCM4706: 308 case BCMA_CHIP_ID_BCM4706:
304 return bcma_pmu_clock_bcm4706(cc, 309 return bcma_pmu_pll_clock_bcm4706(cc,
305 BCMA_CC_PMU4706_MAINPLL_PLL0, 310 BCMA_CC_PMU4706_MAINPLL_PLL0,
306 BCMA_CC_PMU5_MAINPLL_CPU); 311 BCMA_CC_PMU5_MAINPLL_CPU);
307 case BCMA_CHIP_ID_BCM5356: 312 case BCMA_CHIP_ID_BCM5356:
@@ -316,10 +321,11 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
316 break; 321 break;
317 } 322 }
318 323
319 return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU); 324 return bcma_pmu_pll_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
320 } 325 }
321 326
322 return bcma_pmu_get_clockcontrol(cc); 327 /* On old PMUs CPU has the same clock as the bus */
328 return bcma_pmu_get_bus_clock(cc);
323} 329}
324 330
325static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset, 331static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
index 2c4eec2ca5a0..63e688393825 100644
--- a/drivers/bcma/driver_chipcommon_sflash.c
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -12,7 +12,7 @@
12 12
13static struct resource bcma_sflash_resource = { 13static struct resource bcma_sflash_resource = {
14 .name = "bcma_sflash", 14 .name = "bcma_sflash",
15 .start = BCMA_SFLASH, 15 .start = BCMA_SOC_FLASH2,
16 .end = 0, 16 .end = 0,
17 .flags = IORESOURCE_MEM | IORESOURCE_READONLY, 17 .flags = IORESOURCE_MEM | IORESOURCE_READONLY,
18}; 18};
@@ -31,15 +31,42 @@ struct bcma_sflash_tbl_e {
31}; 31};
32 32
33static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { 33static struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
34 { "", 0x14, 0x10000, 32, }, 34 { "M25P20", 0x11, 0x10000, 4, },
35 { "M25P40", 0x12, 0x10000, 8, },
36
37 { "M25P16", 0x14, 0x10000, 32, },
38 { "M25P32", 0x14, 0x10000, 64, },
39 { "M25P64", 0x16, 0x10000, 128, },
40 { "M25FL128", 0x17, 0x10000, 256, },
35 { 0 }, 41 { 0 },
36}; 42};
37 43
38static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = { 44static struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
45 { "SST25WF512", 1, 0x1000, 16, },
46 { "SST25VF512", 0x48, 0x1000, 16, },
47 { "SST25WF010", 2, 0x1000, 32, },
48 { "SST25VF010", 0x49, 0x1000, 32, },
49 { "SST25WF020", 3, 0x1000, 64, },
50 { "SST25VF020", 0x43, 0x1000, 64, },
51 { "SST25WF040", 4, 0x1000, 128, },
52 { "SST25VF040", 0x44, 0x1000, 128, },
53 { "SST25VF040B", 0x8d, 0x1000, 128, },
54 { "SST25WF080", 5, 0x1000, 256, },
55 { "SST25VF080B", 0x8e, 0x1000, 256, },
56 { "SST25VF016", 0x41, 0x1000, 512, },
57 { "SST25VF032", 0x4a, 0x1000, 1024, },
58 { "SST25VF064", 0x4b, 0x1000, 2048, },
39 { 0 }, 59 { 0 },
40}; 60};
41 61
42static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = { 62static struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
63 { "AT45DB011", 0xc, 256, 512, },
64 { "AT45DB021", 0x14, 256, 1024, },
65 { "AT45DB041", 0x1c, 256, 2048, },
66 { "AT45DB081", 0x24, 256, 4096, },
67 { "AT45DB161", 0x2c, 512, 4096, },
68 { "AT45DB321", 0x34, 512, 8192, },
69 { "AT45DB642", 0x3c, 1024, 8192, },
43 { 0 }, 70 { 0 },
44}; 71};
45 72
@@ -84,6 +111,8 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
84 break; 111 break;
85 } 112 }
86 break; 113 break;
114 case 0x13:
115 return -ENOTSUPP;
87 default: 116 default:
88 for (e = bcma_sflash_st_tbl; e->name; e++) { 117 for (e = bcma_sflash_st_tbl; e->name; e++) {
89 if (e->id == id) 118 if (e->id == id)
@@ -116,7 +145,7 @@ int bcma_sflash_init(struct bcma_drv_cc *cc)
116 return -ENOTSUPP; 145 return -ENOTSUPP;
117 } 146 }
118 147
119 sflash->window = BCMA_SFLASH; 148 sflash->window = BCMA_SOC_FLASH2;
120 sflash->blocksize = e->blocksize; 149 sflash->blocksize = e->blocksize;
121 sflash->numblocks = e->numblocks; 150 sflash->numblocks = e->numblocks;
122 sflash->size = sflash->blocksize * sflash->numblocks; 151 sflash->size = sflash->blocksize * sflash->numblocks;
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index cc65b45b4368..792daad28cbc 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -115,7 +115,7 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
115 bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) & 115 bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
116 ~(1 << irqflag)); 116 ~(1 << irqflag));
117 else 117 else
118 bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq), 0); 118 bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
119 119
120 /* assign the new one */ 120 /* assign the new one */
121 if (irq == 0) { 121 if (irq == 0) {
@@ -171,7 +171,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
171 struct bcma_bus *bus = mcore->core->bus; 171 struct bcma_bus *bus = mcore->core->bus;
172 172
173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU) 173 if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
174 return bcma_pmu_get_clockcpu(&bus->drv_cc); 174 return bcma_pmu_get_cpu_clock(&bus->drv_cc);
175 175
176 bcma_err(bus, "No PMU available, need this to get the cpu clock\n"); 176 bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
177 return 0; 177 return 0;
@@ -181,47 +181,66 @@ EXPORT_SYMBOL(bcma_cpu_clock);
181static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) 181static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
182{ 182{
183 struct bcma_bus *bus = mcore->core->bus; 183 struct bcma_bus *bus = mcore->core->bus;
184 struct bcma_drv_cc *cc = &bus->drv_cc;
184 185
185 switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) { 186 switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
186 case BCMA_CC_FLASHT_STSER: 187 case BCMA_CC_FLASHT_STSER:
187 case BCMA_CC_FLASHT_ATSER: 188 case BCMA_CC_FLASHT_ATSER:
188 bcma_debug(bus, "Found serial flash\n"); 189 bcma_debug(bus, "Found serial flash\n");
189 bcma_sflash_init(&bus->drv_cc); 190 bcma_sflash_init(cc);
190 break; 191 break;
191 case BCMA_CC_FLASHT_PARA: 192 case BCMA_CC_FLASHT_PARA:
192 bcma_debug(bus, "Found parallel flash\n"); 193 bcma_debug(bus, "Found parallel flash\n");
193 bus->drv_cc.pflash.window = 0x1c000000; 194 cc->pflash.present = true;
194 bus->drv_cc.pflash.window_size = 0x02000000; 195 cc->pflash.window = BCMA_SOC_FLASH2;
196 cc->pflash.window_size = BCMA_SOC_FLASH2_SZ;
195 197
196 if ((bcma_read32(bus->drv_cc.core, BCMA_CC_FLASH_CFG) & 198 if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) &
197 BCMA_CC_FLASH_CFG_DS) == 0) 199 BCMA_CC_FLASH_CFG_DS) == 0)
198 bus->drv_cc.pflash.buswidth = 1; 200 cc->pflash.buswidth = 1;
199 else 201 else
200 bus->drv_cc.pflash.buswidth = 2; 202 cc->pflash.buswidth = 2;
201 break; 203 break;
202 default: 204 default:
203 bcma_err(bus, "Flash type not supported\n"); 205 bcma_err(bus, "Flash type not supported\n");
204 } 206 }
205 207
206 if (bus->drv_cc.core->id.rev == 38 || 208 if (cc->core->id.rev == 38 ||
207 bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) { 209 bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
208 if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) { 210 if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
209 bcma_debug(bus, "Found NAND flash\n"); 211 bcma_debug(bus, "Found NAND flash\n");
210 bcma_nflash_init(&bus->drv_cc); 212 bcma_nflash_init(cc);
211 } 213 }
212 } 214 }
213} 215}
214 216
217void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
218{
219 struct bcma_bus *bus = mcore->core->bus;
220
221 if (mcore->early_setup_done)
222 return;
223
224 bcma_chipco_serial_init(&bus->drv_cc);
225 bcma_core_mips_flash_detect(mcore);
226
227 mcore->early_setup_done = true;
228}
229
215void bcma_core_mips_init(struct bcma_drv_mips *mcore) 230void bcma_core_mips_init(struct bcma_drv_mips *mcore)
216{ 231{
217 struct bcma_bus *bus; 232 struct bcma_bus *bus;
218 struct bcma_device *core; 233 struct bcma_device *core;
219 bus = mcore->core->bus; 234 bus = mcore->core->bus;
220 235
236 if (mcore->setup_done)
237 return;
238
221 bcma_info(bus, "Initializing MIPS core...\n"); 239 bcma_info(bus, "Initializing MIPS core...\n");
222 240
223 if (!mcore->setup_done) 241 bcma_core_mips_early_init(mcore);
224 mcore->assigned_irqs = 1; 242
243 mcore->assigned_irqs = 1;
225 244
226 /* Assign IRQs to all cores on the bus */ 245 /* Assign IRQs to all cores on the bus */
227 list_for_each_entry(core, &bus->cores, list) { 246 list_for_each_entry(core, &bus->cores, list) {
@@ -256,10 +275,5 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
256 bcma_info(bus, "IRQ reconfiguration done\n"); 275 bcma_info(bus, "IRQ reconfiguration done\n");
257 bcma_core_mips_dump_irq(bus); 276 bcma_core_mips_dump_irq(bus);
258 277
259 if (mcore->setup_done)
260 return;
261
262 bcma_chipco_serial_init(&bus->drv_cc);
263 bcma_core_mips_flash_detect(mcore);
264 mcore->setup_done = true; 278 mcore->setup_done = true;
265} 279}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 9baf886e82df..e6b5c89469dc 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -35,11 +35,6 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
35 chipid_top != 0x5300) 35 chipid_top != 0x5300)
36 return false; 36 return false;
37 37
38 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
39 bcma_info(bus, "This PCI core is disabled and not working\n");
40 return false;
41 }
42
43 bcma_core_enable(pc->core, 0); 38 bcma_core_enable(pc->core, 0);
44 39
45 return !mips_busprobe32(tmp, pc->core->io_addr); 40 return !mips_busprobe32(tmp, pc->core->io_addr);
@@ -396,6 +391,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
396 391
397 bcma_info(bus, "PCIEcore in host mode found\n"); 392 bcma_info(bus, "PCIEcore in host mode found\n");
398 393
394 if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
395 bcma_info(bus, "This PCIE core is disabled and not working\n");
396 return;
397 }
398
399 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL); 399 pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
400 if (!pc_host) { 400 if (!pc_host) {
401 bcma_err(bus, "can not allocate memory"); 401 bcma_err(bus, "can not allocate memory");
@@ -452,6 +452,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
452 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM; 452 pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
453 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM + 453 pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
454 BCMA_SOC_PCI_MEM_SZ - 1; 454 BCMA_SOC_PCI_MEM_SZ - 1;
455 pc_host->io_resource.start = 0x100;
456 pc_host->io_resource.end = 0x47F;
455 pci_membase_1G = BCMA_SOC_PCIE_DMA_H32; 457 pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
456 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0, 458 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
457 tmp | BCMA_SOC_PCI_MEM); 459 tmp | BCMA_SOC_PCI_MEM);
@@ -459,6 +461,8 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
459 pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM; 461 pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
460 pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM + 462 pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
461 BCMA_SOC_PCI_MEM_SZ - 1; 463 BCMA_SOC_PCI_MEM_SZ - 1;
464 pc_host->io_resource.start = 0x480;
465 pc_host->io_resource.end = 0x7FF;
462 pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32; 466 pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
463 pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG; 467 pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
464 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0, 468 pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
@@ -534,7 +538,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
534static void bcma_core_pci_fixup_addresses(struct pci_dev *dev) 538static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
535{ 539{
536 struct resource *res; 540 struct resource *res;
537 int pos; 541 int pos, err;
538 542
539 if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) { 543 if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
540 /* This is not a device on the PCI-core bridge. */ 544 /* This is not a device on the PCI-core bridge. */
@@ -547,8 +551,12 @@ static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
547 551
548 for (pos = 0; pos < 6; pos++) { 552 for (pos = 0; pos < 6; pos++) {
549 res = &dev->resource[pos]; 553 res = &dev->resource[pos];
550 if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) 554 if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
551 pci_assign_resource(dev, pos); 555 err = pci_assign_resource(dev, pos);
556 if (err)
557 pr_err("PCI: Problem fixing up the addresses on %s\n",
558 pci_name(dev));
559 }
552 } 560 }
553} 561}
554DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses); 562DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index b6b4b5ebd4c2..98fdc3e014e7 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -238,7 +238,7 @@ static void __devexit bcma_host_pci_remove(struct pci_dev *dev)
238 pci_set_drvdata(dev, NULL); 238 pci_set_drvdata(dev, NULL);
239} 239}
240 240
241#ifdef CONFIG_PM 241#ifdef CONFIG_PM_SLEEP
242static int bcma_host_pci_suspend(struct device *dev) 242static int bcma_host_pci_suspend(struct device *dev)
243{ 243{
244 struct pci_dev *pdev = to_pci_dev(dev); 244 struct pci_dev *pdev = to_pci_dev(dev);
@@ -261,11 +261,11 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
261 bcma_host_pci_resume); 261 bcma_host_pci_resume);
262#define BCMA_PM_OPS (&bcma_pm_ops) 262#define BCMA_PM_OPS (&bcma_pm_ops)
263 263
264#else /* CONFIG_PM */ 264#else /* CONFIG_PM_SLEEP */
265 265
266#define BCMA_PM_OPS NULL 266#define BCMA_PM_OPS NULL
267 267
268#endif /* CONFIG_PM */ 268#endif /* CONFIG_PM_SLEEP */
269 269
270static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { 270static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, 271 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d865470bc951..debd4f142f93 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -81,6 +81,18 @@ struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
81} 81}
82EXPORT_SYMBOL_GPL(bcma_find_core); 82EXPORT_SYMBOL_GPL(bcma_find_core);
83 83
84static struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
85 u8 unit)
86{
87 struct bcma_device *core;
88
89 list_for_each_entry(core, &bus->cores, list) {
90 if (core->id.id == coreid && core->core_unit == unit)
91 return core;
92 }
93 return NULL;
94}
95
84static void bcma_release_core_dev(struct device *dev) 96static void bcma_release_core_dev(struct device *dev)
85{ 97{
86 struct bcma_device *core = container_of(dev, struct bcma_device, dev); 98 struct bcma_device *core = container_of(dev, struct bcma_device, dev);
@@ -153,6 +165,12 @@ static int bcma_register_cores(struct bcma_bus *bus)
153 } 165 }
154#endif 166#endif
155 167
168 if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
169 err = bcma_chipco_watchdog_register(&bus->drv_cc);
170 if (err)
171 bcma_err(bus, "Error registering watchdog driver\n");
172 }
173
156 return 0; 174 return 0;
157} 175}
158 176
@@ -165,6 +183,8 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
165 if (core->dev_registered) 183 if (core->dev_registered)
166 device_unregister(&core->dev); 184 device_unregister(&core->dev);
167 } 185 }
186 if (bus->hosttype == BCMA_HOSTTYPE_SOC)
187 platform_device_unregister(bus->drv_cc.watchdog);
168} 188}
169 189
170int __devinit bcma_bus_register(struct bcma_bus *bus) 190int __devinit bcma_bus_register(struct bcma_bus *bus)
@@ -183,6 +203,20 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
183 return -1; 203 return -1;
184 } 204 }
185 205
206 /* Early init CC core */
207 core = bcma_find_core(bus, bcma_cc_core_id(bus));
208 if (core) {
209 bus->drv_cc.core = core;
210 bcma_core_chipcommon_early_init(&bus->drv_cc);
211 }
212
213 /* Try to get SPROM */
214 err = bcma_sprom_get(bus);
215 if (err == -ENOENT) {
216 bcma_err(bus, "No SPROM available\n");
217 } else if (err)
218 bcma_err(bus, "Failed to get SPROM: %d\n", err);
219
186 /* Init CC core */ 220 /* Init CC core */
187 core = bcma_find_core(bus, bcma_cc_core_id(bus)); 221 core = bcma_find_core(bus, bcma_cc_core_id(bus));
188 if (core) { 222 if (core) {
@@ -198,10 +232,17 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
198 } 232 }
199 233
200 /* Init PCIE core */ 234 /* Init PCIE core */
201 core = bcma_find_core(bus, BCMA_CORE_PCIE); 235 core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
236 if (core) {
237 bus->drv_pci[0].core = core;
238 bcma_core_pci_init(&bus->drv_pci[0]);
239 }
240
241 /* Init PCIE core */
242 core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
202 if (core) { 243 if (core) {
203 bus->drv_pci.core = core; 244 bus->drv_pci[1].core = core;
204 bcma_core_pci_init(&bus->drv_pci); 245 bcma_core_pci_init(&bus->drv_pci[1]);
205 } 246 }
206 247
207 /* Init GBIT MAC COMMON core */ 248 /* Init GBIT MAC COMMON core */
@@ -211,13 +252,6 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
211 bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn); 252 bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
212 } 253 }
213 254
214 /* Try to get SPROM */
215 err = bcma_sprom_get(bus);
216 if (err == -ENOENT) {
217 bcma_err(bus, "No SPROM available\n");
218 } else if (err)
219 bcma_err(bus, "Failed to get SPROM: %d\n", err);
220
221 /* Register found cores */ 255 /* Register found cores */
222 bcma_register_cores(bus); 256 bcma_register_cores(bus);
223 257
@@ -275,18 +309,18 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
275 return -1; 309 return -1;
276 } 310 }
277 311
278 /* Init CC core */ 312 /* Early init CC core */
279 core = bcma_find_core(bus, bcma_cc_core_id(bus)); 313 core = bcma_find_core(bus, bcma_cc_core_id(bus));
280 if (core) { 314 if (core) {
281 bus->drv_cc.core = core; 315 bus->drv_cc.core = core;
282 bcma_core_chipcommon_init(&bus->drv_cc); 316 bcma_core_chipcommon_early_init(&bus->drv_cc);
283 } 317 }
284 318
285 /* Init MIPS core */ 319 /* Early init MIPS core */
286 core = bcma_find_core(bus, BCMA_CORE_MIPS_74K); 320 core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
287 if (core) { 321 if (core) {
288 bus->drv_mips.core = core; 322 bus->drv_mips.core = core;
289 bcma_core_mips_init(&bus->drv_mips); 323 bcma_core_mips_early_init(&bus->drv_mips);
290 } 324 }
291 325
292 bcma_info(bus, "Early bus registered\n"); 326 bcma_info(bus, "Early bus registered\n");
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index 0d546b64be34..4adf9ef9a113 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -595,8 +595,11 @@ int bcma_sprom_get(struct bcma_bus *bus)
595 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 595 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
596 596
597 err = bcma_sprom_valid(sprom); 597 err = bcma_sprom_valid(sprom);
598 if (err) 598 if (err) {
599 bcma_warn(bus, "invalid sprom read from the PCIe card, try to use fallback sprom\n");
600 err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
599 goto out; 601 goto out;
602 }
600 603
601 bcma_sprom_extract_r8(bus, sprom); 604 bcma_sprom_extract_r8(bus, sprom);
602 605
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 3f4bfc814dc7..9959d4cb23dc 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -492,7 +492,7 @@ done:
492static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) 492static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
493{ 493{
494 u16 buf_len = 0; 494 u16 buf_len = 0;
495 int ret, buf_block_len, blksz; 495 int ret, num_blocks, blksz;
496 struct sk_buff *skb = NULL; 496 struct sk_buff *skb = NULL;
497 u32 type; 497 u32 type;
498 u8 *payload = NULL; 498 u8 *payload = NULL;
@@ -514,18 +514,17 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
514 } 514 }
515 515
516 blksz = SDIO_BLOCK_SIZE; 516 blksz = SDIO_BLOCK_SIZE;
517 buf_block_len = (buf_len + blksz - 1) / blksz; 517 num_blocks = DIV_ROUND_UP(buf_len, blksz);
518 518
519 if (buf_len <= SDIO_HEADER_LEN 519 if (buf_len <= SDIO_HEADER_LEN
520 || (buf_block_len * blksz) > ALLOC_BUF_SIZE) { 520 || (num_blocks * blksz) > ALLOC_BUF_SIZE) {
521 BT_ERR("invalid packet length: %d", buf_len); 521 BT_ERR("invalid packet length: %d", buf_len);
522 ret = -EINVAL; 522 ret = -EINVAL;
523 goto exit; 523 goto exit;
524 } 524 }
525 525
526 /* Allocate buffer */ 526 /* Allocate buffer */
527 skb = bt_skb_alloc(buf_block_len * blksz + BTSDIO_DMA_ALIGN, 527 skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC);
528 GFP_ATOMIC);
529 if (skb == NULL) { 528 if (skb == NULL) {
530 BT_ERR("No free skb"); 529 BT_ERR("No free skb");
531 goto exit; 530 goto exit;
@@ -541,7 +540,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
541 payload = skb->data; 540 payload = skb->data;
542 541
543 ret = sdio_readsb(card->func, payload, card->ioport, 542 ret = sdio_readsb(card->func, payload, card->ioport,
544 buf_block_len * blksz); 543 num_blocks * blksz);
545 if (ret < 0) { 544 if (ret < 0) {
546 BT_ERR("readsb failed: %d", ret); 545 BT_ERR("readsb failed: %d", ret);
547 ret = -EIO; 546 ret = -EIO;
@@ -553,7 +552,16 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
553 */ 552 */
554 553
555 buf_len = payload[0]; 554 buf_len = payload[0];
556 buf_len |= (u16) payload[1] << 8; 555 buf_len |= payload[1] << 8;
556 buf_len |= payload[2] << 16;
557
558 if (buf_len > blksz * num_blocks) {
559 BT_ERR("Skip incorrect packet: hdrlen %d buffer %d",
560 buf_len, blksz * num_blocks);
561 ret = -EIO;
562 goto exit;
563 }
564
557 type = payload[3]; 565 type = payload[3];
558 566
559 switch (type) { 567 switch (type) {
@@ -589,8 +597,7 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
589 597
590 default: 598 default:
591 BT_ERR("Unknown packet type:%d", type); 599 BT_ERR("Unknown packet type:%d", type);
592 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, payload, 600 BT_ERR("hex: %*ph", blksz * num_blocks, payload);
593 blksz * buf_block_len);
594 601
595 kfree_skb(skb); 602 kfree_skb(skb);
596 skb = NULL; 603 skb = NULL;
@@ -849,8 +856,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
849 if (ret < 0) { 856 if (ret < 0) {
850 i++; 857 i++;
851 BT_ERR("i=%d writesb failed: %d", i, ret); 858 BT_ERR("i=%d writesb failed: %d", i, ret);
852 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, 859 BT_ERR("hex: %*ph", nb, payload);
853 payload, nb);
854 ret = -EIO; 860 ret = -EIO;
855 if (i > MAX_WRITE_IOMEM_RETRY) 861 if (i > MAX_WRITE_IOMEM_RETRY)
856 goto exit; 862 goto exit;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ee82f2fb65f0..a1d4ede5b892 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -96,6 +96,7 @@ static struct usb_device_id btusb_table[] = {
96 { USB_DEVICE(0x0c10, 0x0000) }, 96 { USB_DEVICE(0x0c10, 0x0000) },
97 97
98 /* Broadcom BCM20702A0 */ 98 /* Broadcom BCM20702A0 */
99 { USB_DEVICE(0x0b05, 0x17b5) },
99 { USB_DEVICE(0x04ca, 0x2003) }, 100 { USB_DEVICE(0x04ca, 0x2003) },
100 { USB_DEVICE(0x0489, 0xe042) }, 101 { USB_DEVICE(0x0489, 0xe042) },
101 { USB_DEVICE(0x413c, 0x8197) }, 102 { USB_DEVICE(0x413c, 0x8197) },
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index abd9038e06b1..d6668071bd0d 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -604,6 +604,23 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
604 return slots; 604 return slots;
605} 605}
606 606
607static inline int dca3_tag_map_invalid(u8 *tag_map)
608{
609 /*
610 * If the tag map is not programmed by the BIOS the default is:
611 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
612 *
613 * This an invalid map and will result in only 2 possible tags
614 * 0x1F and 0x00. 0x00 is an invalid DCA tag so we know that
615 * this entire definition is invalid.
616 */
617 return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
618 (tag_map[1] == DCA_TAG_MAP_VALID) &&
619 (tag_map[2] == DCA_TAG_MAP_VALID) &&
620 (tag_map[3] == DCA_TAG_MAP_VALID) &&
621 (tag_map[4] == DCA_TAG_MAP_VALID));
622}
623
607struct dca_provider * __devinit 624struct dca_provider * __devinit
608ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) 625ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
609{ 626{
@@ -674,6 +691,12 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
674 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK; 691 ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
675 } 692 }
676 693
694 if (dca3_tag_map_invalid(ioatdca->tag_map)) {
695 dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
696 free_dca_provider(dca);
697 return NULL;
698 }
699
677 err = register_dca_provider(dca, &pdev->dev); 700 err = register_dca_provider(dca, &pdev->dev);
678 if (err) { 701 if (err) {
679 free_dca_provider(dca); 702 free_dca_provider(dca);
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 81363ffa5357..6e99d73563b8 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -490,7 +490,7 @@ receive_dmsg(struct hfc_pci *hc)
490 (df->data[le16_to_cpu(zp->z1)])) { 490 (df->data[le16_to_cpu(zp->z1)])) {
491 if (dch->debug & DEBUG_HW) 491 if (dch->debug & DEBUG_HW)
492 printk(KERN_DEBUG 492 printk(KERN_DEBUG
493 "empty_fifo hfcpci paket inv. len " 493 "empty_fifo hfcpci packet inv. len "
494 "%d or crc %d\n", 494 "%d or crc %d\n",
495 rcnt, 495 rcnt,
496 df->data[le16_to_cpu(zp->z1)]); 496 df->data[le16_to_cpu(zp->z1)]);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 182ecf0626c2..feafa91c2ed9 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1302,7 +1302,7 @@ modeisar(struct isar_ch *ch, u32 bprotocol)
1302 &ch->is->Flags)) 1302 &ch->is->Flags))
1303 ch->dpath = 1; 1303 ch->dpath = 1;
1304 else { 1304 else {
1305 pr_info("modeisar both pathes in use\n"); 1305 pr_info("modeisar both paths in use\n");
1306 return -EBUSY; 1306 return -EBUSY;
1307 } 1307 }
1308 if (bprotocol == ISDN_P_B_HDLC) 1308 if (bprotocol == ISDN_P_B_HDLC)
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index a47637be0cc5..ddec47a911a0 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -35,7 +35,7 @@ static int chancount;
35/* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */ 35/* experimental REJECT after ALERTING for CALLBACK to beat the 4s delay */
36#define ALERT_REJECT 0 36#define ALERT_REJECT 0
37 37
38/* Value to delay the sending of the first B-channel paket after CONNECT 38/* Value to delay the sending of the first B-channel packet after CONNECT
39 * here is no value given by ITU, but experience shows that 300 ms will 39 * here is no value given by ITU, but experience shows that 300 ms will
40 * work on many networks, if you or your other side is behind local exchanges 40 * work on many networks, if you or your other side is behind local exchanges
41 * a greater value may be recommented. If the delay is to short the first paket 41 * a greater value may be recommented. If the delay is to short the first paket
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 334fa90bed8e..f60d4be58941 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -354,7 +354,7 @@ receive_dmsg(struct IsdnCardState *cs)
354 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) || 354 if ((rcnt > MAX_DFRAME_LEN + 3) || (rcnt < 4) ||
355 (df->data[zp->z1])) { 355 (df->data[zp->z1])) {
356 if (cs->debug & L1_DEB_WARN) 356 if (cs->debug & L1_DEB_WARN)
357 debugl1(cs, "empty_fifo hfcpci paket inv. len %d or crc %d", rcnt, df->data[zp->z1]); 357 debugl1(cs, "empty_fifo hfcpci packet inv. len %d or crc %d", rcnt, df->data[zp->z1]);
358#ifdef ERROR_STATISTIC 358#ifdef ERROR_STATISTIC
359 cs->err_rx++; 359 cs->err_rx++;
360#endif 360#endif
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 4db846be4369..4ec279ce052f 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -270,7 +270,7 @@ read_fifo(struct IsdnCardState *cs, u_char fifo, int trans_max)
270 270
271 if ((count > fifo_size) || (count < 4)) { 271 if ((count > fifo_size) || (count < 4)) {
272 if (cs->debug & L1_DEB_WARN) 272 if (cs->debug & L1_DEB_WARN)
273 debugl1(cs, "hfcsx_read_fifo %d paket inv. len %d ", fifo , count); 273 debugl1(cs, "hfcsx_read_fifo %d packet inv. len %d ", fifo , count);
274 while (count) { 274 while (count) {
275 count--; /* empty fifo */ 275 count--; /* empty fifo */
276 Read_hfc(cs, HFCSX_FIF_DRD); 276 Read_hfc(cs, HFCSX_FIF_DRD);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index db50f788855d..f8e405c383a0 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -277,7 +277,6 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
277 u16 timebase, u8 *buf, int len) 277 u16 timebase, u8 *buf, int len)
278{ 278{
279 u8 *p; 279 u8 *p;
280 int multi = 0;
281 u8 frame[len + 32]; 280 u8 frame[len + 32];
282 struct socket *socket = NULL; 281 struct socket *socket = NULL;
283 282
@@ -317,9 +316,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
317 *p++ = hc->id >> 8; 316 *p++ = hc->id >> 8;
318 *p++ = hc->id; 317 *p++ = hc->id;
319 } 318 }
320 *p++ = (multi == 1) ? 0x80 : 0x00 + channel; /* m-flag, channel */ 319 *p++ = 0x00 + channel; /* m-flag, channel */
321 if (multi == 1)
322 *p++ = len; /* length */
323 *p++ = timebase >> 8; /* time base */ 320 *p++ = timebase >> 8; /* time base */
324 *p++ = timebase; 321 *p++ = timebase;
325 322
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index be88728f1106..592f597d8951 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -250,7 +250,7 @@ tei_debug(struct FsmInst *fi, char *fmt, ...)
250static int 250static int
251get_free_id(struct manager *mgr) 251get_free_id(struct manager *mgr)
252{ 252{
253 u64 ids = 0; 253 DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
254 int i; 254 int i;
255 struct layer2 *l2; 255 struct layer2 *l2;
256 256
@@ -261,11 +261,11 @@ get_free_id(struct manager *mgr)
261 __func__); 261 __func__);
262 return -EBUSY; 262 return -EBUSY;
263 } 263 }
264 test_and_set_bit(l2->ch.nr, (u_long *)&ids); 264 __set_bit(l2->ch.nr, ids);
265 } 265 }
266 for (i = 1; i < 64; i++) 266 i = find_next_zero_bit(ids, 64, 1);
267 if (!test_bit(i, (u_long *)&ids)) 267 if (i < 64)
268 return i; 268 return i;
269 printk(KERN_WARNING "%s: more as 63 layer2 for one device\n", 269 printk(KERN_WARNING "%s: more as 63 layer2 for one device\n",
270 __func__); 270 __func__);
271 return -EBUSY; 271 return -EBUSY;
@@ -274,7 +274,7 @@ get_free_id(struct manager *mgr)
274static int 274static int
275get_free_tei(struct manager *mgr) 275get_free_tei(struct manager *mgr)
276{ 276{
277 u64 ids = 0; 277 DECLARE_BITMAP(ids, 64) = { [0 ... BITS_TO_LONGS(64) - 1] = 0 };
278 int i; 278 int i;
279 struct layer2 *l2; 279 struct layer2 *l2;
280 280
@@ -288,11 +288,11 @@ get_free_tei(struct manager *mgr)
288 continue; 288 continue;
289 i -= 64; 289 i -= 64;
290 290
291 test_and_set_bit(i, (u_long *)&ids); 291 __set_bit(i, ids);
292 } 292 }
293 for (i = 0; i < 64; i++) 293 i = find_first_zero_bit(ids, 64);
294 if (!test_bit(i, (u_long *)&ids)) 294 if (i < 64)
295 return i + 64; 295 return i + 64;
296 printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n", 296 printk(KERN_WARNING "%s: more as 63 dynamic tei for one device\n",
297 __func__); 297 __func__);
298 return -1; 298 return -1;
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c
index a18e639b40d7..42ecfef80132 100644
--- a/drivers/isdn/pcbit/layer2.c
+++ b/drivers/isdn/pcbit/layer2.c
@@ -508,7 +508,7 @@ pcbit_irq_handler(int interrupt, void *devptr)
508 return IRQ_NONE; 508 return IRQ_NONE;
509 } 509 }
510 if (dev->interrupt) { 510 if (dev->interrupt) {
511 printk(KERN_DEBUG "pcbit: reentering interrupt hander\n"); 511 printk(KERN_DEBUG "pcbit: reentering interrupt handler\n");
512 return IRQ_HANDLED; 512 return IRQ_HANDLED;
513 } 513 }
514 dev->interrupt = 1; 514 dev->interrupt = 1;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index d427493997b6..cbc44f53755a 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -61,7 +61,7 @@ module_param(clockp, int, 0);
61module_param(clockm, int, 0); 61module_param(clockm, int, 0);
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64static int __devinit com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 64static int com20020pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
65{ 65{
66 struct net_device *dev; 66 struct net_device *dev;
67 struct arcnet_local *lp; 67 struct arcnet_local *lp;
@@ -135,7 +135,7 @@ out_dev:
135 return err; 135 return err;
136} 136}
137 137
138static void __devexit com20020pci_remove(struct pci_dev *pdev) 138static void com20020pci_remove(struct pci_dev *pdev)
139{ 139{
140 struct net_device *dev = pci_get_drvdata(pdev); 140 struct net_device *dev = pci_get_drvdata(pdev);
141 unregister_netdev(dev); 141 unregister_netdev(dev);
@@ -178,7 +178,7 @@ static struct pci_driver com20020pci_driver = {
178 .name = "com20020", 178 .name = "com20020",
179 .id_table = com20020pci_id_table, 179 .id_table = com20020pci_id_table,
180 .probe = com20020pci_probe, 180 .probe = com20020pci_probe,
181 .remove = __devexit_p(com20020pci_remove), 181 .remove = com20020pci_remove,
182}; 182};
183 183
184static int __init com20020pci_init(void) 184static int __init com20020pci_init(void)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e15cc11edbbe..7c9d136e74be 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -84,6 +84,10 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
84 84
85/* Forward declaration */ 85/* Forward declaration */
86static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); 86static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
87static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
88static void rlb_src_unlink(struct bonding *bond, u32 index);
89static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
90 u32 ip_dst_hash);
87 91
88static inline u8 _simple_hash(const u8 *hash_start, int hash_size) 92static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
89{ 93{
@@ -354,6 +358,18 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
354 if (!arp) 358 if (!arp)
355 goto out; 359 goto out;
356 360
361 /* We received an ARP from arp->ip_src.
362 * We might have used this IP address previously (on the bonding host
363 * itself or on a system that is bridged together with the bond).
364 * However, if arp->mac_src is different than what is stored in
365 * rx_hashtbl, some other host is now using the IP and we must prevent
366 * sending out client updates with this IP address and the old MAC
367 * address.
368 * Clean up all hash table entries that have this address as ip_src but
369 * have a different mac_src.
370 */
371 rlb_purge_src_ip(bond, arp);
372
357 if (arp->op_code == htons(ARPOP_REPLY)) { 373 if (arp->op_code == htons(ARPOP_REPLY)) {
358 /* update rx hash table for this ARP */ 374 /* update rx hash table for this ARP */
359 rlb_update_entry_from_arp(bond, arp); 375 rlb_update_entry_from_arp(bond, arp);
@@ -432,9 +448,9 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
432 _lock_rx_hashtbl_bh(bond); 448 _lock_rx_hashtbl_bh(bond);
433 449
434 rx_hash_table = bond_info->rx_hashtbl; 450 rx_hash_table = bond_info->rx_hashtbl;
435 index = bond_info->rx_hashtbl_head; 451 index = bond_info->rx_hashtbl_used_head;
436 for (; index != RLB_NULL_INDEX; index = next_index) { 452 for (; index != RLB_NULL_INDEX; index = next_index) {
437 next_index = rx_hash_table[index].next; 453 next_index = rx_hash_table[index].used_next;
438 if (rx_hash_table[index].slave == slave) { 454 if (rx_hash_table[index].slave == slave) {
439 struct slave *assigned_slave = rlb_next_rx_slave(bond); 455 struct slave *assigned_slave = rlb_next_rx_slave(bond);
440 456
@@ -519,8 +535,9 @@ static void rlb_update_rx_clients(struct bonding *bond)
519 535
520 _lock_rx_hashtbl_bh(bond); 536 _lock_rx_hashtbl_bh(bond);
521 537
522 hash_index = bond_info->rx_hashtbl_head; 538 hash_index = bond_info->rx_hashtbl_used_head;
523 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { 539 for (; hash_index != RLB_NULL_INDEX;
540 hash_index = client_info->used_next) {
524 client_info = &(bond_info->rx_hashtbl[hash_index]); 541 client_info = &(bond_info->rx_hashtbl[hash_index]);
525 if (client_info->ntt) { 542 if (client_info->ntt) {
526 rlb_update_client(client_info); 543 rlb_update_client(client_info);
@@ -548,8 +565,9 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
548 565
549 _lock_rx_hashtbl_bh(bond); 566 _lock_rx_hashtbl_bh(bond);
550 567
551 hash_index = bond_info->rx_hashtbl_head; 568 hash_index = bond_info->rx_hashtbl_used_head;
552 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { 569 for (; hash_index != RLB_NULL_INDEX;
570 hash_index = client_info->used_next) {
553 client_info = &(bond_info->rx_hashtbl[hash_index]); 571 client_info = &(bond_info->rx_hashtbl[hash_index]);
554 572
555 if ((client_info->slave == slave) && 573 if ((client_info->slave == slave) &&
@@ -578,8 +596,9 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
578 596
579 _lock_rx_hashtbl(bond); 597 _lock_rx_hashtbl(bond);
580 598
581 hash_index = bond_info->rx_hashtbl_head; 599 hash_index = bond_info->rx_hashtbl_used_head;
582 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { 600 for (; hash_index != RLB_NULL_INDEX;
601 hash_index = client_info->used_next) {
583 client_info = &(bond_info->rx_hashtbl[hash_index]); 602 client_info = &(bond_info->rx_hashtbl[hash_index]);
584 603
585 if (!client_info->slave) { 604 if (!client_info->slave) {
@@ -625,6 +644,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
625 /* update mac address from arp */ 644 /* update mac address from arp */
626 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 645 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
627 } 646 }
647 memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
628 648
629 assigned_slave = client_info->slave; 649 assigned_slave = client_info->slave;
630 if (assigned_slave) { 650 if (assigned_slave) {
@@ -647,6 +667,17 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
647 assigned_slave = rlb_next_rx_slave(bond); 667 assigned_slave = rlb_next_rx_slave(bond);
648 668
649 if (assigned_slave) { 669 if (assigned_slave) {
670 if (!(client_info->assigned &&
671 client_info->ip_src == arp->ip_src)) {
672 /* ip_src is going to be updated,
673 * fix the src hash list
674 */
675 u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
676 sizeof(arp->ip_src));
677 rlb_src_unlink(bond, hash_index);
678 rlb_src_link(bond, hash_src, hash_index);
679 }
680
650 client_info->ip_src = arp->ip_src; 681 client_info->ip_src = arp->ip_src;
651 client_info->ip_dst = arp->ip_dst; 682 client_info->ip_dst = arp->ip_dst;
652 /* arp->mac_dst is broadcast for arp reqeusts. 683 /* arp->mac_dst is broadcast for arp reqeusts.
@@ -654,6 +685,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
654 * upon receiving an arp reply. 685 * upon receiving an arp reply.
655 */ 686 */
656 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN); 687 memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
688 memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
657 client_info->slave = assigned_slave; 689 client_info->slave = assigned_slave;
658 690
659 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) { 691 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -669,11 +701,11 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
669 } 701 }
670 702
671 if (!client_info->assigned) { 703 if (!client_info->assigned) {
672 u32 prev_tbl_head = bond_info->rx_hashtbl_head; 704 u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
673 bond_info->rx_hashtbl_head = hash_index; 705 bond_info->rx_hashtbl_used_head = hash_index;
674 client_info->next = prev_tbl_head; 706 client_info->used_next = prev_tbl_head;
675 if (prev_tbl_head != RLB_NULL_INDEX) { 707 if (prev_tbl_head != RLB_NULL_INDEX) {
676 bond_info->rx_hashtbl[prev_tbl_head].prev = 708 bond_info->rx_hashtbl[prev_tbl_head].used_prev =
677 hash_index; 709 hash_index;
678 } 710 }
679 client_info->assigned = 1; 711 client_info->assigned = 1;
@@ -694,6 +726,12 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
694 struct arp_pkt *arp = arp_pkt(skb); 726 struct arp_pkt *arp = arp_pkt(skb);
695 struct slave *tx_slave = NULL; 727 struct slave *tx_slave = NULL;
696 728
729 /* Don't modify or load balance ARPs that do not originate locally
730 * (e.g.,arrive via a bridge).
731 */
732 if (!bond_slave_has_mac(bond, arp->mac_src))
733 return NULL;
734
697 if (arp->op_code == htons(ARPOP_REPLY)) { 735 if (arp->op_code == htons(ARPOP_REPLY)) {
698 /* the arp must be sent on the selected 736 /* the arp must be sent on the selected
699 * rx channel 737 * rx channel
@@ -740,8 +778,9 @@ static void rlb_rebalance(struct bonding *bond)
740 _lock_rx_hashtbl_bh(bond); 778 _lock_rx_hashtbl_bh(bond);
741 779
742 ntt = 0; 780 ntt = 0;
743 hash_index = bond_info->rx_hashtbl_head; 781 hash_index = bond_info->rx_hashtbl_used_head;
744 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { 782 for (; hash_index != RLB_NULL_INDEX;
783 hash_index = client_info->used_next) {
745 client_info = &(bond_info->rx_hashtbl[hash_index]); 784 client_info = &(bond_info->rx_hashtbl[hash_index]);
746 assigned_slave = rlb_next_rx_slave(bond); 785 assigned_slave = rlb_next_rx_slave(bond);
747 if (assigned_slave && (client_info->slave != assigned_slave)) { 786 if (assigned_slave && (client_info->slave != assigned_slave)) {
@@ -759,11 +798,113 @@ static void rlb_rebalance(struct bonding *bond)
759} 798}
760 799
761/* Caller must hold rx_hashtbl lock */ 800/* Caller must hold rx_hashtbl lock */
801static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
802{
803 entry->used_next = RLB_NULL_INDEX;
804 entry->used_prev = RLB_NULL_INDEX;
805 entry->assigned = 0;
806 entry->slave = NULL;
807 entry->tag = 0;
808}
809static void rlb_init_table_entry_src(struct rlb_client_info *entry)
810{
811 entry->src_first = RLB_NULL_INDEX;
812 entry->src_prev = RLB_NULL_INDEX;
813 entry->src_next = RLB_NULL_INDEX;
814}
815
762static void rlb_init_table_entry(struct rlb_client_info *entry) 816static void rlb_init_table_entry(struct rlb_client_info *entry)
763{ 817{
764 memset(entry, 0, sizeof(struct rlb_client_info)); 818 memset(entry, 0, sizeof(struct rlb_client_info));
765 entry->next = RLB_NULL_INDEX; 819 rlb_init_table_entry_dst(entry);
766 entry->prev = RLB_NULL_INDEX; 820 rlb_init_table_entry_src(entry);
821}
822
823static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
824{
825 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
826 u32 next_index = bond_info->rx_hashtbl[index].used_next;
827 u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
828
829 if (index == bond_info->rx_hashtbl_used_head)
830 bond_info->rx_hashtbl_used_head = next_index;
831 if (prev_index != RLB_NULL_INDEX)
832 bond_info->rx_hashtbl[prev_index].used_next = next_index;
833 if (next_index != RLB_NULL_INDEX)
834 bond_info->rx_hashtbl[next_index].used_prev = prev_index;
835}
836
837/* unlink a rlb hash table entry from the src list */
838static void rlb_src_unlink(struct bonding *bond, u32 index)
839{
840 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
841 u32 next_index = bond_info->rx_hashtbl[index].src_next;
842 u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
843
844 bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
845 bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
846
847 if (next_index != RLB_NULL_INDEX)
848 bond_info->rx_hashtbl[next_index].src_prev = prev_index;
849
850 if (prev_index == RLB_NULL_INDEX)
851 return;
852
853 /* is prev_index pointing to the head of this list? */
854 if (bond_info->rx_hashtbl[prev_index].src_first == index)
855 bond_info->rx_hashtbl[prev_index].src_first = next_index;
856 else
857 bond_info->rx_hashtbl[prev_index].src_next = next_index;
858
859}
860
861static void rlb_delete_table_entry(struct bonding *bond, u32 index)
862{
863 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
864 struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
865
866 rlb_delete_table_entry_dst(bond, index);
867 rlb_init_table_entry_dst(entry);
868
869 rlb_src_unlink(bond, index);
870}
871
872/* add the rx_hashtbl[ip_dst_hash] entry to the list
873 * of entries with identical ip_src_hash
874 */
875static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
876{
877 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
878 u32 next;
879
880 bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
881 next = bond_info->rx_hashtbl[ip_src_hash].src_first;
882 bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
883 if (next != RLB_NULL_INDEX)
884 bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
885 bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
886}
887
888/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
889 * not match arp->mac_src */
890static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
891{
892 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
893 u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
894 u32 index;
895
896 _lock_rx_hashtbl_bh(bond);
897
898 index = bond_info->rx_hashtbl[ip_src_hash].src_first;
899 while (index != RLB_NULL_INDEX) {
900 struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
901 u32 next_index = entry->src_next;
902 if (entry->ip_src == arp->ip_src &&
903 !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
904 rlb_delete_table_entry(bond, index);
905 index = next_index;
906 }
907 _unlock_rx_hashtbl_bh(bond);
767} 908}
768 909
769static int rlb_initialize(struct bonding *bond) 910static int rlb_initialize(struct bonding *bond)
@@ -781,7 +922,7 @@ static int rlb_initialize(struct bonding *bond)
781 922
782 bond_info->rx_hashtbl = new_hashtbl; 923 bond_info->rx_hashtbl = new_hashtbl;
783 924
784 bond_info->rx_hashtbl_head = RLB_NULL_INDEX; 925 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
785 926
786 for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) { 927 for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
787 rlb_init_table_entry(bond_info->rx_hashtbl + i); 928 rlb_init_table_entry(bond_info->rx_hashtbl + i);
@@ -803,7 +944,7 @@ static void rlb_deinitialize(struct bonding *bond)
803 944
804 kfree(bond_info->rx_hashtbl); 945 kfree(bond_info->rx_hashtbl);
805 bond_info->rx_hashtbl = NULL; 946 bond_info->rx_hashtbl = NULL;
806 bond_info->rx_hashtbl_head = RLB_NULL_INDEX; 947 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
807 948
808 _unlock_rx_hashtbl_bh(bond); 949 _unlock_rx_hashtbl_bh(bond);
809} 950}
@@ -815,25 +956,13 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
815 956
816 _lock_rx_hashtbl_bh(bond); 957 _lock_rx_hashtbl_bh(bond);
817 958
818 curr_index = bond_info->rx_hashtbl_head; 959 curr_index = bond_info->rx_hashtbl_used_head;
819 while (curr_index != RLB_NULL_INDEX) { 960 while (curr_index != RLB_NULL_INDEX) {
820 struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]); 961 struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
821 u32 next_index = bond_info->rx_hashtbl[curr_index].next; 962 u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
822 u32 prev_index = bond_info->rx_hashtbl[curr_index].prev;
823
824 if (curr->tag && (curr->vlan_id == vlan_id)) {
825 if (curr_index == bond_info->rx_hashtbl_head) {
826 bond_info->rx_hashtbl_head = next_index;
827 }
828 if (prev_index != RLB_NULL_INDEX) {
829 bond_info->rx_hashtbl[prev_index].next = next_index;
830 }
831 if (next_index != RLB_NULL_INDEX) {
832 bond_info->rx_hashtbl[next_index].prev = prev_index;
833 }
834 963
835 rlb_init_table_entry(curr); 964 if (curr->tag && (curr->vlan_id == vlan_id))
836 } 965 rlb_delete_table_entry(bond, curr_index);
837 966
838 curr_index = next_index; 967 curr_index = next_index;
839 } 968 }
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 90f140a2d197..e7a5b8b37ea3 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -94,15 +94,35 @@ struct tlb_client_info {
94 94
95/* ------------------------------------------------------------------------- 95/* -------------------------------------------------------------------------
96 * struct rlb_client_info contains all info related to a specific rx client 96 * struct rlb_client_info contains all info related to a specific rx client
97 * connection. This is the Clients Hash Table entry struct 97 * connection. This is the Clients Hash Table entry struct.
98 * Note that this is not a proper hash table; if a new client's IP address
99 * hash collides with an existing client entry, the old entry is replaced.
100 *
101 * There is a linked list (linked by the used_next and used_prev members)
102 * linking all the used entries of the hash table. This allows updating
103 * all the clients without walking over all the unused elements of the table.
104 *
105 * There are also linked lists of entries with identical hash(ip_src). These
106 * allow cleaning up the table from ip_src<->mac_src associations that have
107 * become outdated and would cause sending out invalid ARP updates to the
108 * network. These are linked by the (src_next and src_prev members).
98 * ------------------------------------------------------------------------- 109 * -------------------------------------------------------------------------
99 */ 110 */
100struct rlb_client_info { 111struct rlb_client_info {
101 __be32 ip_src; /* the server IP address */ 112 __be32 ip_src; /* the server IP address */
102 __be32 ip_dst; /* the client IP address */ 113 __be32 ip_dst; /* the client IP address */
114 u8 mac_src[ETH_ALEN]; /* the server MAC address */
103 u8 mac_dst[ETH_ALEN]; /* the client MAC address */ 115 u8 mac_dst[ETH_ALEN]; /* the client MAC address */
104 u32 next; /* The next Hash table entry index */ 116
105 u32 prev; /* The previous Hash table entry index */ 117 /* list of used hash table entries, starting at rx_hashtbl_used_head */
118 u32 used_next;
119 u32 used_prev;
120
121 /* ip_src based hashing */
122 u32 src_next; /* next entry with same hash(ip_src) */
123 u32 src_prev; /* prev entry with same hash(ip_src) */
124 u32 src_first; /* first entry with hash(ip_src) == this entry's index */
125
106 u8 assigned; /* checking whether this entry is assigned */ 126 u8 assigned; /* checking whether this entry is assigned */
107 u8 ntt; /* flag - need to transmit client info */ 127 u8 ntt; /* flag - need to transmit client info */
108 struct slave *slave; /* the slave assigned to this client */ 128 struct slave *slave; /* the slave assigned to this client */
@@ -131,7 +151,7 @@ struct alb_bond_info {
131 int rlb_enabled; 151 int rlb_enabled;
132 struct rlb_client_info *rx_hashtbl; /* Receive hash table */ 152 struct rlb_client_info *rx_hashtbl; /* Receive hash table */
133 spinlock_t rx_hashtbl_lock; 153 spinlock_t rx_hashtbl_lock;
134 u32 rx_hashtbl_head; 154 u32 rx_hashtbl_used_head;
135 u8 rx_ntt; /* flag - need to transmit 155 u8 rx_ntt; /* flag - need to transmit
136 * to all rx clients 156 * to all rx clients
137 */ 157 */
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 2cf084eb9d52..5fc4c2351478 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -31,8 +31,9 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
31 31
32 spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock)); 32 spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
33 33
34 hash_index = bond_info->rx_hashtbl_head; 34 hash_index = bond_info->rx_hashtbl_used_head;
35 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) { 35 for (; hash_index != RLB_NULL_INDEX;
36 hash_index = client_info->used_next) {
36 client_info = &(bond_info->rx_hashtbl[hash_index]); 37 client_info = &(bond_info->rx_hashtbl[hash_index]);
37 seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n", 38 seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
38 &client_info->ip_src, 39 &client_info->ip_src,
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a7d47350ea4b..ef2cb2418535 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -615,15 +615,9 @@ static int bond_check_dev_link(struct bonding *bond,
615 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; 615 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
616 616
617 /* Try to get link status using Ethtool first. */ 617 /* Try to get link status using Ethtool first. */
618 if (slave_dev->ethtool_ops) { 618 if (slave_dev->ethtool_ops->get_link)
619 if (slave_dev->ethtool_ops->get_link) { 619 return slave_dev->ethtool_ops->get_link(slave_dev) ?
620 u32 link; 620 BMSR_LSTATUS : 0;
621
622 link = slave_dev->ethtool_ops->get_link(slave_dev);
623
624 return link ? BMSR_LSTATUS : 0;
625 }
626 }
627 621
628 /* Ethtool can't be used, fallback to MII ioctls. */ 622 /* Ethtool can't be used, fallback to MII ioctls. */
629 ioctl = slave_ops->ndo_do_ioctl; 623 ioctl = slave_ops->ndo_do_ioctl;
@@ -1510,8 +1504,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1510 int link_reporting; 1504 int link_reporting;
1511 int res = 0; 1505 int res = 0;
1512 1506
1513 if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL && 1507 if (!bond->params.use_carrier &&
1514 slave_ops->ndo_do_ioctl == NULL) { 1508 slave_dev->ethtool_ops->get_link == NULL &&
1509 slave_ops->ndo_do_ioctl == NULL) {
1515 pr_warning("%s: Warning: no link monitoring support for %s\n", 1510 pr_warning("%s: Warning: no link monitoring support for %s\n",
1516 bond_dev->name, slave_dev->name); 1511 bond_dev->name, slave_dev->name);
1517 } 1512 }
@@ -1838,7 +1833,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1838 * anyway (it holds no special properties of the bond device), 1833 * anyway (it holds no special properties of the bond device),
1839 * so we can change it without calling change_active_interface() 1834 * so we can change it without calling change_active_interface()
1840 */ 1835 */
1841 if (!bond->curr_active_slave) 1836 if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
1842 bond->curr_active_slave = new_slave; 1837 bond->curr_active_slave = new_slave;
1843 1838
1844 break; 1839 break;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index f8af2fcd3d16..6dded569b111 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -22,6 +22,7 @@
22#include <linux/in6.h> 22#include <linux/in6.h>
23#include <linux/netpoll.h> 23#include <linux/netpoll.h>
24#include <linux/inetdevice.h> 24#include <linux/inetdevice.h>
25#include <linux/etherdevice.h>
25#include "bond_3ad.h" 26#include "bond_3ad.h"
26#include "bond_alb.h" 27#include "bond_alb.h"
27 28
@@ -450,6 +451,18 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
450} 451}
451#endif 452#endif
452 453
454static inline struct slave *bond_slave_has_mac(struct bonding *bond,
455 const u8 *mac)
456{
457 int i = 0;
458 struct slave *tmp;
459
460 bond_for_each_slave(bond, tmp, i)
461 if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
462 return tmp;
463
464 return NULL;
465}
453 466
454/* exported from bond_main.c */ 467/* exported from bond_main.c */
455extern int bond_net_id; 468extern int bond_net_id;
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index bb709fd66993..b56bd9e80957 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -110,6 +110,15 @@ config PCH_CAN
110 is an IOH for x86 embedded processor (Intel Atom E6xx series). 110 is an IOH for x86 embedded processor (Intel Atom E6xx series).
111 This driver can access CAN bus. 111 This driver can access CAN bus.
112 112
113config CAN_GRCAN
114 tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
115 depends on CAN_DEV && OF
116 ---help---
117 Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
118 Note that the driver supports little endian, even though little
119 endian syntheses of the cores would need some modifications on
120 the hardware level to work.
121
113source "drivers/net/can/mscan/Kconfig" 122source "drivers/net/can/mscan/Kconfig"
114 123
115source "drivers/net/can/sja1000/Kconfig" 124source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 938be37b670c..7de59862bbe9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -22,5 +22,6 @@ obj-$(CONFIG_CAN_BFIN) += bfin_can.o
22obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o 22obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
23obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o 23obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
24obj-$(CONFIG_PCH_CAN) += pch_can.o 24obj-$(CONFIG_PCH_CAN) += pch_can.o
25obj-$(CONFIG_CAN_GRCAN) += grcan.o
25 26
26ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 27ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 994b6acd65f4..81baefda037b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -154,7 +154,7 @@ struct at91_priv {
154 canid_t mb0_id; 154 canid_t mb0_id;
155}; 155};
156 156
157static const struct at91_devtype_data at91_devtype_data[] __devinitconst = { 157static const struct at91_devtype_data at91_devtype_data[] = {
158 [AT91_DEVTYPE_SAM9263] = { 158 [AT91_DEVTYPE_SAM9263] = {
159 .rx_first = 1, 159 .rx_first = 1,
160 .rx_split = 8, 160 .rx_split = 8,
@@ -1241,7 +1241,7 @@ static struct attribute_group at91_sysfs_attr_group = {
1241 .attrs = at91_sysfs_attrs, 1241 .attrs = at91_sysfs_attrs,
1242}; 1242};
1243 1243
1244static int __devinit at91_can_probe(struct platform_device *pdev) 1244static int at91_can_probe(struct platform_device *pdev)
1245{ 1245{
1246 const struct at91_devtype_data *devtype_data; 1246 const struct at91_devtype_data *devtype_data;
1247 enum at91_devtype devtype; 1247 enum at91_devtype devtype;
@@ -1338,7 +1338,7 @@ static int __devinit at91_can_probe(struct platform_device *pdev)
1338 return err; 1338 return err;
1339} 1339}
1340 1340
1341static int __devexit at91_can_remove(struct platform_device *pdev) 1341static int at91_can_remove(struct platform_device *pdev)
1342{ 1342{
1343 struct net_device *dev = platform_get_drvdata(pdev); 1343 struct net_device *dev = platform_get_drvdata(pdev);
1344 struct at91_priv *priv = netdev_priv(dev); 1344 struct at91_priv *priv = netdev_priv(dev);
@@ -1371,10 +1371,11 @@ static const struct platform_device_id at91_can_id_table[] = {
1371 /* sentinel */ 1371 /* sentinel */
1372 } 1372 }
1373}; 1373};
1374MODULE_DEVICE_TABLE(platform, at91_can_id_table);
1374 1375
1375static struct platform_driver at91_can_driver = { 1376static struct platform_driver at91_can_driver = {
1376 .probe = at91_can_probe, 1377 .probe = at91_can_probe,
1377 .remove = __devexit_p(at91_can_remove), 1378 .remove = at91_can_remove,
1378 .driver = { 1379 .driver = {
1379 .name = KBUILD_MODNAME, 1380 .name = KBUILD_MODNAME,
1380 .owner = THIS_MODULE, 1381 .owner = THIS_MODULE,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index f2d6d258a286..6a0532176b69 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -531,7 +531,7 @@ static const struct net_device_ops bfin_can_netdev_ops = {
531 .ndo_start_xmit = bfin_can_start_xmit, 531 .ndo_start_xmit = bfin_can_start_xmit,
532}; 532};
533 533
534static int __devinit bfin_can_probe(struct platform_device *pdev) 534static int bfin_can_probe(struct platform_device *pdev)
535{ 535{
536 int err; 536 int err;
537 struct net_device *dev; 537 struct net_device *dev;
@@ -611,7 +611,7 @@ exit:
611 return err; 611 return err;
612} 612}
613 613
614static int __devexit bfin_can_remove(struct platform_device *pdev) 614static int bfin_can_remove(struct platform_device *pdev)
615{ 615{
616 struct net_device *dev = dev_get_drvdata(&pdev->dev); 616 struct net_device *dev = dev_get_drvdata(&pdev->dev);
617 struct bfin_can_priv *priv = netdev_priv(dev); 617 struct bfin_can_priv *priv = netdev_priv(dev);
@@ -677,7 +677,7 @@ static int bfin_can_resume(struct platform_device *pdev)
677 677
678static struct platform_driver bfin_can_driver = { 678static struct platform_driver bfin_can_driver = {
679 .probe = bfin_can_probe, 679 .probe = bfin_can_probe,
680 .remove = __devexit_p(bfin_can_remove), 680 .remove = bfin_can_remove,
681 .suspend = bfin_can_suspend, 681 .suspend = bfin_can_suspend,
682 .resume = bfin_can_resume, 682 .resume = bfin_can_resume,
683 .driver = { 683 .driver = {
@@ -691,3 +691,4 @@ module_platform_driver(bfin_can_driver);
691MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); 691MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
692MODULE_LICENSE("GPL"); 692MODULE_LICENSE("GPL");
693MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver"); 693MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");
694MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e5180dfddba5..5233b8f58d77 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -233,6 +233,12 @@ static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
233 pm_runtime_put_sync(priv->device); 233 pm_runtime_put_sync(priv->device);
234} 234}
235 235
236static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
237{
238 if (priv->raminit)
239 priv->raminit(priv, enable);
240}
241
236static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) 242static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
237{ 243{
238 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + 244 return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
@@ -1090,6 +1096,7 @@ static int c_can_open(struct net_device *dev)
1090 struct c_can_priv *priv = netdev_priv(dev); 1096 struct c_can_priv *priv = netdev_priv(dev);
1091 1097
1092 c_can_pm_runtime_get_sync(priv); 1098 c_can_pm_runtime_get_sync(priv);
1099 c_can_reset_ram(priv, true);
1093 1100
1094 /* open the can device */ 1101 /* open the can device */
1095 err = open_candev(dev); 1102 err = open_candev(dev);
@@ -1118,6 +1125,7 @@ static int c_can_open(struct net_device *dev)
1118exit_irq_fail: 1125exit_irq_fail:
1119 close_candev(dev); 1126 close_candev(dev);
1120exit_open_fail: 1127exit_open_fail:
1128 c_can_reset_ram(priv, false);
1121 c_can_pm_runtime_put_sync(priv); 1129 c_can_pm_runtime_put_sync(priv);
1122 return err; 1130 return err;
1123} 1131}
@@ -1131,6 +1139,8 @@ static int c_can_close(struct net_device *dev)
1131 c_can_stop(dev); 1139 c_can_stop(dev);
1132 free_irq(dev->irq, dev); 1140 free_irq(dev->irq, dev);
1133 close_candev(dev); 1141 close_candev(dev);
1142
1143 c_can_reset_ram(priv, false);
1134 c_can_pm_runtime_put_sync(priv); 1144 c_can_pm_runtime_put_sync(priv);
1135 1145
1136 return 0; 1146 return 0;
@@ -1188,6 +1198,7 @@ int c_can_power_down(struct net_device *dev)
1188 1198
1189 c_can_stop(dev); 1199 c_can_stop(dev);
1190 1200
1201 c_can_reset_ram(priv, false);
1191 c_can_pm_runtime_put_sync(priv); 1202 c_can_pm_runtime_put_sync(priv);
1192 1203
1193 return 0; 1204 return 0;
@@ -1206,6 +1217,7 @@ int c_can_power_up(struct net_device *dev)
1206 WARN_ON(priv->type != BOSCH_D_CAN); 1217 WARN_ON(priv->type != BOSCH_D_CAN);
1207 1218
1208 c_can_pm_runtime_get_sync(priv); 1219 c_can_pm_runtime_get_sync(priv);
1220 c_can_reset_ram(priv, true);
1209 1221
1210 /* Clear PDR and INIT bits */ 1222 /* Clear PDR and INIT bits */
1211 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG); 1223 val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index e5ed41dafa1b..d2e1c21b143f 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -169,6 +169,9 @@ struct c_can_priv {
169 void *priv; /* for board-specific data */ 169 void *priv; /* for board-specific data */
170 u16 irqstatus; 170 u16 irqstatus;
171 enum c_can_dev_id type; 171 enum c_can_dev_id type;
172 u32 __iomem *raminit_ctrlreg;
173 unsigned int instance;
174 void (*raminit) (const struct c_can_priv *priv, bool enable);
172}; 175};
173 176
174struct net_device *alloc_c_can_dev(void); 177struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 3d7830bcd2bf..b374be7891a2 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -63,8 +63,8 @@ static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
63 writew(val, priv->base + 2 * priv->regs[index]); 63 writew(val, priv->base + 2 * priv->regs[index]);
64} 64}
65 65
66static int __devinit c_can_pci_probe(struct pci_dev *pdev, 66static int c_can_pci_probe(struct pci_dev *pdev,
67 const struct pci_device_id *ent) 67 const struct pci_device_id *ent)
68{ 68{
69 struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data; 69 struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
70 struct c_can_priv *priv; 70 struct c_can_priv *priv;
@@ -174,7 +174,7 @@ out:
174 return ret; 174 return ret;
175} 175}
176 176
177static void __devexit c_can_pci_remove(struct pci_dev *pdev) 177static void c_can_pci_remove(struct pci_dev *pdev)
178{ 178{
179 struct net_device *dev = pci_get_drvdata(pdev); 179 struct net_device *dev = pci_get_drvdata(pdev);
180 struct c_can_priv *priv = netdev_priv(dev); 180 struct c_can_priv *priv = netdev_priv(dev);
@@ -210,7 +210,7 @@ static struct pci_driver c_can_pci_driver = {
210 .name = KBUILD_MODNAME, 210 .name = KBUILD_MODNAME,
211 .id_table = c_can_pci_tbl, 211 .id_table = c_can_pci_tbl,
212 .probe = c_can_pci_probe, 212 .probe = c_can_pci_probe,
213 .remove = __devexit_p(c_can_pci_remove), 213 .remove = c_can_pci_remove,
214}; 214};
215 215
216module_pci_driver(c_can_pci_driver); 216module_pci_driver(c_can_pci_driver);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index ee1416132aba..d63b91904f82 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -38,6 +38,8 @@
38 38
39#include "c_can.h" 39#include "c_can.h"
40 40
41#define CAN_RAMINIT_START_MASK(i) (1 << (i))
42
41/* 43/*
42 * 16-bit c_can registers can be arranged differently in the memory 44 * 16-bit c_can registers can be arranged differently in the memory
43 * architecture of different implementations. For example: 16-bit 45 * architecture of different implementations. For example: 16-bit
@@ -68,6 +70,18 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
68 writew(val, priv->base + 2 * priv->regs[index]); 70 writew(val, priv->base + 2 * priv->regs[index]);
69} 71}
70 72
73static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
74{
75 u32 val;
76
77 val = readl(priv->raminit_ctrlreg);
78 if (enable)
79 val |= CAN_RAMINIT_START_MASK(priv->instance);
80 else
81 val &= ~CAN_RAMINIT_START_MASK(priv->instance);
82 writel(val, priv->raminit_ctrlreg);
83}
84
71static struct platform_device_id c_can_id_table[] = { 85static struct platform_device_id c_can_id_table[] = {
72 [BOSCH_C_CAN_PLATFORM] = { 86 [BOSCH_C_CAN_PLATFORM] = {
73 .name = KBUILD_MODNAME, 87 .name = KBUILD_MODNAME,
@@ -83,14 +97,16 @@ static struct platform_device_id c_can_id_table[] = {
83 }, { 97 }, {
84 } 98 }
85}; 99};
100MODULE_DEVICE_TABLE(platform, c_can_id_table);
86 101
87static const struct of_device_id c_can_of_table[] = { 102static const struct of_device_id c_can_of_table[] = {
88 { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] }, 103 { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] },
89 { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] }, 104 { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] },
90 { /* sentinel */ }, 105 { /* sentinel */ },
91}; 106};
107MODULE_DEVICE_TABLE(of, c_can_of_table);
92 108
93static int __devinit c_can_plat_probe(struct platform_device *pdev) 109static int c_can_plat_probe(struct platform_device *pdev)
94{ 110{
95 int ret; 111 int ret;
96 void __iomem *addr; 112 void __iomem *addr;
@@ -99,7 +115,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
99 const struct of_device_id *match; 115 const struct of_device_id *match;
100 const struct platform_device_id *id; 116 const struct platform_device_id *id;
101 struct pinctrl *pinctrl; 117 struct pinctrl *pinctrl;
102 struct resource *mem; 118 struct resource *mem, *res;
103 int irq; 119 int irq;
104 struct clk *clk; 120 struct clk *clk;
105 121
@@ -178,6 +194,18 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
178 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; 194 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
179 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; 195 priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
180 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; 196 priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
197
198 if (pdev->dev.of_node)
199 priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
200 else
201 priv->instance = pdev->id;
202
203 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
204 priv->raminit_ctrlreg = devm_request_and_ioremap(&pdev->dev, res);
205 if (!priv->raminit_ctrlreg || priv->instance < 0)
206 dev_info(&pdev->dev, "control memory is not used for raminit\n");
207 else
208 priv->raminit = c_can_hw_raminit;
181 break; 209 break;
182 default: 210 default:
183 ret = -EINVAL; 211 ret = -EINVAL;
@@ -220,7 +248,7 @@ exit:
220 return ret; 248 return ret;
221} 249}
222 250
223static int __devexit c_can_plat_remove(struct platform_device *pdev) 251static int c_can_plat_remove(struct platform_device *pdev)
224{ 252{
225 struct net_device *dev = platform_get_drvdata(pdev); 253 struct net_device *dev = platform_get_drvdata(pdev);
226 struct c_can_priv *priv = netdev_priv(dev); 254 struct c_can_priv *priv = netdev_priv(dev);
@@ -306,7 +334,7 @@ static struct platform_driver c_can_plat_driver = {
306 .of_match_table = of_match_ptr(c_can_of_table), 334 .of_match_table = of_match_ptr(c_can_of_table),
307 }, 335 },
308 .probe = c_can_plat_probe, 336 .probe = c_can_plat_probe,
309 .remove = __devexit_p(c_can_plat_remove), 337 .remove = c_can_plat_remove,
310 .suspend = c_can_suspend, 338 .suspend = c_can_suspend,
311 .resume = c_can_resume, 339 .resume = c_can_resume,
312 .id_table = c_can_id_table, 340 .id_table = c_can_id_table,
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 9f3a25ccd665..8eaaac81f320 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -75,12 +75,12 @@ MODULE_LICENSE("GPL v2");
75 75
76static unsigned long port[MAXDEV]; 76static unsigned long port[MAXDEV];
77static unsigned long mem[MAXDEV]; 77static unsigned long mem[MAXDEV];
78static int __devinitdata irq[MAXDEV]; 78static int irq[MAXDEV];
79static int __devinitdata clk[MAXDEV]; 79static int clk[MAXDEV];
80static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 80static u8 cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
81static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 81static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
82static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 82static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
83static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 83static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
84 84
85module_param_array(port, ulong, NULL, S_IRUGO); 85module_param_array(port, ulong, NULL, S_IRUGO);
86MODULE_PARM_DESC(port, "I/O port number"); 86MODULE_PARM_DESC(port, "I/O port number");
@@ -166,7 +166,7 @@ static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
166 spin_unlock_irqrestore(&cc770_isa_port_lock, flags); 166 spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
167} 167}
168 168
169static int __devinit cc770_isa_probe(struct platform_device *pdev) 169static int cc770_isa_probe(struct platform_device *pdev)
170{ 170{
171 struct net_device *dev; 171 struct net_device *dev;
172 struct cc770_priv *priv; 172 struct cc770_priv *priv;
@@ -291,7 +291,7 @@ static int __devinit cc770_isa_probe(struct platform_device *pdev)
291 return err; 291 return err;
292} 292}
293 293
294static int __devexit cc770_isa_remove(struct platform_device *pdev) 294static int cc770_isa_remove(struct platform_device *pdev)
295{ 295{
296 struct net_device *dev = dev_get_drvdata(&pdev->dev); 296 struct net_device *dev = dev_get_drvdata(&pdev->dev);
297 struct cc770_priv *priv = netdev_priv(dev); 297 struct cc770_priv *priv = netdev_priv(dev);
@@ -316,7 +316,7 @@ static int __devexit cc770_isa_remove(struct platform_device *pdev)
316 316
317static struct platform_driver cc770_isa_driver = { 317static struct platform_driver cc770_isa_driver = {
318 .probe = cc770_isa_probe, 318 .probe = cc770_isa_probe,
319 .remove = __devexit_p(cc770_isa_remove), 319 .remove = cc770_isa_remove,
320 .driver = { 320 .driver = {
321 .name = KBUILD_MODNAME, 321 .name = KBUILD_MODNAME,
322 .owner = THIS_MODULE, 322 .owner = THIS_MODULE,
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 688371cda37a..d0f6bfc45aea 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -60,6 +60,7 @@
60MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>"); 60MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
61MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus"); 61MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
62MODULE_LICENSE("GPL v2"); 62MODULE_LICENSE("GPL v2");
63MODULE_ALIAS("platform:" DRV_NAME);
63 64
64#define CC770_PLATFORM_CAN_CLOCK 16000000 65#define CC770_PLATFORM_CAN_CLOCK 16000000
65 66
@@ -74,8 +75,8 @@ static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
74 iowrite8(val, priv->reg_base + reg); 75 iowrite8(val, priv->reg_base + reg);
75} 76}
76 77
77static int __devinit cc770_get_of_node_data(struct platform_device *pdev, 78static int cc770_get_of_node_data(struct platform_device *pdev,
78 struct cc770_priv *priv) 79 struct cc770_priv *priv)
79{ 80{
80 struct device_node *np = pdev->dev.of_node; 81 struct device_node *np = pdev->dev.of_node;
81 const u32 *prop; 82 const u32 *prop;
@@ -147,8 +148,8 @@ static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
147 return 0; 148 return 0;
148} 149}
149 150
150static int __devinit cc770_get_platform_data(struct platform_device *pdev, 151static int cc770_get_platform_data(struct platform_device *pdev,
151 struct cc770_priv *priv) 152 struct cc770_priv *priv)
152{ 153{
153 154
154 struct cc770_platform_data *pdata = pdev->dev.platform_data; 155 struct cc770_platform_data *pdata = pdev->dev.platform_data;
@@ -163,7 +164,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
163 return 0; 164 return 0;
164} 165}
165 166
166static int __devinit cc770_platform_probe(struct platform_device *pdev) 167static int cc770_platform_probe(struct platform_device *pdev)
167{ 168{
168 struct net_device *dev; 169 struct net_device *dev;
169 struct cc770_priv *priv; 170 struct cc770_priv *priv;
@@ -237,7 +238,7 @@ exit_release_mem:
237 return err; 238 return err;
238} 239}
239 240
240static int __devexit cc770_platform_remove(struct platform_device *pdev) 241static int cc770_platform_remove(struct platform_device *pdev)
241{ 242{
242 struct net_device *dev = dev_get_drvdata(&pdev->dev); 243 struct net_device *dev = dev_get_drvdata(&pdev->dev);
243 struct cc770_priv *priv = netdev_priv(dev); 244 struct cc770_priv *priv = netdev_priv(dev);
@@ -253,11 +254,12 @@ static int __devexit cc770_platform_remove(struct platform_device *pdev)
253 return 0; 254 return 0;
254} 255}
255 256
256static struct of_device_id __devinitdata cc770_platform_table[] = { 257static struct of_device_id cc770_platform_table[] = {
257 {.compatible = "bosch,cc770"}, /* CC770 from Bosch */ 258 {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
258 {.compatible = "intc,82527"}, /* AN82527 from Intel CP */ 259 {.compatible = "intc,82527"}, /* AN82527 from Intel CP */
259 {}, 260 {},
260}; 261};
262MODULE_DEVICE_TABLE(of, cc770_platform_table);
261 263
262static struct platform_driver cc770_platform_driver = { 264static struct platform_driver cc770_platform_driver = {
263 .driver = { 265 .driver = {
@@ -266,7 +268,7 @@ static struct platform_driver cc770_platform_driver = {
266 .of_match_table = cc770_platform_table, 268 .of_match_table = cc770_platform_table,
267 }, 269 },
268 .probe = cc770_platform_probe, 270 .probe = cc770_platform_probe,
269 .remove = __devexit_p(cc770_platform_remove), 271 .remove = cc770_platform_remove,
270}; 272};
271 273
272module_platform_driver(cc770_platform_driver); 274module_platform_driver(cc770_platform_driver);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 963e2ccd10db..8233e5ed2939 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -609,8 +609,7 @@ void close_candev(struct net_device *dev)
609{ 609{
610 struct can_priv *priv = netdev_priv(dev); 610 struct can_priv *priv = netdev_priv(dev);
611 611
612 if (del_timer_sync(&priv->restart_timer)) 612 del_timer_sync(&priv->restart_timer);
613 dev_put(dev);
614 can_flush_echo_skb(dev); 613 can_flush_echo_skb(dev);
615} 614}
616EXPORT_SYMBOL_GPL(close_candev); 615EXPORT_SYMBOL_GPL(close_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index a412bf6d73ef..0289a6d86f66 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -922,7 +922,7 @@ static const struct net_device_ops flexcan_netdev_ops = {
922 .ndo_start_xmit = flexcan_start_xmit, 922 .ndo_start_xmit = flexcan_start_xmit,
923}; 923};
924 924
925static int __devinit register_flexcandev(struct net_device *dev) 925static int register_flexcandev(struct net_device *dev)
926{ 926{
927 struct flexcan_priv *priv = netdev_priv(dev); 927 struct flexcan_priv *priv = netdev_priv(dev);
928 struct flexcan_regs __iomem *regs = priv->base; 928 struct flexcan_regs __iomem *regs = priv->base;
@@ -968,7 +968,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
968 return err; 968 return err;
969} 969}
970 970
971static void __devexit unregister_flexcandev(struct net_device *dev) 971static void unregister_flexcandev(struct net_device *dev)
972{ 972{
973 unregister_candev(dev); 973 unregister_candev(dev);
974} 974}
@@ -979,13 +979,15 @@ static const struct of_device_id flexcan_of_match[] = {
979 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, 979 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
980 { /* sentinel */ }, 980 { /* sentinel */ },
981}; 981};
982MODULE_DEVICE_TABLE(of, flexcan_of_match);
982 983
983static const struct platform_device_id flexcan_id_table[] = { 984static const struct platform_device_id flexcan_id_table[] = {
984 { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, }, 985 { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
985 { /* sentinel */ }, 986 { /* sentinel */ },
986}; 987};
988MODULE_DEVICE_TABLE(platform, flexcan_id_table);
987 989
988static int __devinit flexcan_probe(struct platform_device *pdev) 990static int flexcan_probe(struct platform_device *pdev)
989{ 991{
990 const struct of_device_id *of_id; 992 const struct of_device_id *of_id;
991 const struct flexcan_devtype_data *devtype_data; 993 const struct flexcan_devtype_data *devtype_data;
@@ -1107,7 +1109,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
1107 return err; 1109 return err;
1108} 1110}
1109 1111
1110static int __devexit flexcan_remove(struct platform_device *pdev) 1112static int flexcan_remove(struct platform_device *pdev)
1111{ 1113{
1112 struct net_device *dev = platform_get_drvdata(pdev); 1114 struct net_device *dev = platform_get_drvdata(pdev);
1113 struct flexcan_priv *priv = netdev_priv(dev); 1115 struct flexcan_priv *priv = netdev_priv(dev);
@@ -1168,7 +1170,7 @@ static struct platform_driver flexcan_driver = {
1168 .of_match_table = flexcan_of_match, 1170 .of_match_table = flexcan_of_match,
1169 }, 1171 },
1170 .probe = flexcan_probe, 1172 .probe = flexcan_probe,
1171 .remove = __devexit_p(flexcan_remove), 1173 .remove = flexcan_remove,
1172 .suspend = flexcan_suspend, 1174 .suspend = flexcan_suspend,
1173 .resume = flexcan_resume, 1175 .resume = flexcan_resume,
1174 .id_table = flexcan_id_table, 1176 .id_table = flexcan_id_table,
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
new file mode 100644
index 000000000000..17fbc7a09224
--- /dev/null
+++ b/drivers/net/can/grcan.c
@@ -0,0 +1,1756 @@
1/*
2 * Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
3 *
4 * 2012 (c) Aeroflex Gaisler AB
5 *
6 * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
7 * VHDL IP core library.
8 *
9 * Full documentation of the GRCAN core can be found here:
10 * http://www.gaisler.com/products/grlib/grip.pdf
11 *
12 * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
13 * open firmware properties.
14 *
15 * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
16 * sysfs interface.
17 *
18 * See "Documentation/kernel-parameters.txt" for information on the module
19 * parameters.
20 *
21 * This program is free software; you can redistribute it and/or modify it
22 * under the terms of the GNU General Public License as published by the
23 * Free Software Foundation; either version 2 of the License, or (at your
24 * option) any later version.
25 *
26 * Contributors: Andreas Larsson <andreas@gaisler.com>
27 */
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/interrupt.h>
32#include <linux/netdevice.h>
33#include <linux/delay.h>
34#include <linux/io.h>
35#include <linux/can/dev.h>
36#include <linux/spinlock.h>
37
38#include <linux/of_platform.h>
39#include <asm/prom.h>
40
41#include <linux/of_irq.h>
42
43#include <linux/dma-mapping.h>
44
45#define DRV_NAME "grcan"
46
47#define GRCAN_NAPI_WEIGHT 32
48
49#define GRCAN_RESERVE_SIZE(slot1, slot2) (((slot2) - (slot1)) / 4 - 1)
50
51struct grcan_registers {
52 u32 conf; /* 0x00 */
53 u32 stat; /* 0x04 */
54 u32 ctrl; /* 0x08 */
55 u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
56 u32 smask; /* 0x18 - CanMASK */
57 u32 scode; /* 0x1c - CanCODE */
58 u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
59 u32 pimsr; /* 0x100 */
60 u32 pimr; /* 0x104 */
61 u32 pisr; /* 0x108 */
62 u32 pir; /* 0x10C */
63 u32 imr; /* 0x110 */
64 u32 picr; /* 0x114 */
65 u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
66 u32 txctrl; /* 0x200 */
67 u32 txaddr; /* 0x204 */
68 u32 txsize; /* 0x208 */
69 u32 txwr; /* 0x20C */
70 u32 txrd; /* 0x210 */
71 u32 txirq; /* 0x214 */
72 u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
73 u32 rxctrl; /* 0x300 */
74 u32 rxaddr; /* 0x304 */
75 u32 rxsize; /* 0x308 */
76 u32 rxwr; /* 0x30C */
77 u32 rxrd; /* 0x310 */
78 u32 rxirq; /* 0x314 */
79 u32 rxmask; /* 0x318 */
80 u32 rxcode; /* 0x31C */
81};
82
83#define GRCAN_CONF_ABORT 0x00000001
84#define GRCAN_CONF_ENABLE0 0x00000002
85#define GRCAN_CONF_ENABLE1 0x00000004
86#define GRCAN_CONF_SELECT 0x00000008
87#define GRCAN_CONF_SILENT 0x00000010
88#define GRCAN_CONF_SAM 0x00000020 /* Available in some hardware */
89#define GRCAN_CONF_BPR 0x00000300 /* Note: not BRP */
90#define GRCAN_CONF_RSJ 0x00007000
91#define GRCAN_CONF_PS1 0x00f00000
92#define GRCAN_CONF_PS2 0x000f0000
93#define GRCAN_CONF_SCALER 0xff000000
94#define GRCAN_CONF_OPERATION \
95 (GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1 \
96 | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
97#define GRCAN_CONF_TIMING \
98 (GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1 \
99 | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)
100
101#define GRCAN_CONF_RSJ_MIN 1
102#define GRCAN_CONF_RSJ_MAX 4
103#define GRCAN_CONF_PS1_MIN 1
104#define GRCAN_CONF_PS1_MAX 15
105#define GRCAN_CONF_PS2_MIN 2
106#define GRCAN_CONF_PS2_MAX 8
107#define GRCAN_CONF_SCALER_MIN 0
108#define GRCAN_CONF_SCALER_MAX 255
109#define GRCAN_CONF_SCALER_INC 1
110
111#define GRCAN_CONF_BPR_BIT 8
112#define GRCAN_CONF_RSJ_BIT 12
113#define GRCAN_CONF_PS1_BIT 20
114#define GRCAN_CONF_PS2_BIT 16
115#define GRCAN_CONF_SCALER_BIT 24
116
117#define GRCAN_STAT_PASS 0x000001
118#define GRCAN_STAT_OFF 0x000002
119#define GRCAN_STAT_OR 0x000004
120#define GRCAN_STAT_AHBERR 0x000008
121#define GRCAN_STAT_ACTIVE 0x000010
122#define GRCAN_STAT_RXERRCNT 0x00ff00
123#define GRCAN_STAT_TXERRCNT 0xff0000
124
125#define GRCAN_STAT_ERRCTR_RELATED (GRCAN_STAT_PASS | GRCAN_STAT_OFF)
126
127#define GRCAN_STAT_RXERRCNT_BIT 8
128#define GRCAN_STAT_TXERRCNT_BIT 16
129
130#define GRCAN_STAT_ERRCNT_WARNING_LIMIT 96
131#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT 127
132
133#define GRCAN_CTRL_RESET 0x2
134#define GRCAN_CTRL_ENABLE 0x1
135
136#define GRCAN_TXCTRL_ENABLE 0x1
137#define GRCAN_TXCTRL_ONGOING 0x2
138#define GRCAN_TXCTRL_SINGLE 0x4
139
140#define GRCAN_RXCTRL_ENABLE 0x1
141#define GRCAN_RXCTRL_ONGOING 0x2
142
143/* Relative offset of IRQ sources to AMBA Plug&Play */
144#define GRCAN_IRQIX_IRQ 0
145#define GRCAN_IRQIX_TXSYNC 1
146#define GRCAN_IRQIX_RXSYNC 2
147
148#define GRCAN_IRQ_PASS 0x00001
149#define GRCAN_IRQ_OFF 0x00002
150#define GRCAN_IRQ_OR 0x00004
151#define GRCAN_IRQ_RXAHBERR 0x00008
152#define GRCAN_IRQ_TXAHBERR 0x00010
153#define GRCAN_IRQ_RXIRQ 0x00020
154#define GRCAN_IRQ_TXIRQ 0x00040
155#define GRCAN_IRQ_RXFULL 0x00080
156#define GRCAN_IRQ_TXEMPTY 0x00100
157#define GRCAN_IRQ_RX 0x00200
158#define GRCAN_IRQ_TX 0x00400
159#define GRCAN_IRQ_RXSYNC 0x00800
160#define GRCAN_IRQ_TXSYNC 0x01000
161#define GRCAN_IRQ_RXERRCTR 0x02000
162#define GRCAN_IRQ_TXERRCTR 0x04000
163#define GRCAN_IRQ_RXMISS 0x08000
164#define GRCAN_IRQ_TXLOSS 0x10000
165
166#define GRCAN_IRQ_NONE 0
167#define GRCAN_IRQ_ALL \
168 (GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR \
169 | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR \
170 | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ \
171 | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY \
172 | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC \
173 | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR \
174 | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS \
175 | GRCAN_IRQ_TXLOSS)
176
177#define GRCAN_IRQ_ERRCTR_RELATED (GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
178 | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
179#define GRCAN_IRQ_ERRORS (GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR \
180 | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR \
181 | GRCAN_IRQ_TXLOSS)
182#define GRCAN_IRQ_DEFAULT (GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)
183
184#define GRCAN_MSG_SIZE 16
185
186#define GRCAN_MSG_IDE 0x80000000
187#define GRCAN_MSG_RTR 0x40000000
188#define GRCAN_MSG_BID 0x1ffc0000
189#define GRCAN_MSG_EID 0x1fffffff
190#define GRCAN_MSG_IDE_BIT 31
191#define GRCAN_MSG_RTR_BIT 30
192#define GRCAN_MSG_BID_BIT 18
193#define GRCAN_MSG_EID_BIT 0
194
195#define GRCAN_MSG_DLC 0xf0000000
196#define GRCAN_MSG_TXERRC 0x00ff0000
197#define GRCAN_MSG_RXERRC 0x0000ff00
198#define GRCAN_MSG_DLC_BIT 28
199#define GRCAN_MSG_TXERRC_BIT 16
200#define GRCAN_MSG_RXERRC_BIT 8
201#define GRCAN_MSG_AHBERR 0x00000008
202#define GRCAN_MSG_OR 0x00000004
203#define GRCAN_MSG_OFF 0x00000002
204#define GRCAN_MSG_PASS 0x00000001
205
206#define GRCAN_MSG_DATA_SLOT_INDEX(i) (2 + (i) / 4)
207#define GRCAN_MSG_DATA_SHIFT(i) ((3 - (i) % 4) * 8)
208
209#define GRCAN_BUFFER_ALIGNMENT 1024
210#define GRCAN_DEFAULT_BUFFER_SIZE 1024
211#define GRCAN_VALID_TR_SIZE_MASK 0x001fffc0
212
213#define GRCAN_INVALID_BUFFER_SIZE(s) \
214 ((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))
215
216#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
217#error "Invalid default buffer size"
218#endif
219
220struct grcan_dma_buffer {
221 size_t size;
222 void *buf;
223 dma_addr_t handle;
224};
225
226struct grcan_dma {
227 size_t base_size;
228 void *base_buf;
229 dma_addr_t base_handle;
230 struct grcan_dma_buffer tx;
231 struct grcan_dma_buffer rx;
232};
233
234/* GRCAN configuration parameters */
235struct grcan_device_config {
236 unsigned short enable0;
237 unsigned short enable1;
238 unsigned short select;
239 unsigned int txsize;
240 unsigned int rxsize;
241};
242
243#define GRCAN_DEFAULT_DEVICE_CONFIG { \
244 .enable0 = 0, \
245 .enable1 = 0, \
246 .select = 0, \
247 .txsize = GRCAN_DEFAULT_BUFFER_SIZE, \
248 .rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
249 }
250
251#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
252#define GRLIB_VERSION_MASK 0xffff
253
/* GRCAN private data structure */
struct grcan_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	struct napi_struct napi;

	struct grcan_registers __iomem *regs;	/* ioremap'ed registers */
	struct grcan_device_config config;
	struct grcan_dma dma;

	struct sk_buff **echo_skb;	/* We allocate this on our own */
	u8 *txdlc;			/* Length of queued frames */

	/* The echo skb pointer, pointing into echo_skb and indicating which
	 * frames can be echoed back. See the "Notes on the tx cyclic buffer
	 * handling"-comment for grcan_start_xmit for more details.
	 */
	u32 eskbp;

	/* Lock for controlling changes to the netif tx queue state, accesses to
	 * the echo_skb pointer eskbp and for making sure that a running reset
	 * and/or a close of the interface is done without interference from
	 * other parts of the code.
	 *
	 * The echo_skb pointer, eskbp, should only be accessed under this lock
	 * as it can be changed in several places and together with decisions on
	 * whether to wake up the tx queue.
	 *
	 * The tx queue must never be woken up if there is a running reset or
	 * close in progress.
	 *
	 * A running reset (see below on need_txbug_workaround) should never be
	 * done if the interface is closing down and several running resets
	 * should never be scheduled simultaneously.
	 */
	spinlock_t lock;

	/* Whether a workaround is needed due to a bug in older hardware. In
	 * this case, the driver both tries to prevent the bug from being
	 * triggered and recovers, if the bug nevertheless happens, by doing a
	 * running reset. A running reset resets the device and continues from
	 * where it was without being noticeable from outside the driver (apart
	 * from slight delays).
	 */
	bool need_txbug_workaround;

	/* hang_timer triggers the initiation of a running reset when the
	 * device appears hung; rr_timer triggers the running reset itself.
	 */
	struct timer_list hang_timer;
	struct timer_list rr_timer;

	/* To avoid waking up the netif queue and restarting timers
	 * when a reset is scheduled or when a close of the device is
	 * in progress
	 */
	bool resetting;
	bool closing;
};
313
/* Time, in microseconds, for a short busy-wait for ONGOING to clear */
#define GRCAN_SHORTWAIT_USECS 10
316
317/* Limit on the number of transmitted bits of an eff frame according to the CAN
318 * specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
319 * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and 7
320 * bits end of frame
321 */
322#define GRCAN_EFF_FRAME_MAX_BITS (1+32+6+8*8+16+2+7)
323
/* Register accessors: use big-endian MMIO accessors on big-endian kernels
 * and little-endian accessors otherwise, so register values are handled in
 * the CPU's native byte order.
 */
#if defined(__BIG_ENDIAN)
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32be(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32be(val, reg);
}
#else
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32(val, reg);
}
#endif
345
346static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
347{
348 grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
349}
350
351static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
352{
353 grcan_write_reg(reg, grcan_read_reg(reg) | mask);
354}
355
356static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
357{
358 return grcan_read_reg(reg) & mask;
359}
360
361static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
362{
363 u32 old = grcan_read_reg(reg);
364
365 grcan_write_reg(reg, (old & ~mask) | (value & mask));
366}
367
368/* a and b should both be in [0,size] and a == b == size should not hold */
369static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
370{
371 u32 sum = a + b;
372
373 if (sum < size)
374 return sum;
375 else
376 return sum - size;
377}
378
379/* a and b should both be in [0,size) */
380static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
381{
382 return grcan_ring_add(a, size - b, size);
383}
384
385/* Available slots for new transmissions */
386static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
387{
388 u32 slots = txsize / GRCAN_MSG_SIZE - 1;
389 u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;
390
391 return slots - used;
392}
393
/* Configuration parameters that can be set via module parameters;
 * starts out as the compile-time defaults.
 */
static struct grcan_device_config grcan_module_config =
	GRCAN_DEFAULT_DEVICE_CONFIG;
397
/* Hardware bit-timing limits exposed to the CAN framework. The +1 offsets
 * compensate for the registers holding "value minus one" (see
 * grcan_set_bittiming, where ps1 = tseg1 - 1 and scaler = brp - 1).
 */
static const struct can_bittiming_const grcan_bittiming_const = {
	.name		= DRV_NAME,
	.tseg1_min	= GRCAN_CONF_PS1_MIN + 1,
	.tseg1_max	= GRCAN_CONF_PS1_MAX + 1,
	.tseg2_min	= GRCAN_CONF_PS2_MIN,
	.tseg2_max	= GRCAN_CONF_PS2_MAX,
	.sjw_max	= GRCAN_CONF_RSJ_MAX,
	.brp_min	= GRCAN_CONF_SCALER_MIN + 1,
	.brp_max	= GRCAN_CONF_SCALER_MAX + 1,
	.brp_inc	= GRCAN_CONF_SCALER_INC,
};
409
410static int grcan_set_bittiming(struct net_device *dev)
411{
412 struct grcan_priv *priv = netdev_priv(dev);
413 struct grcan_registers __iomem *regs = priv->regs;
414 struct can_bittiming *bt = &priv->can.bittiming;
415 u32 timing = 0;
416 int bpr, rsj, ps1, ps2, scaler;
417
418 /* Should never happen - function will not be called when
419 * device is up
420 */
421 if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
422 return -EBUSY;
423
424 bpr = 0; /* Note bpr and brp are different concepts */
425 rsj = bt->sjw;
426 ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
427 ps2 = bt->phase_seg2;
428 scaler = (bt->brp - 1);
429 netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
430 bpr, rsj, ps1, ps2, scaler);
431 if (!(ps1 > ps2)) {
432 netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
433 ps1, ps2);
434 return -EINVAL;
435 }
436 if (!(ps2 >= rsj)) {
437 netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
438 ps2, rsj);
439 return -EINVAL;
440 }
441
442 timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
443 timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
444 timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
445 timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
446 timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
447 netdev_info(dev, "setting timing=0x%x\n", timing);
448 grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);
449
450 return 0;
451}
452
453static int grcan_get_berr_counter(const struct net_device *dev,
454 struct can_berr_counter *bec)
455{
456 struct grcan_priv *priv = netdev_priv(dev);
457 struct grcan_registers __iomem *regs = priv->regs;
458 u32 status = grcan_read_reg(&regs->stat);
459
460 bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
461 bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
462 return 0;
463}
464
465static int grcan_poll(struct napi_struct *napi, int budget);
466
467/* Reset device, but keep configuration information */
468static void grcan_reset(struct net_device *dev)
469{
470 struct grcan_priv *priv = netdev_priv(dev);
471 struct grcan_registers __iomem *regs = priv->regs;
472 u32 config = grcan_read_reg(&regs->conf);
473
474 grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
475 grcan_write_reg(&regs->conf, config);
476
477 priv->eskbp = grcan_read_reg(&regs->txrd);
478 priv->can.state = CAN_STATE_STOPPED;
479
480 /* Turn off hardware filtering - regs->rxcode set to 0 by reset */
481 grcan_write_reg(&regs->rxmask, 0);
482}
483
/* Stop the device without changing any configuration.
 *
 * Interrupts are masked first so that no further irqs arrive while the
 * tx/rx channels and the core itself are being disabled. The conf
 * register is untouched, so grcan_start can bring the device back up.
 */
static void grcan_stop_hardware(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;

	/* Mask all interrupts before disabling the channels */
	grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
	grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
	grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
	grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
}
495
/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
 * is true and free them otherwise.
 *
 * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
 * continue until priv->eskbp catches up to regs->txrd.
 *
 * priv->lock *must* be held when calling this function.
 *
 * Return: the number of frames handled.
 */
static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	int i, work_done;

	/* Updates to priv->eskbp and wake-ups of the queue needs to
	 * be atomic towards the reads of priv->eskbp and shut-downs
	 * of the queue in grcan_start_xmit.
	 */
	u32 txrd = grcan_read_reg(&regs->txrd);

	for (work_done = 0; work_done < budget || budget < 0; work_done++) {
		/* Stop when the echo pointer has caught up to the hardware
		 * read pointer - nothing more has been sent.
		 */
		if (priv->eskbp == txrd)
			break;
		/* Slot index of the frame being echoed/freed */
		i = priv->eskbp / GRCAN_MSG_SIZE;
		if (echo) {
			/* Normal echo of messages */
			stats->tx_packets++;
			stats->tx_bytes += priv->txdlc[i];
			priv->txdlc[i] = 0;
			can_get_echo_skb(dev, i);
		} else {
			/* For cleanup of untransmitted messages */
			can_free_echo_skb(dev, i);
		}

		/* Advance the echo pointer one message, then re-read txrd as
		 * the hardware may have made progress in the meantime.
		 */
		priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
					     dma->tx.size);
		txrd = grcan_read_reg(&regs->txrd);
	}
	return work_done;
}
539
/* Handle a lost frame in one-shot mode: echo back everything already sent,
 * skip the lost message by advancing regs->txrd past it, discard its echo
 * skb and re-enable transmission.
 */
static void grcan_lost_one_shot_frame(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	u32 txrd;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* Echo back all frames transmitted before the lost one */
	catch_up_echo_skb(dev, -1, true);

	if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
		/* Should never happen */
		netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
	} else {
		/* By the time an GRCAN_IRQ_TXLOSS is generated in
		 * one-shot mode there is no problem in writing
		 * to TXRD even in versions of the hardware in
		 * which GRCAN_TXCTRL_ONGOING is not cleared properly
		 * in one-shot mode.
		 */

		/* Skip message and discard echo-skb */
		txrd = grcan_read_reg(&regs->txrd);
		txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
		grcan_write_reg(&regs->txrd, txrd);
		catch_up_echo_skb(dev, -1, false);

		/* Resume transmission unless a reset/close is in progress or
		 * the interface is in listen-only mode.
		 */
		if (!priv->resetting && !priv->closing &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
			netif_wake_queue(dev);
			grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
578
579static void grcan_err(struct net_device *dev, u32 sources, u32 status)
580{
581 struct grcan_priv *priv = netdev_priv(dev);
582 struct grcan_registers __iomem *regs = priv->regs;
583 struct grcan_dma *dma = &priv->dma;
584 struct net_device_stats *stats = &dev->stats;
585 struct can_frame cf;
586
587 /* Zero potential error_frame */
588 memset(&cf, 0, sizeof(cf));
589
590 /* Message lost interrupt. This might be due to arbitration error, but
591 * is also triggered when there is no one else on the can bus or when
592 * there is a problem with the hardware interface or the bus itself. As
593 * arbitration errors can not be singled out, no error frames are
594 * generated reporting this event as an arbitration error.
595 */
596 if (sources & GRCAN_IRQ_TXLOSS) {
597 /* Take care of failed one-shot transmit */
598 if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
599 grcan_lost_one_shot_frame(dev);
600
601 /* Stop printing as soon as error passive or bus off is in
602 * effect to limit the amount of txloss debug printouts.
603 */
604 if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
605 netdev_dbg(dev, "tx message lost\n");
606 stats->tx_errors++;
607 }
608 }
609
610 /* Conditions dealing with the error counters. There is no interrupt for
611 * error warning, but there are interrupts for increases of the error
612 * counters.
613 */
614 if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
615 (status & GRCAN_STAT_ERRCTR_RELATED)) {
616 enum can_state state = priv->can.state;
617 enum can_state oldstate = state;
618 u32 txerr = (status & GRCAN_STAT_TXERRCNT)
619 >> GRCAN_STAT_TXERRCNT_BIT;
620 u32 rxerr = (status & GRCAN_STAT_RXERRCNT)
621 >> GRCAN_STAT_RXERRCNT_BIT;
622
623 /* Figure out current state */
624 if (status & GRCAN_STAT_OFF) {
625 state = CAN_STATE_BUS_OFF;
626 } else if (status & GRCAN_STAT_PASS) {
627 state = CAN_STATE_ERROR_PASSIVE;
628 } else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
629 rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
630 state = CAN_STATE_ERROR_WARNING;
631 } else {
632 state = CAN_STATE_ERROR_ACTIVE;
633 }
634
635 /* Handle and report state changes */
636 if (state != oldstate) {
637 switch (state) {
638 case CAN_STATE_BUS_OFF:
639 netdev_dbg(dev, "bus-off\n");
640 netif_carrier_off(dev);
641 priv->can.can_stats.bus_off++;
642
643 /* Prevent the hardware from recovering from bus
644 * off on its own if restart is disabled.
645 */
646 if (!priv->can.restart_ms)
647 grcan_stop_hardware(dev);
648
649 cf.can_id |= CAN_ERR_BUSOFF;
650 break;
651
652 case CAN_STATE_ERROR_PASSIVE:
653 netdev_dbg(dev, "Error passive condition\n");
654 priv->can.can_stats.error_passive++;
655
656 cf.can_id |= CAN_ERR_CRTL;
657 if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
658 cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
659 if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
660 cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
661 break;
662
663 case CAN_STATE_ERROR_WARNING:
664 netdev_dbg(dev, "Error warning condition\n");
665 priv->can.can_stats.error_warning++;
666
667 cf.can_id |= CAN_ERR_CRTL;
668 if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
669 cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
670 if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
671 cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
672 break;
673
674 case CAN_STATE_ERROR_ACTIVE:
675 netdev_dbg(dev, "Error active condition\n");
676 cf.can_id |= CAN_ERR_CRTL;
677 break;
678
679 default:
680 /* There are no others at this point */
681 break;
682 }
683 cf.data[6] = txerr;
684 cf.data[7] = rxerr;
685 priv->can.state = state;
686 }
687
688 /* Report automatic restarts */
689 if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
690 unsigned long flags;
691
692 cf.can_id |= CAN_ERR_RESTARTED;
693 netdev_dbg(dev, "restarted\n");
694 priv->can.can_stats.restarts++;
695 netif_carrier_on(dev);
696
697 spin_lock_irqsave(&priv->lock, flags);
698
699 if (!priv->resetting && !priv->closing) {
700 u32 txwr = grcan_read_reg(&regs->txwr);
701
702 if (grcan_txspace(dma->tx.size, txwr,
703 priv->eskbp))
704 netif_wake_queue(dev);
705 }
706
707 spin_unlock_irqrestore(&priv->lock, flags);
708 }
709 }
710
711 /* Data overrun interrupt */
712 if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
713 netdev_dbg(dev, "got data overrun interrupt\n");
714 stats->rx_over_errors++;
715 stats->rx_errors++;
716
717 cf.can_id |= CAN_ERR_CRTL;
718 cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
719 }
720
721 /* AHB bus error interrupts (not CAN bus errors) - shut down the
722 * device.
723 */
724 if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
725 (status & GRCAN_STAT_AHBERR)) {
726 char *txrx = "";
727 unsigned long flags;
728
729 if (sources & GRCAN_IRQ_TXAHBERR) {
730 txrx = "on tx ";
731 stats->tx_errors++;
732 } else if (sources & GRCAN_IRQ_RXAHBERR) {
733 txrx = "on rx ";
734 stats->rx_errors++;
735 }
736 netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
737 txrx);
738
739 spin_lock_irqsave(&priv->lock, flags);
740
741 /* Prevent anything to be enabled again and halt device */
742 priv->closing = true;
743 netif_stop_queue(dev);
744 grcan_stop_hardware(dev);
745 priv->can.state = CAN_STATE_STOPPED;
746
747 spin_unlock_irqrestore(&priv->lock, flags);
748 }
749
750 /* Pass on error frame if something to report,
751 * i.e. id contains some information
752 */
753 if (cf.can_id) {
754 struct can_frame *skb_cf;
755 struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);
756
757 if (skb == NULL) {
758 netdev_dbg(dev, "could not allocate error frame\n");
759 return;
760 }
761 skb_cf->can_id |= cf.can_id;
762 memcpy(skb_cf->data, cf.data, sizeof(cf.data));
763
764 netif_rx(skb);
765 }
766}
767
/* Interrupt handler: acknowledge the pending sources, defer tx/rx frame
 * handling to NAPI and dispatch error conditions to grcan_err.
 */
static irqreturn_t grcan_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 sources, status;

	/* Find out the source */
	sources = grcan_read_reg(&regs->pimsr);
	if (!sources)
		return IRQ_NONE;	/* shared irq - not ours */
	/* Acknowledge the handled sources */
	grcan_write_reg(&regs->picr, sources);
	status = grcan_read_reg(&regs->stat);

	/* If we got TX progress, the device has not hanged,
	 * so disable the hang timer
	 */
	if (priv->need_txbug_workaround &&
	    (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
		del_timer(&priv->hang_timer);
	}

	/* Frame(s) received or transmitted */
	if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
		/* Disable tx/rx interrupts and schedule poll(). No need for
		 * locking as interference from a running reset at worst leads
		 * to an extra interrupt.
		 */
		grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
		napi_schedule(&priv->napi);
	}

	/* (Potential) error conditions to take care of */
	if (sources & GRCAN_IRQ_ERRORS)
		grcan_err(dev, sources, status);

	return IRQ_HANDLED;
}
806
/* Reset device and restart operations from where they were.
 *
 * This assumes that TXCTRL and RXCTRL are properly disabled and that RX
 * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
 * for single shot).
 *
 * Timer callback for priv->rr_timer; data is the net_device pointer.
 */
static void grcan_running_reset(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;

	/* This temporarily messes with eskbp, so we need to lock
	 * priv->lock
	 */
	spin_lock_irqsave(&priv->lock, flags);

	priv->resetting = false;
	del_timer(&priv->hang_timer);
	del_timer(&priv->rr_timer);

	if (!priv->closing) {
		/* Save and reset - config register preserved by grcan_reset */
		u32 imr = grcan_read_reg(&regs->imr);

		u32 txaddr = grcan_read_reg(&regs->txaddr);
		u32 txsize = grcan_read_reg(&regs->txsize);
		u32 txwr = grcan_read_reg(&regs->txwr);
		u32 txrd = grcan_read_reg(&regs->txrd);
		u32 eskbp = priv->eskbp;

		u32 rxaddr = grcan_read_reg(&regs->rxaddr);
		u32 rxsize = grcan_read_reg(&regs->rxsize);
		u32 rxwr = grcan_read_reg(&regs->rxwr);
		u32 rxrd = grcan_read_reg(&regs->rxrd);

		grcan_reset(dev);

		/* Restore - the ring pointers continue from where they were */
		grcan_write_reg(&regs->txaddr, txaddr);
		grcan_write_reg(&regs->txsize, txsize);
		grcan_write_reg(&regs->txwr, txwr);
		grcan_write_reg(&regs->txrd, txrd);
		priv->eskbp = eskbp;

		grcan_write_reg(&regs->rxaddr, rxaddr);
		grcan_write_reg(&regs->rxsize, rxsize);
		grcan_write_reg(&regs->rxwr, rxwr);
		grcan_write_reg(&regs->rxrd, rxrd);

		/* Turn on device again */
		grcan_write_reg(&regs->imr, imr);
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE
				| (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
				   ? GRCAN_TXCTRL_SINGLE : 0));
		grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
		grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);

		/* Start queue if there is space and listen-only mode is not
		 * enabled
		 */
		if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	netdev_err(dev, "Device reset and restored\n");
}
879
880/* Waiting time in usecs corresponding to the transmission of three maximum
881 * sized can frames in the given bitrate (in bits/sec). Waiting for this amount
882 * of time makes sure that the can controller have time to finish sending or
883 * receiving a frame with a good margin.
884 *
885 * usecs/sec * number of frames * bits/frame / bits/sec
886 */
887static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
888{
889 return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
890}
891
892/* Set timer so that it will not fire until after a period in which the can
893 * controller have a good margin to finish transmitting a frame unless it has
894 * hanged
895 */
896static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
897{
898 u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));
899
900 mod_timer(timer, jiffies + wait_jiffies);
901}
902
/* Disable channels and schedule a running reset.
 *
 * Timer callback for priv->hang_timer; data is the net_device pointer.
 * The actual reset is performed later by grcan_running_reset when
 * priv->rr_timer fires, after a margin long enough for the controller
 * to finish any ongoing frame (see grcan_reset_timer).
 */
static void grcan_initiate_running_reset(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;

	netdev_err(dev, "Device seems hanged - reset scheduled\n");

	spin_lock_irqsave(&priv->lock, flags);

	/* The main body of this function must never be executed again
	 * until after an execution of grcan_running_reset
	 */
	if (!priv->resetting && !priv->closing) {
		priv->resetting = true;
		netif_stop_queue(dev);
		grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
		grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
		grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
928
/* Free the single coherent allocation that backs both the tx and rx rings
 * and clear all DMA bookkeeping. dma->tx.buf and dma->rx.buf point into
 * base_buf (see grcan_allocate_dma_buffers) and must not be freed
 * separately.
 */
static void grcan_free_dma_buffers(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_dma *dma = &priv->dma;

	dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
			  dma->base_handle);
	memset(dma, 0, sizeof(*dma));
}
938
/* Allocate one coherent DMA region holding both the tx and rx ring buffers.
 *
 * The larger of the two buffers is placed first, aligned up to
 * GRCAN_BUFFER_ALIGNMENT, with the smaller buffer directly after it. An
 * extra GRCAN_BUFFER_ALIGNMENT bytes are allocated so that the alignment
 * adjustment always fits inside the region.
 *
 * NOTE(review): mapping is done against &dev->dev, the net_device's
 * embedded struct device - confirm this device has valid DMA ops on the
 * target platform (the platform/OF device may be the proper choice).
 *
 * Return: 0 on success, -ENOMEM if the allocation fails.
 */
static int grcan_allocate_dma_buffers(struct net_device *dev,
				      size_t tsize, size_t rsize)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_dma *dma = &priv->dma;
	struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
	struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
	size_t shift;

	/* Need a whole number of GRCAN_BUFFER_ALIGNMENT for the large,
	 * i.e. first buffer
	 */
	size_t maxs = max(tsize, rsize);
	size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);

	/* Put the small buffer after that */
	size_t ssize = min(tsize, rsize);

	/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
	dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
	dma->base_buf = dma_alloc_coherent(&dev->dev,
					   dma->base_size,
					   &dma->base_handle,
					   GFP_KERNEL);

	if (!dma->base_buf)
		return -ENOMEM;

	dma->tx.size = tsize;
	dma->rx.size = rsize;

	/* Align the bus address of the first (large) buffer and mirror the
	 * same shift for the CPU addresses.
	 */
	large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
	small->handle = large->handle + lsize;
	shift = large->handle - dma->base_handle;

	large->buf = dma->base_buf + shift;
	small->buf = large->buf + lsize;

	return 0;
}
979
/* Configure and enable the hardware for operation: reset the controller
 * (configuration register is preserved by grcan_reset), program the DMA
 * ring addresses and sizes, enable the default interrupt set and finally
 * enable the tx/rx channels and the core itself.
 *
 * priv->lock *must* be held when calling this function.
 *
 * Return: always 0.
 */
static int grcan_start(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 confop, txctrl;

	grcan_reset(dev);

	grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
	grcan_write_reg(&regs->txsize, priv->dma.tx.size);
	/* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */

	grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
	grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
	/* regs->rxwr and regs->rxrd already set to 0 by reset */

	/* Enable interrupts */
	grcan_read_reg(&regs->pir);	/* presumably clears pending irqs - TODO confirm */
	grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);

	/* Enable interfaces, channels and device */
	confop = GRCAN_CONF_ABORT
		| (priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0)
		| (priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0)
		| (priv->config.select ? GRCAN_CONF_SELECT : 0)
		| (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
		   GRCAN_CONF_SILENT : 0)
		| (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
		   GRCAN_CONF_SAM : 0);
	grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
	txctrl = GRCAN_TXCTRL_ENABLE
		| (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT
		   ? GRCAN_TXCTRL_SINGLE : 0);
	grcan_write_reg(&regs->txctrl, txctrl);
	grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
	grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;
}
1022
1023static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
1024{
1025 struct grcan_priv *priv = netdev_priv(dev);
1026 unsigned long flags;
1027 int err = 0;
1028
1029 if (mode == CAN_MODE_START) {
1030 /* This might be called to restart the device to recover from
1031 * bus off errors
1032 */
1033 spin_lock_irqsave(&priv->lock, flags);
1034 if (priv->closing || priv->resetting) {
1035 err = -EBUSY;
1036 } else {
1037 netdev_info(dev, "Restarting device\n");
1038 grcan_start(dev);
1039 if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
1040 netif_wake_queue(dev);
1041 }
1042 spin_unlock_irqrestore(&priv->lock, flags);
1043 return err;
1044 }
1045 return -EOPNOTSUPP;
1046}
1047
1048static int grcan_open(struct net_device *dev)
1049{
1050 struct grcan_priv *priv = netdev_priv(dev);
1051 struct grcan_dma *dma = &priv->dma;
1052 unsigned long flags;
1053 int err;
1054
1055 /* Allocate memory */
1056 err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
1057 priv->config.rxsize);
1058 if (err) {
1059 netdev_err(dev, "could not allocate DMA buffers\n");
1060 return err;
1061 }
1062
1063 priv->echo_skb = kzalloc(dma->tx.size * sizeof(*priv->echo_skb),
1064 GFP_KERNEL);
1065 if (!priv->echo_skb) {
1066 err = -ENOMEM;
1067 goto exit_free_dma_buffers;
1068 }
1069 priv->can.echo_skb_max = dma->tx.size;
1070 priv->can.echo_skb = priv->echo_skb;
1071
1072 priv->txdlc = kzalloc(dma->tx.size * sizeof(*priv->txdlc), GFP_KERNEL);
1073 if (!priv->txdlc) {
1074 err = -ENOMEM;
1075 goto exit_free_echo_skb;
1076 }
1077
1078 /* Get can device up */
1079 err = open_candev(dev);
1080 if (err)
1081 goto exit_free_txdlc;
1082
1083 err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
1084 dev->name, dev);
1085 if (err)
1086 goto exit_close_candev;
1087
1088 spin_lock_irqsave(&priv->lock, flags);
1089
1090 napi_enable(&priv->napi);
1091 grcan_start(dev);
1092 if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
1093 netif_start_queue(dev);
1094 priv->resetting = false;
1095 priv->closing = false;
1096
1097 spin_unlock_irqrestore(&priv->lock, flags);
1098
1099 return 0;
1100
1101exit_close_candev:
1102 close_candev(dev);
1103exit_free_txdlc:
1104 kfree(priv->txdlc);
1105exit_free_echo_skb:
1106 kfree(priv->echo_skb);
1107exit_free_dma_buffers:
1108 grcan_free_dma_buffers(dev);
1109 return err;
1110}
1111
1112static int grcan_close(struct net_device *dev)
1113{
1114 struct grcan_priv *priv = netdev_priv(dev);
1115 unsigned long flags;
1116
1117 napi_disable(&priv->napi);
1118
1119 spin_lock_irqsave(&priv->lock, flags);
1120
1121 priv->closing = true;
1122 if (priv->need_txbug_workaround) {
1123 del_timer_sync(&priv->hang_timer);
1124 del_timer_sync(&priv->rr_timer);
1125 }
1126 netif_stop_queue(dev);
1127 grcan_stop_hardware(dev);
1128 priv->can.state = CAN_STATE_STOPPED;
1129
1130 spin_unlock_irqrestore(&priv->lock, flags);
1131
1132 free_irq(dev->irq, dev);
1133 close_candev(dev);
1134
1135 grcan_free_dma_buffers(dev);
1136 priv->can.echo_skb_max = 0;
1137 priv->can.echo_skb = NULL;
1138 kfree(priv->echo_skb);
1139 kfree(priv->txdlc);
1140
1141 return 0;
1142}
1143
/* NAPI tx side: echo back frames that the hardware has finished sending.
 *
 * If any progress was made, the tx queue is woken up (unless a reset or
 * close is in progress or the interface is in listen-only mode) and a
 * pending hang timer is cancelled, since tx progress means the device has
 * not hung.
 *
 * Return: number of echoed frames, bounded by budget.
 */
static int grcan_transmit_catch_up(struct net_device *dev, int budget)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int work_done;

	spin_lock_irqsave(&priv->lock, flags);

	work_done = catch_up_echo_skb(dev, budget, true);
	if (work_done) {
		if (!priv->resetting && !priv->closing &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
			netif_wake_queue(dev);

		/* With napi we don't get TX interrupts for a while,
		 * so prevent a running reset while catching up
		 */
		if (priv->need_txbug_workaround)
			del_timer(&priv->hang_timer);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return work_done;
}
1169
/* NAPI rx side: drain received messages from the rx DMA ring into the
 * network stack, at most budget frames.
 *
 * Walks the ring from regs->rxrd towards regs->rxwr, decoding each slot
 * into a struct can_frame, and finally publishes the new read pointer to
 * the hardware.
 *
 * Return: number of frames processed (dropped frames count towards the
 * budget as well).
 */
static int grcan_receive(struct net_device *dev, int budget)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 wr, rd, startrd;
	u32 *slot;
	u32 i, rtr, eff, j, shift;
	int work_done = 0;

	rd = grcan_read_reg(&regs->rxrd);
	startrd = rd;
	for (work_done = 0; work_done < budget; work_done++) {
		/* Check for packet to receive */
		wr = grcan_read_reg(&regs->rxwr);
		if (rd == wr)
			break;

		/* Take care of packet */
		skb = alloc_can_skb(dev, &cf);
		if (skb == NULL) {
			netdev_err(dev,
				   "dropping frame: skb allocation failed\n");
			stats->rx_dropped++;
			continue;
		}

		/* rd is a byte offset into the rx ring */
		slot = dma->rx.buf + rd;
		eff = slot[0] & GRCAN_MSG_IDE;
		rtr = slot[0] & GRCAN_MSG_RTR;
		if (eff) {
			/* Extended (29-bit) id */
			cf->can_id = ((slot[0] & GRCAN_MSG_EID)
				      >> GRCAN_MSG_EID_BIT);
			cf->can_id |= CAN_EFF_FLAG;
		} else {
			/* Base (11-bit) id */
			cf->can_id = ((slot[0] & GRCAN_MSG_BID)
				      >> GRCAN_MSG_BID_BIT);
		}
		cf->can_dlc = get_can_dlc((slot[1] & GRCAN_MSG_DLC)
					  >> GRCAN_MSG_DLC_BIT);
		if (rtr) {
			/* Remote frames carry no data */
			cf->can_id |= CAN_RTR_FLAG;
		} else {
			/* Unpack the data bytes from the message slot */
			for (i = 0; i < cf->can_dlc; i++) {
				j = GRCAN_MSG_DATA_SLOT_INDEX(i);
				shift = GRCAN_MSG_DATA_SHIFT(i);
				cf->data[i] = (u8)(slot[j] >> shift);
			}
		}
		netif_receive_skb(skb);

		/* Update statistics and read pointer */
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
	}

	/* Make sure everything is read before allowing hardware to
	 * use the memory
	 */
	mb();

	/* Update read pointer - no need to check for ongoing */
	if (likely(rd != startrd))
		grcan_write_reg(&regs->rxrd, rd);

	return work_done;
}
1241
/* NAPI poll callback: handle received frames and catch up on tx echoes.
 *
 * The budget is split evenly between rx work and tx echo work (tx catch-up
 * can itself trigger echo frames being received). When both halves finish
 * below their budget, polling is completed and the tx/rx interrupts are
 * re-enabled.
 *
 * Return: total number of frames handled.
 */
static int grcan_poll(struct napi_struct *napi, int budget)
{
	struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
	struct net_device *dev = priv->dev;
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;
	int tx_work_done, rx_work_done;
	int rx_budget = budget / 2;
	int tx_budget = budget - rx_budget;

	/* Half of the budget for receiving messages */
	rx_work_done = grcan_receive(dev, rx_budget);

	/* Half of the budget for transmitting messages as that can trigger echo
	 * frames being received
	 */
	tx_work_done = grcan_transmit_catch_up(dev, tx_budget);

	if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
		napi_complete(napi);

		/* Guarantee no interference with a running reset that otherwise
		 * could turn off interrupts.
		 */
		spin_lock_irqsave(&priv->lock, flags);

		/* Enable tx and rx interrupts again. No need to check
		 * priv->closing as napi_disable in grcan_close is waiting for
		 * scheduled napi calls to finish.
		 */
		grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return rx_work_done + tx_work_done;
}
1279
/* Work around the tx bug by waiting a while for the risky situation to
 * clear. If that fails, drop a frame in one-shot mode or indicate a busy
 * device otherwise.
 *
 * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
 * value that should be returned by grcan_start_xmit when aborting the xmit
 * and returns -EBUSY.
 */
static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
				  u32 txwr, u32 oneshotmode,
				  netdev_tx_t *netdev_tx_status)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	int i;
	unsigned long flags;

	/* Wait a while for ongoing to be cleared or read pointer to catch up to
	 * write pointer. The latter is needed due to a bug in older versions of
	 * GRCAN in which ONGOING is not cleared properly one-shot mode when a
	 * transmission fails.
	 */
	for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
		udelay(1);
		if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
		    grcan_read_reg(&regs->txrd) == txwr) {
			return 0;
		}
	}

	/* Clean up, in case the situation was not resolved */
	spin_lock_irqsave(&priv->lock, flags);
	if (!priv->resetting && !priv->closing) {
		/* Queue might have been stopped earlier in grcan_start_xmit */
		if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
			netif_wake_queue(dev);
		/* Set a timer to resolve a hanged tx controller */
		if (!timer_pending(&priv->hang_timer))
			grcan_reset_timer(&priv->hang_timer,
					  priv->can.bittiming.bitrate);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (oneshotmode) {
		/* In one-shot mode we should never end up here because
		 * then the interrupt handler increases txrd on TXLOSS,
		 * but it is consistent with one-shot mode to drop the
		 * frame in this case.
		 */
		kfree_skb(skb);
		*netdev_tx_status = NETDEV_TX_OK;
	} else {
		/* In normal mode the socket-can transmission queue get
		 * to keep the frame so that it can be retransmitted
		 * later
		 */
		*netdev_tx_status = NETDEV_TX_BUSY;
	}
	return -EBUSY;
}
1339
/* Notes on the tx cyclic buffer handling:
 *
 * regs->txwr - the next slot for the driver to put data to be sent
 * regs->txrd - the next slot for the device to read data
 * priv->eskbp - the next slot for the driver to call can_put_echo_skb for
 *
 * grcan_start_xmit can enter more messages as long as regs->txwr does
 * not reach priv->eskbp (within 1 message gap)
 *
 * The device sends messages until regs->txrd reaches regs->txwr
 *
 * The interrupt handler calls can_put_echo_skb until
 * priv->eskbp reaches regs->txrd
 */
static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 id, txwr, txrd, space, txctrl;
	int slotindex;
	u32 *slot;
	u32 i, rtr, eff, dlc, tmp, err;
	int j, shift;
	unsigned long flags;
	u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Trying to transmit in silent mode will generate error interrupts,
	 * but this should never happen - the queue should not have been
	 * started.
	 */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		return NETDEV_TX_BUSY;

	/* Reads of priv->eskbp and shut-downs of the queue needs to
	 * be atomic towards the updates to priv->eskbp and wake-ups
	 * of the queue in the interrupt handler.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	txwr = grcan_read_reg(&regs->txwr);
	space = grcan_txspace(dma->tx.size, txwr, priv->eskbp);

	slotindex = txwr / GRCAN_MSG_SIZE;
	slot = dma->tx.buf + txwr;

	/* Stop the queue while still holding the lock when only one free
	 * slot remains, so the wake-up in the interrupt handler cannot race
	 * with this shut-down.
	 */
	if (unlikely(space == 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&priv->lock, flags);
	/* End of critical section */

	/* This should never happen. If circular buffer is full, the
	 * queue should have been stopped already by netif_stop_queue.
	 */
	if (unlikely(!space)) {
		netdev_err(dev, "No buffer space, but queue is non-stopped.\n");
		return NETDEV_TX_BUSY;
	}

	/* Convert and write CAN message to DMA buffer */
	eff = cf->can_id & CAN_EFF_FLAG;
	rtr = cf->can_id & CAN_RTR_FLAG;
	id = cf->can_id & (eff ? CAN_EFF_MASK : CAN_SFF_MASK);
	dlc = cf->can_dlc;
	/* The id goes into the extended-id or base-id field of the first
	 * slot word depending on the frame format.
	 */
	if (eff)
		tmp = (id << GRCAN_MSG_EID_BIT) & GRCAN_MSG_EID;
	else
		tmp = (id << GRCAN_MSG_BID_BIT) & GRCAN_MSG_BID;
	slot[0] = (eff ? GRCAN_MSG_IDE : 0) | (rtr ? GRCAN_MSG_RTR : 0) | tmp;

	slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC);
	slot[2] = 0;
	slot[3] = 0;
	/* Pack the payload bytes into the 32-bit data words of the slot */
	for (i = 0; i < dlc; i++) {
		j = GRCAN_MSG_DATA_SLOT_INDEX(i);
		shift = GRCAN_MSG_DATA_SHIFT(i);
		slot[j] |= cf->data[i] << shift;
	}

	/* Checking that channel has not been disabled. These cases
	 * should never happen
	 */
	txctrl = grcan_read_reg(&regs->txctrl);
	if (!(txctrl & GRCAN_TXCTRL_ENABLE))
		netdev_err(dev, "tx channel spuriously disabled\n");

	if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE))
		netdev_err(dev, "one-shot mode spuriously disabled\n");

	/* Bug workaround for old version of grcan where updating txwr
	 * in the same clock cycle as the controller updates txrd to
	 * the current txwr could hang the can controller
	 */
	if (priv->need_txbug_workaround) {
		txrd = grcan_read_reg(&regs->txrd);
		if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) {
			netdev_tx_t txstatus;

			err = grcan_txbug_workaround(dev, skb, txwr,
						     oneshotmode, &txstatus);
			if (err)
				return txstatus;
		}
	}

	/* Prepare skb for echoing. This must be after the bug workaround above
	 * as ownership of the skb is passed on by calling can_put_echo_skb.
	 * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to
	 * can_put_echo_skb would be an error unless other measures are
	 * taken.
	 */
	priv->txdlc[slotindex] = cf->can_dlc; /* Store dlc for statistics */
	can_put_echo_skb(skb, dev, slotindex);

	/* Make sure everything is written before allowing hardware to
	 * read from the memory
	 */
	wmb();

	/* Update write pointer to start transmission */
	grcan_write_reg(&regs->txwr,
			grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size));

	return NETDEV_TX_OK;
}
1470
/* ========== Setting up sysfs interface and module parameters ========== */

/* Validity check for boolean configuration values: anything above 1 is not
 * a valid bool.
 */
#define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1)

/* Declare a module parameter named `name` of type `mtype` backed by the
 * corresponding field of grcan_module_config, and generate a
 * grcan_sanitize_<name>() helper that resets the field to its compiled-in
 * default (from GRCAN_DEFAULT_DEVICE_CONFIG) if `valcheckf` flags the
 * supplied value as invalid.
 */
#define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc)		\
	static void grcan_sanitize_##name(struct platform_device *pd)	\
	{								\
		struct grcan_device_config grcan_default_config		\
			= GRCAN_DEFAULT_DEVICE_CONFIG;			\
		if (valcheckf(grcan_module_config.name)) {		\
			dev_err(&pd->dev,				\
				"Invalid module parameter value for "	\
				#name " - setting default\n");		\
			grcan_module_config.name =			\
				grcan_default_config.name;		\
		}							\
	}								\
	module_param_named(name, grcan_module_config.name,		\
			   mtype, S_IRUGO);				\
	MODULE_PARM_DESC(name, desc)

/* Generate show/store sysfs handlers plus a DEVICE_ATTR for a boolean
 * per-device config field, and the matching module parameter. The store
 * handler rejects writes while the interface is up (-EBUSY) and values
 * other than 0 or 1 (-EINVAL).
 */
#define GRCAN_CONFIG_ATTR(name, desc)					\
	static ssize_t grcan_store_##name(struct device *sdev,		\
					  struct device_attribute *att,	\
					  const char *buf,		\
					  size_t count)			\
	{								\
		struct net_device *dev = to_net_dev(sdev);		\
		struct grcan_priv *priv = netdev_priv(dev);		\
		u8 val;							\
		int ret;						\
		if (dev->flags & IFF_UP)				\
			return -EBUSY;					\
		ret = kstrtou8(buf, 0, &val);				\
		if (ret < 0 || val > 1)					\
			return -EINVAL;					\
		priv->config.name = val;				\
		return count;						\
	}								\
	static ssize_t grcan_show_##name(struct device *sdev,		\
					 struct device_attribute *att,	\
					 char *buf)			\
	{								\
		struct net_device *dev = to_net_dev(sdev);		\
		struct grcan_priv *priv = netdev_priv(dev);		\
		return sprintf(buf, "%d\n", priv->config.name);		\
	}								\
	static DEVICE_ATTR(name, S_IRUGO | S_IWUSR,			\
			   grcan_show_##name,				\
			   grcan_store_##name);				\
	GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc)
1522
/* The following configuration options are made available both via module
 * parameters and writable sysfs files. See the chapter about GRCAN in the
 * documentation for the GRLIB VHDL library for further details.
 */
GRCAN_CONFIG_ATTR(enable0,
		  "Configuration of physical interface 0. Determines\n" \
		  "the \"Enable 0\" bit of the configuration register.\n" \
		  "Format: 0 | 1\nDefault: 0\n");

GRCAN_CONFIG_ATTR(enable1,
		  "Configuration of physical interface 1. Determines\n" \
		  "the \"Enable 1\" bit of the configuration register.\n" \
		  "Format: 0 | 1\nDefault: 0\n");

GRCAN_CONFIG_ATTR(select,
		  "Select which physical interface to use.\n" \
		  "Format: 0 | 1\nDefault: 0\n");

/* The tx and rx buffer size configuration options are only available via
 * module parameters (buffer sizes cannot change while the device exists).
 */
GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE,
		   "Sets the size of the tx buffer.\n" \
		   "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \
		   "Default: 1024\n");
GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE,
		   "Sets the size of the rx buffer.\n" \
		   "Format: <unsigned int> where (size & ~0x1fffc0) == 0\n" \
		   "Default: 1024\n");
1552
/* Make sure that configuration done using module parameters is set to
 * valid values, replacing any invalid value with its default.
 */
static void grcan_sanitize_module_config(struct platform_device *ofdev)
{
	grcan_sanitize_enable0(ofdev);
	grcan_sanitize_enable1(ofdev);
	grcan_sanitize_select(ofdev);
	grcan_sanitize_txsize(ofdev);
	grcan_sanitize_rxsize(ofdev);
}
1564
/* Attributes exposed under the "grcan" sysfs group of the net device */
static const struct attribute *const sysfs_grcan_attrs[] = {
	/* Config attrs */
	&dev_attr_enable0.attr,
	&dev_attr_enable1.attr,
	&dev_attr_select.attr,
	NULL,
};

static const struct attribute_group sysfs_grcan_group = {
	.name = "grcan",
	/* Cast drops the const qualifiers to match struct attribute_group */
	.attrs = (struct attribute **)sysfs_grcan_attrs,
};
1577
/* ========== Setting up the driver ========== */

/* Net device callbacks implemented by this driver */
static const struct net_device_ops grcan_netdev_ops = {
	.ndo_open	= grcan_open,
	.ndo_stop	= grcan_close,
	.ndo_start_xmit	= grcan_start_xmit,
};
1585
/* Allocate, configure and register the CAN net device for one GRCAN core.
 *
 * @ofdev:	platform device being probed
 * @base:	ioremapped base address of the GRCAN registers
 * @irq:	mapped interrupt number for the device
 * @ambafreq:	AMBA bus frequency used as the CAN clock
 * @txbug:	whether the tx bug workaround is needed for this core
 *
 * Returns 0 on success or a negative errno. On failure the candev is freed.
 */
static int grcan_setup_netdev(struct platform_device *ofdev,
			      void __iomem *base,
			      int irq, u32 ambafreq, bool txbug)
{
	struct net_device *dev;
	struct grcan_priv *priv;
	struct grcan_registers __iomem *regs;
	int err;

	dev = alloc_candev(sizeof(struct grcan_priv), 0);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq;
	dev->flags |= IFF_ECHO;
	dev->netdev_ops = &grcan_netdev_ops;
	dev->sysfs_groups[0] = &sysfs_grcan_group;

	priv = netdev_priv(dev);
	/* Start from the (sanitized) module parameter configuration */
	memcpy(&priv->config, &grcan_module_config,
	       sizeof(struct grcan_device_config));
	priv->dev = dev;
	priv->regs = base;
	priv->can.bittiming_const = &grcan_bittiming_const;
	priv->can.do_set_bittiming = grcan_set_bittiming;
	priv->can.do_set_mode = grcan_set_mode;
	priv->can.do_get_berr_counter = grcan_get_berr_counter;
	priv->can.clock.freq = ambafreq;
	priv->can.ctrlmode_supported =
		CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT;
	priv->need_txbug_workaround = txbug;

	/* Discover if triple sampling is supported by hardware: try to set
	 * the SAM bit after a reset and see whether it sticks.
	 */
	regs = priv->regs;
	grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
	grcan_set_bits(&regs->conf, GRCAN_CONF_SAM);
	if (grcan_read_bits(&regs->conf, GRCAN_CONF_SAM)) {
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
		dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n");
	}

	spin_lock_init(&priv->lock);

	/* Timers used to recover from the tx hardware bug (see
	 * grcan_txbug_workaround); only set up when the workaround is needed.
	 */
	if (priv->need_txbug_workaround) {
		init_timer(&priv->rr_timer);
		priv->rr_timer.function = grcan_running_reset;
		priv->rr_timer.data = (unsigned long)dev;

		init_timer(&priv->hang_timer);
		priv->hang_timer.function = grcan_initiate_running_reset;
		priv->hang_timer.data = (unsigned long)dev;
	}

	netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);

	SET_NETDEV_DEV(dev, &ofdev->dev);
	dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
		 priv->regs, dev->irq, priv->can.clock.freq);

	err = register_candev(dev);
	if (err)
		goto exit_free_candev;

	dev_set_drvdata(&ofdev->dev, dev);

	/* Reset device to allow bit-timing to be set. No need to call
	 * grcan_reset at this stage. That is done in grcan_open.
	 */
	grcan_write_reg(&regs->ctrl, GRCAN_CTRL_RESET);

	return 0;
exit_free_candev:
	free_candev(dev);
	return err;
}
1661
/* Probe a GRCAN platform device: read device-tree properties, map registers
 * and irq, and register the CAN net device.
 *
 * Returns 0 on success or a negative errno.
 */
static int grcan_probe(struct platform_device *ofdev)
{
	struct device_node *np = ofdev->dev.of_node;
	struct resource *res;
	u32 sysid, ambafreq;
	int irq, err;
	void __iomem *base;
	bool txbug = true;

	/* Compare GRLIB version number with the first that does not
	 * have the tx bug (see start_xmit). If the property is absent,
	 * conservatively assume the bug is present (txbug stays true).
	 */
	err = of_property_read_u32(np, "systemid", &sysid);
	if (!err && ((sysid & GRLIB_VERSION_MASK)
		     >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
		txbug = false;

	/* AMBA bus frequency, used as the CAN clock */
	err = of_property_read_u32(np, "freq", &ambafreq);
	if (err) {
		dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n");
		goto exit_error;
	}

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	base = devm_request_and_ioremap(&ofdev->dev, res);
	if (!base) {
		dev_err(&ofdev->dev, "couldn't map IO resource\n");
		err = -EADDRNOTAVAIL;
		goto exit_error;
	}

	irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ);
	if (!irq) {
		dev_err(&ofdev->dev, "no irq found\n");
		err = -ENODEV;
		goto exit_error;
	}

	grcan_sanitize_module_config(ofdev);

	err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug);
	if (err)
		goto exit_dispose_irq;

	return 0;

exit_dispose_irq:
	irq_dispose_mapping(irq);
exit_error:
	dev_err(&ofdev->dev,
		"%s socket CAN driver initialization failed with error %d\n",
		DRV_NAME, err);
	return err;
}
1716
1717static int grcan_remove(struct platform_device *ofdev)
1718{
1719 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
1720 struct grcan_priv *priv = netdev_priv(dev);
1721
1722 unregister_candev(dev); /* Will in turn call grcan_close */
1723
1724 irq_dispose_mapping(dev->irq);
1725 dev_set_drvdata(&ofdev->dev, NULL);
1726 netif_napi_del(&priv->napi);
1727 free_candev(dev);
1728
1729 return 0;
1730}
1731
1732static struct of_device_id grcan_match[] = {
1733 {.name = "GAISLER_GRCAN"},
1734 {.name = "01_03d"},
1735 {.name = "GAISLER_GRHCAN"},
1736 {.name = "01_034"},
1737 {},
1738};
1739
MODULE_DEVICE_TABLE(of, grcan_match);

/* Platform driver registration; probe/remove handle one GRCAN core each */
static struct platform_driver grcan_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = grcan_match,
	},
	.probe = grcan_probe,
	.remove = grcan_remove,
};

module_platform_driver(grcan_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 7edadee487ba..c4bc1d2e2033 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -365,7 +365,7 @@ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg)
365 * ICAN3 "new-style" Host Interface Setup 365 * ICAN3 "new-style" Host Interface Setup
366 */ 366 */
367 367
368static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod) 368static void ican3_init_new_host_interface(struct ican3_dev *mod)
369{ 369{
370 struct ican3_new_desc desc; 370 struct ican3_new_desc desc;
371 unsigned long flags; 371 unsigned long flags;
@@ -444,7 +444,7 @@ static void __devinit ican3_init_new_host_interface(struct ican3_dev *mod)
444 * ICAN3 Fast Host Interface Setup 444 * ICAN3 Fast Host Interface Setup
445 */ 445 */
446 446
447static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod) 447static void ican3_init_fast_host_interface(struct ican3_dev *mod)
448{ 448{
449 struct ican3_fast_desc desc; 449 struct ican3_fast_desc desc;
450 unsigned long flags; 450 unsigned long flags;
@@ -631,7 +631,7 @@ static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg)
631 * Quick Pre-constructed Messages 631 * Quick Pre-constructed Messages
632 */ 632 */
633 633
634static int __devinit ican3_msg_connect(struct ican3_dev *mod) 634static int ican3_msg_connect(struct ican3_dev *mod)
635{ 635{
636 struct ican3_msg msg; 636 struct ican3_msg msg;
637 637
@@ -642,7 +642,7 @@ static int __devinit ican3_msg_connect(struct ican3_dev *mod)
642 return ican3_send_msg(mod, &msg); 642 return ican3_send_msg(mod, &msg);
643} 643}
644 644
645static int __devexit ican3_msg_disconnect(struct ican3_dev *mod) 645static int ican3_msg_disconnect(struct ican3_dev *mod)
646{ 646{
647 struct ican3_msg msg; 647 struct ican3_msg msg;
648 648
@@ -653,7 +653,7 @@ static int __devexit ican3_msg_disconnect(struct ican3_dev *mod)
653 return ican3_send_msg(mod, &msg); 653 return ican3_send_msg(mod, &msg);
654} 654}
655 655
656static int __devinit ican3_msg_newhostif(struct ican3_dev *mod) 656static int ican3_msg_newhostif(struct ican3_dev *mod)
657{ 657{
658 struct ican3_msg msg; 658 struct ican3_msg msg;
659 int ret; 659 int ret;
@@ -674,7 +674,7 @@ static int __devinit ican3_msg_newhostif(struct ican3_dev *mod)
674 return 0; 674 return 0;
675} 675}
676 676
677static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod) 677static int ican3_msg_fasthostif(struct ican3_dev *mod)
678{ 678{
679 struct ican3_msg msg; 679 struct ican3_msg msg;
680 unsigned int addr; 680 unsigned int addr;
@@ -707,7 +707,7 @@ static int __devinit ican3_msg_fasthostif(struct ican3_dev *mod)
707 * Setup the CAN filter to either accept or reject all 707 * Setup the CAN filter to either accept or reject all
708 * messages from the CAN bus. 708 * messages from the CAN bus.
709 */ 709 */
710static int __devinit ican3_set_id_filter(struct ican3_dev *mod, bool accept) 710static int ican3_set_id_filter(struct ican3_dev *mod, bool accept)
711{ 711{
712 struct ican3_msg msg; 712 struct ican3_msg msg;
713 int ret; 713 int ret;
@@ -1421,7 +1421,7 @@ static int ican3_reset_module(struct ican3_dev *mod)
1421 return -ETIMEDOUT; 1421 return -ETIMEDOUT;
1422} 1422}
1423 1423
1424static void __devexit ican3_shutdown_module(struct ican3_dev *mod) 1424static void ican3_shutdown_module(struct ican3_dev *mod)
1425{ 1425{
1426 ican3_msg_disconnect(mod); 1426 ican3_msg_disconnect(mod);
1427 ican3_reset_module(mod); 1427 ican3_reset_module(mod);
@@ -1430,7 +1430,7 @@ static void __devexit ican3_shutdown_module(struct ican3_dev *mod)
1430/* 1430/*
1431 * Startup an ICAN module, bringing it into fast mode 1431 * Startup an ICAN module, bringing it into fast mode
1432 */ 1432 */
1433static int __devinit ican3_startup_module(struct ican3_dev *mod) 1433static int ican3_startup_module(struct ican3_dev *mod)
1434{ 1434{
1435 int ret; 1435 int ret;
1436 1436
@@ -1692,7 +1692,7 @@ static int ican3_get_berr_counter(const struct net_device *ndev,
1692 return ret; 1692 return ret;
1693 1693
1694 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ); 1694 ret = wait_for_completion_timeout(&mod->buserror_comp, HZ);
1695 if (ret <= 0) { 1695 if (ret == 0) {
1696 dev_info(mod->dev, "%s timed out\n", __func__); 1696 dev_info(mod->dev, "%s timed out\n", __func__);
1697 return -ETIMEDOUT; 1697 return -ETIMEDOUT;
1698 } 1698 }
@@ -1718,7 +1718,7 @@ static ssize_t ican3_sysfs_show_term(struct device *dev,
1718 return ret; 1718 return ret;
1719 1719
1720 ret = wait_for_completion_timeout(&mod->termination_comp, HZ); 1720 ret = wait_for_completion_timeout(&mod->termination_comp, HZ);
1721 if (ret <= 0) { 1721 if (ret == 0) {
1722 dev_info(mod->dev, "%s timed out\n", __func__); 1722 dev_info(mod->dev, "%s timed out\n", __func__);
1723 return -ETIMEDOUT; 1723 return -ETIMEDOUT;
1724 } 1724 }
@@ -1760,7 +1760,7 @@ static struct attribute_group ican3_sysfs_attr_group = {
1760 * PCI Subsystem 1760 * PCI Subsystem
1761 */ 1761 */
1762 1762
1763static int __devinit ican3_probe(struct platform_device *pdev) 1763static int ican3_probe(struct platform_device *pdev)
1764{ 1764{
1765 struct janz_platform_data *pdata; 1765 struct janz_platform_data *pdata;
1766 struct net_device *ndev; 1766 struct net_device *ndev;
@@ -1898,7 +1898,7 @@ out_return:
1898 return ret; 1898 return ret;
1899} 1899}
1900 1900
1901static int __devexit ican3_remove(struct platform_device *pdev) 1901static int ican3_remove(struct platform_device *pdev)
1902{ 1902{
1903 struct net_device *ndev = platform_get_drvdata(pdev); 1903 struct net_device *ndev = platform_get_drvdata(pdev);
1904 struct ican3_dev *mod = netdev_priv(ndev); 1904 struct ican3_dev *mod = netdev_priv(ndev);
@@ -1927,7 +1927,7 @@ static struct platform_driver ican3_driver = {
1927 .owner = THIS_MODULE, 1927 .owner = THIS_MODULE,
1928 }, 1928 },
1929 .probe = ican3_probe, 1929 .probe = ican3_probe,
1930 .remove = __devexit_p(ican3_remove), 1930 .remove = ican3_remove,
1931}; 1931};
1932 1932
1933module_platform_driver(ican3_driver); 1933module_platform_driver(ican3_driver);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 26e7129332ab..5eaf47b8e37b 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -981,7 +981,7 @@ static const struct net_device_ops mcp251x_netdev_ops = {
981 .ndo_start_xmit = mcp251x_hard_start_xmit, 981 .ndo_start_xmit = mcp251x_hard_start_xmit,
982}; 982};
983 983
984static int __devinit mcp251x_can_probe(struct spi_device *spi) 984static int mcp251x_can_probe(struct spi_device *spi)
985{ 985{
986 struct net_device *net; 986 struct net_device *net;
987 struct mcp251x_priv *priv; 987 struct mcp251x_priv *priv;
@@ -1100,7 +1100,7 @@ error_out:
1100 return ret; 1100 return ret;
1101} 1101}
1102 1102
1103static int __devexit mcp251x_can_remove(struct spi_device *spi) 1103static int mcp251x_can_remove(struct spi_device *spi)
1104{ 1104{
1105 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 1105 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1106 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); 1106 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1198,7 +1198,7 @@ static struct spi_driver mcp251x_can_driver = {
1198 1198
1199 .id_table = mcp251x_id_table, 1199 .id_table = mcp251x_id_table,
1200 .probe = mcp251x_can_probe, 1200 .probe = mcp251x_can_probe,
1201 .remove = __devexit_p(mcp251x_can_remove), 1201 .remove = mcp251x_can_remove,
1202 .suspend = mcp251x_can_suspend, 1202 .suspend = mcp251x_can_suspend,
1203 .resume = mcp251x_can_resume, 1203 .resume = mcp251x_can_resume,
1204}; 1204};
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 799c354083c4..668850e441dc 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -43,14 +43,13 @@ struct mpc5xxx_can_data {
43}; 43};
44 44
45#ifdef CONFIG_PPC_MPC52xx 45#ifdef CONFIG_PPC_MPC52xx
46static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = { 46static struct of_device_id mpc52xx_cdm_ids[] = {
47 { .compatible = "fsl,mpc5200-cdm", }, 47 { .compatible = "fsl,mpc5200-cdm", },
48 {} 48 {}
49}; 49};
50 50
51static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev, 51static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
52 const char *clock_name, 52 const char *clock_name, int *mscan_clksrc)
53 int *mscan_clksrc)
54{ 53{
55 unsigned int pvr; 54 unsigned int pvr;
56 struct mpc52xx_cdm __iomem *cdm; 55 struct mpc52xx_cdm __iomem *cdm;
@@ -101,9 +100,8 @@ static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev,
101 return freq; 100 return freq;
102} 101}
103#else /* !CONFIG_PPC_MPC52xx */ 102#else /* !CONFIG_PPC_MPC52xx */
104static u32 __devinit mpc52xx_can_get_clock(struct platform_device *ofdev, 103static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
105 const char *clock_name, 104 const char *clock_name, int *mscan_clksrc)
106 int *mscan_clksrc)
107{ 105{
108 return 0; 106 return 0;
109} 107}
@@ -124,14 +122,13 @@ struct mpc512x_clockctl {
124 u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */ 122 u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
125}; 123};
126 124
127static struct of_device_id __devinitdata mpc512x_clock_ids[] = { 125static struct of_device_id mpc512x_clock_ids[] = {
128 { .compatible = "fsl,mpc5121-clock", }, 126 { .compatible = "fsl,mpc5121-clock", },
129 {} 127 {}
130}; 128};
131 129
132static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, 130static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
133 const char *clock_name, 131 const char *clock_name, int *mscan_clksrc)
134 int *mscan_clksrc)
135{ 132{
136 struct mpc512x_clockctl __iomem *clockctl; 133 struct mpc512x_clockctl __iomem *clockctl;
137 struct device_node *np_clock; 134 struct device_node *np_clock;
@@ -239,16 +236,15 @@ exit_put:
239 return freq; 236 return freq;
240} 237}
241#else /* !CONFIG_PPC_MPC512x */ 238#else /* !CONFIG_PPC_MPC512x */
242static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, 239static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
243 const char *clock_name, 240 const char *clock_name, int *mscan_clksrc)
244 int *mscan_clksrc)
245{ 241{
246 return 0; 242 return 0;
247} 243}
248#endif /* CONFIG_PPC_MPC512x */ 244#endif /* CONFIG_PPC_MPC512x */
249 245
250static const struct of_device_id mpc5xxx_can_table[]; 246static const struct of_device_id mpc5xxx_can_table[];
251static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) 247static int mpc5xxx_can_probe(struct platform_device *ofdev)
252{ 248{
253 const struct of_device_id *match; 249 const struct of_device_id *match;
254 const struct mpc5xxx_can_data *data; 250 const struct mpc5xxx_can_data *data;
@@ -323,7 +319,7 @@ exit_unmap_mem:
323 return err; 319 return err;
324} 320}
325 321
326static int __devexit mpc5xxx_can_remove(struct platform_device *ofdev) 322static int mpc5xxx_can_remove(struct platform_device *ofdev)
327{ 323{
328 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 324 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
329 struct mscan_priv *priv = netdev_priv(dev); 325 struct mscan_priv *priv = netdev_priv(dev);
@@ -380,22 +376,23 @@ static int mpc5xxx_can_resume(struct platform_device *ofdev)
380} 376}
381#endif 377#endif
382 378
383static const struct mpc5xxx_can_data __devinitconst mpc5200_can_data = { 379static const struct mpc5xxx_can_data mpc5200_can_data = {
384 .type = MSCAN_TYPE_MPC5200, 380 .type = MSCAN_TYPE_MPC5200,
385 .get_clock = mpc52xx_can_get_clock, 381 .get_clock = mpc52xx_can_get_clock,
386}; 382};
387 383
388static const struct mpc5xxx_can_data __devinitconst mpc5121_can_data = { 384static const struct mpc5xxx_can_data mpc5121_can_data = {
389 .type = MSCAN_TYPE_MPC5121, 385 .type = MSCAN_TYPE_MPC5121,
390 .get_clock = mpc512x_can_get_clock, 386 .get_clock = mpc512x_can_get_clock,
391}; 387};
392 388
393static const struct of_device_id __devinitconst mpc5xxx_can_table[] = { 389static const struct of_device_id mpc5xxx_can_table[] = {
394 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, }, 390 { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
395 /* Note that only MPC5121 Rev. 2 (and later) is supported */ 391 /* Note that only MPC5121 Rev. 2 (and later) is supported */
396 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, }, 392 { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
397 {}, 393 {},
398}; 394};
395MODULE_DEVICE_TABLE(of, mpc5xxx_can_table);
399 396
400static struct platform_driver mpc5xxx_can_driver = { 397static struct platform_driver mpc5xxx_can_driver = {
401 .driver = { 398 .driver = {
@@ -404,7 +401,7 @@ static struct platform_driver mpc5xxx_can_driver = {
404 .of_match_table = mpc5xxx_can_table, 401 .of_match_table = mpc5xxx_can_table,
405 }, 402 },
406 .probe = mpc5xxx_can_probe, 403 .probe = mpc5xxx_can_probe,
407 .remove = __devexit_p(mpc5xxx_can_remove), 404 .remove = mpc5xxx_can_remove,
408#ifdef CONFIG_PM 405#ifdef CONFIG_PM
409 .suspend = mpc5xxx_can_suspend, 406 .suspend = mpc5xxx_can_suspend,
410 .resume = mpc5xxx_can_resume, 407 .resume = mpc5xxx_can_resume,
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 2b104d5f422c..e6b40954e204 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -517,12 +517,8 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
517 517
518static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) 518static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
519{ 519{
520 struct mscan_priv *priv = netdev_priv(dev);
521 int ret = 0; 520 int ret = 0;
522 521
523 if (!priv->open_time)
524 return -EINVAL;
525
526 switch (mode) { 522 switch (mode) {
527 case CAN_MODE_START: 523 case CAN_MODE_START:
528 ret = mscan_restart(dev); 524 ret = mscan_restart(dev);
@@ -590,8 +586,6 @@ static int mscan_open(struct net_device *dev)
590 goto exit_napi_disable; 586 goto exit_napi_disable;
591 } 587 }
592 588
593 priv->open_time = jiffies;
594
595 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 589 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
596 setbits8(&regs->canctl1, MSCAN_LISTEN); 590 setbits8(&regs->canctl1, MSCAN_LISTEN);
597 else 591 else
@@ -606,7 +600,6 @@ static int mscan_open(struct net_device *dev)
606 return 0; 600 return 0;
607 601
608exit_free_irq: 602exit_free_irq:
609 priv->open_time = 0;
610 free_irq(dev->irq, dev); 603 free_irq(dev->irq, dev);
611exit_napi_disable: 604exit_napi_disable:
612 napi_disable(&priv->napi); 605 napi_disable(&priv->napi);
@@ -627,7 +620,6 @@ static int mscan_close(struct net_device *dev)
627 mscan_set_mode(dev, MSCAN_INIT_MODE); 620 mscan_set_mode(dev, MSCAN_INIT_MODE);
628 close_candev(dev); 621 close_candev(dev);
629 free_irq(dev->irq, dev); 622 free_irq(dev->irq, dev);
630 priv->open_time = 0;
631 623
632 return 0; 624 return 0;
633} 625}
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index b43e9f5d3268..af2ed8baf0a3 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -281,7 +281,6 @@ struct tx_queue_entry {
281struct mscan_priv { 281struct mscan_priv {
282 struct can_priv can; /* must be the first member */ 282 struct can_priv can; /* must be the first member */
283 unsigned int type; /* MSCAN type variants */ 283 unsigned int type; /* MSCAN type variants */
284 long open_time;
285 unsigned long flags; 284 unsigned long flags;
286 void __iomem *reg_base; /* ioremap'ed address to registers */ 285 void __iomem *reg_base; /* ioremap'ed address to registers */
287 u8 shadow_statflg; 286 u8 shadow_statflg;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 48b3d62b34cb..7d1748575b1f 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -954,7 +954,7 @@ static const struct net_device_ops pch_can_netdev_ops = {
954 .ndo_start_xmit = pch_xmit, 954 .ndo_start_xmit = pch_xmit,
955}; 955};
956 956
957static void __devexit pch_can_remove(struct pci_dev *pdev) 957static void pch_can_remove(struct pci_dev *pdev)
958{ 958{
959 struct net_device *ndev = pci_get_drvdata(pdev); 959 struct net_device *ndev = pci_get_drvdata(pdev);
960 struct pch_can_priv *priv = netdev_priv(ndev); 960 struct pch_can_priv *priv = netdev_priv(ndev);
@@ -1178,7 +1178,7 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
1178 return 0; 1178 return 0;
1179} 1179}
1180 1180
1181static int __devinit pch_can_probe(struct pci_dev *pdev, 1181static int pch_can_probe(struct pci_dev *pdev,
1182 const struct pci_device_id *id) 1182 const struct pci_device_id *id)
1183{ 1183{
1184 struct net_device *ndev; 1184 struct net_device *ndev;
@@ -1269,7 +1269,7 @@ static struct pci_driver pch_can_pci_driver = {
1269 .name = "pch_can", 1269 .name = "pch_can",
1270 .id_table = pch_pci_tbl, 1270 .id_table = pch_pci_tbl,
1271 .probe = pch_can_probe, 1271 .probe = pch_can_probe,
1272 .remove = __devexit_p(pch_can_remove), 1272 .remove = pch_can_remove,
1273 .suspend = pch_can_suspend, 1273 .suspend = pch_can_suspend,
1274 .resume = pch_can_resume, 1274 .resume = pch_can_resume,
1275}; 1275};
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 03df9a8f2bbf..92f73c708a3d 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -21,7 +21,7 @@ config CAN_SJA1000_PLATFORM
21 21
22config CAN_SJA1000_OF_PLATFORM 22config CAN_SJA1000_OF_PLATFORM
23 tristate "Generic OF Platform Bus based SJA1000 driver" 23 tristate "Generic OF Platform Bus based SJA1000 driver"
24 depends on PPC_OF 24 depends on OF
25 ---help--- 25 ---help---
26 This driver adds support for the SJA1000 chips connected to 26 This driver adds support for the SJA1000 chips connected to
27 the OpenFirmware "platform bus" found on embedded systems with 27 the OpenFirmware "platform bus" found on embedded systems with
@@ -93,6 +93,7 @@ config CAN_PLX_PCI
93 - Marathon CAN-bus-PCI card (http://www.marathon.ru/) 93 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
94 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) 94 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
95 - IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/) 95 - IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
96 - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
96 97
97config CAN_TSCAN1 98config CAN_TSCAN1
98 tristate "TS-CAN1 PC104 boards" 99 tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5c6d412bafb5..036a326836b2 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -220,8 +220,8 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
220 * Probe PCI device for EMS CAN signature and register each available 220 * Probe PCI device for EMS CAN signature and register each available
221 * CAN channel to SJA1000 Socket-CAN subsystem. 221 * CAN channel to SJA1000 Socket-CAN subsystem.
222 */ 222 */
223static int __devinit ems_pci_add_card(struct pci_dev *pdev, 223static int ems_pci_add_card(struct pci_dev *pdev,
224 const struct pci_device_id *ent) 224 const struct pci_device_id *ent)
225{ 225{
226 struct sja1000_priv *priv; 226 struct sja1000_priv *priv;
227 struct net_device *dev; 227 struct net_device *dev;
diff --git a/drivers/net/can/sja1000/ems_pcmcia.c b/drivers/net/can/sja1000/ems_pcmcia.c
index 075a5457a190..5c2f3fbbf5ae 100644
--- a/drivers/net/can/sja1000/ems_pcmcia.c
+++ b/drivers/net/can/sja1000/ems_pcmcia.c
@@ -166,8 +166,7 @@ static void ems_pcmcia_del_card(struct pcmcia_device *pdev)
166 * Probe PCI device for EMS CAN signature and register each available 166 * Probe PCI device for EMS CAN signature and register each available
167 * CAN channel to SJA1000 Socket-CAN subsystem. 167 * CAN channel to SJA1000 Socket-CAN subsystem.
168 */ 168 */
169static int __devinit ems_pcmcia_add_card(struct pcmcia_device *pdev, 169static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
170 unsigned long base)
171{ 170{
172 struct sja1000_priv *priv; 171 struct sja1000_priv *priv;
173 struct net_device *dev; 172 struct net_device *dev;
@@ -256,7 +255,7 @@ failure_cleanup:
256/* 255/*
257 * Setup PCMCIA socket and probe for EMS CPC-CARD 256 * Setup PCMCIA socket and probe for EMS CPC-CARD
258 */ 257 */
259static int __devinit ems_pcmcia_probe(struct pcmcia_device *dev) 258static int ems_pcmcia_probe(struct pcmcia_device *dev)
260{ 259{
261 int csval; 260 int csval;
262 261
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 23ed6ea4c7c3..37b0381f532e 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -290,8 +290,8 @@ failure:
290 return err; 290 return err;
291} 291}
292 292
293static int __devinit kvaser_pci_init_one(struct pci_dev *pdev, 293static int kvaser_pci_init_one(struct pci_dev *pdev,
294 const struct pci_device_id *ent) 294 const struct pci_device_id *ent)
295{ 295{
296 int err; 296 int err;
297 struct net_device *master_dev = NULL; 297 struct net_device *master_dev = NULL;
@@ -379,7 +379,7 @@ failure:
379 379
380} 380}
381 381
382static void __devexit kvaser_pci_remove_one(struct pci_dev *pdev) 382static void kvaser_pci_remove_one(struct pci_dev *pdev)
383{ 383{
384 struct net_device *dev = pci_get_drvdata(pdev); 384 struct net_device *dev = pci_get_drvdata(pdev);
385 385
@@ -394,7 +394,7 @@ static struct pci_driver kvaser_pci_driver = {
394 .name = DRV_NAME, 394 .name = DRV_NAME,
395 .id_table = kvaser_pci_tbl, 395 .id_table = kvaser_pci_tbl,
396 .probe = kvaser_pci_init_one, 396 .probe = kvaser_pci_init_one,
397 .remove = __devexit_p(kvaser_pci_remove_one), 397 .remove = kvaser_pci_remove_one,
398}; 398};
399 399
400module_pci_driver(kvaser_pci_driver); 400module_pci_driver(kvaser_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6525dbcca4e3..d84888f03d92 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -551,8 +551,7 @@ static void peak_pci_post_irq(const struct sja1000_priv *priv)
551 writew(chan->icr_mask, chan->cfg_base + PITA_ICR); 551 writew(chan->icr_mask, chan->cfg_base + PITA_ICR);
552} 552}
553 553
554static int __devinit peak_pci_probe(struct pci_dev *pdev, 554static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
555 const struct pci_device_id *ent)
556{ 555{
557 struct sja1000_priv *priv; 556 struct sja1000_priv *priv;
558 struct peak_pci_chan *chan; 557 struct peak_pci_chan *chan;
@@ -717,7 +716,7 @@ failure_disable_pci:
717 return err; 716 return err;
718} 717}
719 718
720static void __devexit peak_pci_remove(struct pci_dev *pdev) 719static void peak_pci_remove(struct pci_dev *pdev)
721{ 720{
722 struct net_device *dev = pci_get_drvdata(pdev); /* Last device */ 721 struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
723 struct sja1000_priv *priv = netdev_priv(dev); 722 struct sja1000_priv *priv = netdev_priv(dev);
@@ -757,7 +756,7 @@ static struct pci_driver peak_pci_driver = {
757 .name = DRV_NAME, 756 .name = DRV_NAME,
758 .id_table = peak_pci_tbl, 757 .id_table = peak_pci_tbl,
759 .probe = peak_pci_probe, 758 .probe = peak_pci_probe,
760 .remove = __devexit_p(peak_pci_remove), 759 .remove = peak_pci_remove,
761}; 760};
762 761
763module_pci_driver(peak_pci_driver); 762module_pci_driver(peak_pci_driver);
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 272a85f32b14..f1175142b0a0 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -632,7 +632,7 @@ static void pcan_free(struct pcmcia_device *pdev)
632/* 632/*
633 * setup PCMCIA socket and probe for PEAK-System PC-CARD 633 * setup PCMCIA socket and probe for PEAK-System PC-CARD
634 */ 634 */
635static int __devinit pcan_probe(struct pcmcia_device *pdev) 635static int pcan_probe(struct pcmcia_device *pdev)
636{ 636{
637 struct pcan_pccard *card; 637 struct pcan_pccard *card;
638 int err; 638 int err;
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8bc95982840f..11d1062a9449 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -44,6 +44,7 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
44 "esd CAN-PCI/CPCI/PCI104/200, " 44 "esd CAN-PCI/CPCI/PCI104/200, "
45 "esd CAN-PCI/PMC/266, " 45 "esd CAN-PCI/PMC/266, "
46 "esd CAN-PCIe/2000, " 46 "esd CAN-PCIe/2000, "
47 "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
47 "IXXAT PC-I 04/PCI") 48 "IXXAT PC-I 04/PCI")
48MODULE_LICENSE("GPL v2"); 49MODULE_LICENSE("GPL v2");
49 50
@@ -131,6 +132,9 @@ struct plx_pci_card {
131#define TEWS_PCI_VENDOR_ID 0x1498 132#define TEWS_PCI_VENDOR_ID 0x1498
132#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A 133#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
133 134
135#define CTI_PCI_VENDOR_ID 0x12c4
136#define CTI_PCI_DEVICE_ID_CRG001 0x0900
137
134static void plx_pci_reset_common(struct pci_dev *pdev); 138static void plx_pci_reset_common(struct pci_dev *pdev);
135static void plx_pci_reset_marathon(struct pci_dev *pdev); 139static void plx_pci_reset_marathon(struct pci_dev *pdev);
136static void plx9056_pci_reset_common(struct pci_dev *pdev); 140static void plx9056_pci_reset_common(struct pci_dev *pdev);
@@ -158,7 +162,7 @@ struct plx_pci_card_info {
158 void (*reset_func)(struct pci_dev *pdev); 162 void (*reset_func)(struct pci_dev *pdev);
159}; 163};
160 164
161static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = { 165static struct plx_pci_card_info plx_pci_card_info_adlink = {
162 "Adlink PCI-7841/cPCI-7841", 2, 166 "Adlink PCI-7841/cPCI-7841", 2,
163 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 167 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
164 {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, 168 {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
@@ -166,7 +170,7 @@ static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
166 /* based on PLX9052 */ 170 /* based on PLX9052 */
167}; 171};
168 172
169static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = { 173static struct plx_pci_card_info plx_pci_card_info_adlink_se = {
170 "Adlink PCI-7841/cPCI-7841 SE", 2, 174 "Adlink PCI-7841/cPCI-7841 SE", 2,
171 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 175 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
172 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, 176 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
@@ -174,7 +178,7 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
174 /* based on PLX9052 */ 178 /* based on PLX9052 */
175}; 179};
176 180
177static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = { 181static struct plx_pci_card_info plx_pci_card_info_esd200 = {
178 "esd CAN-PCI/CPCI/PCI104/200", 2, 182 "esd CAN-PCI/CPCI/PCI104/200", 2,
179 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 183 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
180 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, 184 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -182,7 +186,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
182 /* based on PLX9030/9050 */ 186 /* based on PLX9030/9050 */
183}; 187};
184 188
185static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = { 189static struct plx_pci_card_info plx_pci_card_info_esd266 = {
186 "esd CAN-PCI/PMC/266", 2, 190 "esd CAN-PCI/PMC/266", 2,
187 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 191 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
188 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, 192 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -190,7 +194,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
190 /* based on PLX9056 */ 194 /* based on PLX9056 */
191}; 195};
192 196
193static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = { 197static struct plx_pci_card_info plx_pci_card_info_esd2000 = {
194 "esd CAN-PCIe/2000", 2, 198 "esd CAN-PCIe/2000", 2,
195 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 199 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
196 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, 200 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
@@ -198,7 +202,7 @@ static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
198 /* based on PEX8311 */ 202 /* based on PEX8311 */
199}; 203};
200 204
201static struct plx_pci_card_info plx_pci_card_info_ixxat __devinitdata = { 205static struct plx_pci_card_info plx_pci_card_info_ixxat = {
202 "IXXAT PC-I 04/PCI", 2, 206 "IXXAT PC-I 04/PCI", 2,
203 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 207 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
204 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x200, 0x80} }, 208 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x200, 0x80} },
@@ -206,7 +210,7 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat __devinitdata = {
206 /* based on PLX9050 */ 210 /* based on PLX9050 */
207}; 211};
208 212
209static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = { 213static struct plx_pci_card_info plx_pci_card_info_marathon = {
210 "Marathon CAN-bus-PCI", 2, 214 "Marathon CAN-bus-PCI", 2,
211 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 215 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
212 {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, 216 {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
@@ -214,7 +218,7 @@ static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
214 /* based on PLX9052 */ 218 /* based on PLX9052 */
215}; 219};
216 220
217static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = { 221static struct plx_pci_card_info plx_pci_card_info_tews = {
218 "TEWS TECHNOLOGIES TPMC810", 2, 222 "TEWS TECHNOLOGIES TPMC810", 2,
219 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 223 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
220 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} }, 224 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
@@ -222,6 +226,14 @@ static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
222 /* based on PLX9030 */ 226 /* based on PLX9030 */
223}; 227};
224 228
229static struct plx_pci_card_info plx_pci_card_info_cti = {
230 "Connect Tech Inc. CANpro/104-Plus Opto (CRG001)", 2,
231 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
232 {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
233 &plx_pci_reset_common
234 /* based on PLX9030 */
235};
236
225static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = { 237static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
226 { 238 {
227 /* Adlink PCI-7841/cPCI-7841 */ 239 /* Adlink PCI-7841/cPCI-7841 */
@@ -300,6 +312,13 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
300 0, 0, 312 0, 0,
301 (kernel_ulong_t)&plx_pci_card_info_tews 313 (kernel_ulong_t)&plx_pci_card_info_tews
302 }, 314 },
315 {
316 /* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
317 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
318 CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
319 0, 0,
320 (kernel_ulong_t)&plx_pci_card_info_cti
321 },
303 { 0,} 322 { 0,}
304}; 323};
305MODULE_DEVICE_TABLE(pci, plx_pci_tbl); 324MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
@@ -465,8 +484,8 @@ static void plx_pci_del_card(struct pci_dev *pdev)
465 * Probe PLX90xx based device for the SJA1000 chips and register each 484 * Probe PLX90xx based device for the SJA1000 chips and register each
466 * available CAN channel to SJA1000 Socket-CAN subsystem. 485 * available CAN channel to SJA1000 Socket-CAN subsystem.
467 */ 486 */
468static int __devinit plx_pci_add_card(struct pci_dev *pdev, 487static int plx_pci_add_card(struct pci_dev *pdev,
469 const struct pci_device_id *ent) 488 const struct pci_device_id *ent)
470{ 489{
471 struct sja1000_priv *priv; 490 struct sja1000_priv *priv;
472 struct net_device *dev; 491 struct net_device *dev;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 25011dbe1b96..83ee11eca0e2 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -188,11 +188,6 @@ static void sja1000_start(struct net_device *dev)
188 188
189static int sja1000_set_mode(struct net_device *dev, enum can_mode mode) 189static int sja1000_set_mode(struct net_device *dev, enum can_mode mode)
190{ 190{
191 struct sja1000_priv *priv = netdev_priv(dev);
192
193 if (!priv->open_time)
194 return -EINVAL;
195
196 switch (mode) { 191 switch (mode) {
197 case CAN_MODE_START: 192 case CAN_MODE_START:
198 sja1000_start(dev); 193 sja1000_start(dev);
@@ -579,7 +574,6 @@ static int sja1000_open(struct net_device *dev)
579 574
580 /* init and start chi */ 575 /* init and start chi */
581 sja1000_start(dev); 576 sja1000_start(dev);
582 priv->open_time = jiffies;
583 577
584 netif_start_queue(dev); 578 netif_start_queue(dev);
585 579
@@ -598,8 +592,6 @@ static int sja1000_close(struct net_device *dev)
598 592
599 close_candev(dev); 593 close_candev(dev);
600 594
601 priv->open_time = 0;
602
603 return 0; 595 return 0;
604} 596}
605 597
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 23fff06875f5..afa99847a510 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -152,7 +152,6 @@
152 */ 152 */
153struct sja1000_priv { 153struct sja1000_priv {
154 struct can_priv can; /* must be the first member */ 154 struct can_priv can; /* must be the first member */
155 int open_time;
156 struct sk_buff *echo_skb; 155 struct sk_buff *echo_skb;
157 156
158 /* the lower-layer is responsible for appropriate locking */ 157 /* the lower-layer is responsible for appropriate locking */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 90c5c2dfd2fd..5c8da4661489 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -42,11 +42,11 @@ MODULE_LICENSE("GPL v2");
42 42
43static unsigned long port[MAXDEV]; 43static unsigned long port[MAXDEV];
44static unsigned long mem[MAXDEV]; 44static unsigned long mem[MAXDEV];
45static int __devinitdata irq[MAXDEV]; 45static int irq[MAXDEV];
46static int __devinitdata clk[MAXDEV]; 46static int clk[MAXDEV];
47static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 47static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
48static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; 48static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
49static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; 49static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
50 50
51module_param_array(port, ulong, NULL, S_IRUGO); 51module_param_array(port, ulong, NULL, S_IRUGO);
52MODULE_PARM_DESC(port, "I/O port number"); 52MODULE_PARM_DESC(port, "I/O port number");
@@ -117,7 +117,7 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
117 outb(val, base + 1); 117 outb(val, base + 1);
118} 118}
119 119
120static int __devinit sja1000_isa_probe(struct platform_device *pdev) 120static int sja1000_isa_probe(struct platform_device *pdev)
121{ 121{
122 struct net_device *dev; 122 struct net_device *dev;
123 struct sja1000_priv *priv; 123 struct sja1000_priv *priv;
@@ -223,7 +223,7 @@ static int __devinit sja1000_isa_probe(struct platform_device *pdev)
223 return err; 223 return err;
224} 224}
225 225
226static int __devexit sja1000_isa_remove(struct platform_device *pdev) 226static int sja1000_isa_remove(struct platform_device *pdev)
227{ 227{
228 struct net_device *dev = dev_get_drvdata(&pdev->dev); 228 struct net_device *dev = dev_get_drvdata(&pdev->dev);
229 struct sja1000_priv *priv = netdev_priv(dev); 229 struct sja1000_priv *priv = netdev_priv(dev);
@@ -248,7 +248,7 @@ static int __devexit sja1000_isa_remove(struct platform_device *pdev)
248 248
249static struct platform_driver sja1000_isa_driver = { 249static struct platform_driver sja1000_isa_driver = {
250 .probe = sja1000_isa_probe, 250 .probe = sja1000_isa_probe,
251 .remove = __devexit_p(sja1000_isa_remove), 251 .remove = sja1000_isa_remove,
252 .driver = { 252 .driver = {
253 .name = DRV_NAME, 253 .name = DRV_NAME,
254 .owner = THIS_MODULE, 254 .owner = THIS_MODULE,
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index f2683eb6a3d5..0f5917000aa2 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -42,6 +42,8 @@
42#include <linux/can/dev.h> 42#include <linux/can/dev.h>
43 43
44#include <linux/of_platform.h> 44#include <linux/of_platform.h>
45#include <linux/of_address.h>
46#include <linux/of_irq.h>
45#include <asm/prom.h> 47#include <asm/prom.h>
46 48
47#include "sja1000.h" 49#include "sja1000.h"
@@ -59,16 +61,16 @@ MODULE_LICENSE("GPL v2");
59 61
60static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg) 62static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
61{ 63{
62 return in_8(priv->reg_base + reg); 64 return ioread8(priv->reg_base + reg);
63} 65}
64 66
65static void sja1000_ofp_write_reg(const struct sja1000_priv *priv, 67static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
66 int reg, u8 val) 68 int reg, u8 val)
67{ 69{
68 out_8(priv->reg_base + reg, val); 70 iowrite8(val, priv->reg_base + reg);
69} 71}
70 72
71static int __devexit sja1000_ofp_remove(struct platform_device *ofdev) 73static int sja1000_ofp_remove(struct platform_device *ofdev)
72{ 74{
73 struct net_device *dev = dev_get_drvdata(&ofdev->dev); 75 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
74 struct sja1000_priv *priv = netdev_priv(dev); 76 struct sja1000_priv *priv = netdev_priv(dev);
@@ -88,7 +90,7 @@ static int __devexit sja1000_ofp_remove(struct platform_device *ofdev)
88 return 0; 90 return 0;
89} 91}
90 92
91static int __devinit sja1000_ofp_probe(struct platform_device *ofdev) 93static int sja1000_ofp_probe(struct platform_device *ofdev)
92{ 94{
93 struct device_node *np = ofdev->dev.of_node; 95 struct device_node *np = ofdev->dev.of_node;
94 struct net_device *dev; 96 struct net_device *dev;
@@ -204,7 +206,7 @@ exit_release_mem:
204 return err; 206 return err;
205} 207}
206 208
207static struct of_device_id __devinitdata sja1000_ofp_table[] = { 209static struct of_device_id sja1000_ofp_table[] = {
208 {.compatible = "nxp,sja1000"}, 210 {.compatible = "nxp,sja1000"},
209 {}, 211 {},
210}; 212};
@@ -217,7 +219,7 @@ static struct platform_driver sja1000_ofp_driver = {
217 .of_match_table = sja1000_ofp_table, 219 .of_match_table = sja1000_ofp_table,
218 }, 220 },
219 .probe = sja1000_ofp_probe, 221 .probe = sja1000_ofp_probe,
220 .remove = __devexit_p(sja1000_ofp_remove), 222 .remove = sja1000_ofp_remove,
221}; 223};
222 224
223module_platform_driver(sja1000_ofp_driver); 225module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 662c5f7eb0c5..21619bb5b869 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -34,6 +34,7 @@
34 34
35MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); 35MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
36MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); 36MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
37MODULE_ALIAS("platform:" DRV_NAME);
37MODULE_LICENSE("GPL v2"); 38MODULE_LICENSE("GPL v2");
38 39
39static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) 40static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 9756099a883a..76513dd780c7 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -71,7 +71,7 @@ MODULE_LICENSE("GPL");
71#define TSCAN1_SJA1000_XTAL 16000000 71#define TSCAN1_SJA1000_XTAL 16000000
72 72
73/* SJA1000 IO base addresses */ 73/* SJA1000 IO base addresses */
74static const unsigned short tscan1_sja1000_addresses[] __devinitconst = { 74static const unsigned short tscan1_sja1000_addresses[] = {
75 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320 75 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
76}; 76};
77 77
@@ -88,7 +88,7 @@ static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
88} 88}
89 89
90/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */ 90/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
91static int __devinit tscan1_probe(struct device *dev, unsigned id) 91static int tscan1_probe(struct device *dev, unsigned id)
92{ 92{
93 struct net_device *netdev; 93 struct net_device *netdev;
94 struct sja1000_priv *priv; 94 struct sja1000_priv *priv;
@@ -171,7 +171,7 @@ static int __devinit tscan1_probe(struct device *dev, unsigned id)
171 return -ENXIO; 171 return -ENXIO;
172} 172}
173 173
174static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/) 174static int tscan1_remove(struct device *dev, unsigned id /*unused*/)
175{ 175{
176 struct net_device *netdev; 176 struct net_device *netdev;
177 struct sja1000_priv *priv; 177 struct sja1000_priv *priv;
@@ -197,7 +197,7 @@ static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
197 197
198static struct isa_driver tscan1_isa_driver = { 198static struct isa_driver tscan1_isa_driver = {
199 .probe = tscan1_probe, 199 .probe = tscan1_probe,
200 .remove = __devexit_p(tscan1_remove), 200 .remove = tscan1_remove,
201 .driver = { 201 .driver = {
202 .name = "tscan1", 202 .name = "tscan1",
203 }, 203 },
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index c0e1b1eb87a9..c2c0a5bb0b21 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -159,7 +159,7 @@ MODULE_FIRMWARE(fw_dir "bcard2.bin");
159MODULE_FIRMWARE(fw_dir "ldcard2.bin"); 159MODULE_FIRMWARE(fw_dir "ldcard2.bin");
160MODULE_FIRMWARE(fw_dir "cancrd2.bin"); 160MODULE_FIRMWARE(fw_dir "cancrd2.bin");
161 161
162static __devinit const struct softing_platform_data 162static const struct softing_platform_data
163*softingcs_find_platform_data(unsigned int manf, unsigned int prod) 163*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
164{ 164{
165 const struct softing_platform_data *lp; 165 const struct softing_platform_data *lp;
@@ -193,8 +193,7 @@ static int softingcs_enable_irq(struct platform_device *pdev, int v)
193/* 193/*
194 * pcmcia check 194 * pcmcia check
195 */ 195 */
196static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia, 196static int softingcs_probe_config(struct pcmcia_device *pcmcia, void *priv_data)
197 void *priv_data)
198{ 197{
199 struct softing_platform_data *pdat = priv_data; 198 struct softing_platform_data *pdat = priv_data;
200 struct resource *pres; 199 struct resource *pres;
@@ -215,7 +214,7 @@ static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
215 return pcmcia_request_window(pcmcia, pres, memspeed); 214 return pcmcia_request_window(pcmcia, pres, memspeed);
216} 215}
217 216
218static __devexit void softingcs_remove(struct pcmcia_device *pcmcia) 217static void softingcs_remove(struct pcmcia_device *pcmcia)
219{ 218{
220 struct platform_device *pdev = pcmcia->priv; 219 struct platform_device *pdev = pcmcia->priv;
221 220
@@ -235,7 +234,7 @@ static void softingcs_pdev_release(struct device *dev)
235 kfree(pdev); 234 kfree(pdev);
236} 235}
237 236
238static __devinit int softingcs_probe(struct pcmcia_device *pcmcia) 237static int softingcs_probe(struct pcmcia_device *pcmcia)
239{ 238{
240 int ret; 239 int ret;
241 struct platform_device *pdev; 240 struct platform_device *pdev;
@@ -338,7 +337,7 @@ static struct pcmcia_driver softingcs_driver = {
338 .name = "softingcs", 337 .name = "softingcs",
339 .id_table = softingcs_ids, 338 .id_table = softingcs_ids,
340 .probe = softingcs_probe, 339 .probe = softingcs_probe,
341 .remove = __devexit_p(softingcs_remove), 340 .remove = softingcs_remove,
342}; 341};
343 342
344static int __init softingcs_start(void) 343static int __init softingcs_start(void)
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index f2a221e7b968..3a2b45601ec2 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -478,7 +478,7 @@ static void softing_card_shutdown(struct softing *card)
478 mutex_unlock(&card->fw.lock); 478 mutex_unlock(&card->fw.lock);
479} 479}
480 480
481static __devinit int softing_card_boot(struct softing *card) 481static int softing_card_boot(struct softing *card)
482{ 482{
483 int ret, j; 483 int ret, j;
484 static const uint8_t stream[] = { 484 static const uint8_t stream[] = {
@@ -645,8 +645,8 @@ static const struct can_bittiming_const softing_btr_const = {
645}; 645};
646 646
647 647
648static __devinit struct net_device *softing_netdev_create(struct softing *card, 648static struct net_device *softing_netdev_create(struct softing *card,
649 uint16_t chip_id) 649 uint16_t chip_id)
650{ 650{
651 struct net_device *netdev; 651 struct net_device *netdev;
652 struct softing_priv *priv; 652 struct softing_priv *priv;
@@ -676,7 +676,7 @@ static __devinit struct net_device *softing_netdev_create(struct softing *card,
676 return netdev; 676 return netdev;
677} 677}
678 678
679static __devinit int softing_netdev_register(struct net_device *netdev) 679static int softing_netdev_register(struct net_device *netdev)
680{ 680{
681 int ret; 681 int ret;
682 682
@@ -745,7 +745,7 @@ static const struct attribute_group softing_pdev_group = {
745/* 745/*
746 * platform driver 746 * platform driver
747 */ 747 */
748static __devexit int softing_pdev_remove(struct platform_device *pdev) 748static int softing_pdev_remove(struct platform_device *pdev)
749{ 749{
750 struct softing *card = platform_get_drvdata(pdev); 750 struct softing *card = platform_get_drvdata(pdev);
751 int j; 751 int j;
@@ -766,7 +766,7 @@ static __devexit int softing_pdev_remove(struct platform_device *pdev)
766 return 0; 766 return 0;
767} 767}
768 768
769static __devinit int softing_pdev_probe(struct platform_device *pdev) 769static int softing_pdev_probe(struct platform_device *pdev)
770{ 770{
771 const struct softing_platform_data *pdat = pdev->dev.platform_data; 771 const struct softing_platform_data *pdat = pdev->dev.platform_data;
772 struct softing *card; 772 struct softing *card;
@@ -871,7 +871,7 @@ static struct platform_driver softing_driver = {
871 .owner = THIS_MODULE, 871 .owner = THIS_MODULE,
872 }, 872 },
873 .probe = softing_pdev_probe, 873 .probe = softing_pdev_probe,
874 .remove = __devexit_p(softing_pdev_remove), 874 .remove = softing_pdev_remove,
875}; 875};
876 876
877module_platform_driver(softing_driver); 877module_platform_driver(softing_driver);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 9ded21e79db5..f898c6363729 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -978,7 +978,7 @@ probe_exit:
978 return err; 978 return err;
979} 979}
980 980
981static int __devexit ti_hecc_remove(struct platform_device *pdev) 981static int ti_hecc_remove(struct platform_device *pdev)
982{ 982{
983 struct resource *res; 983 struct resource *res;
984 struct net_device *ndev = platform_get_drvdata(pdev); 984 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1045,7 +1045,7 @@ static struct platform_driver ti_hecc_driver = {
1045 .owner = THIS_MODULE, 1045 .owner = THIS_MODULE,
1046 }, 1046 },
1047 .probe = ti_hecc_probe, 1047 .probe = ti_hecc_probe,
1048 .remove = __devexit_p(ti_hecc_remove), 1048 .remove = ti_hecc_remove,
1049 .suspend = ti_hecc_suspend, 1049 .suspend = ti_hecc_suspend,
1050 .resume = ti_hecc_resume, 1050 .resume = ti_hecc_resume,
1051}; 1051};
@@ -1055,3 +1055,4 @@ module_platform_driver(ti_hecc_driver);
1055MODULE_AUTHOR("Anant Gole <anantgole@ti.com>"); 1055MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
1056MODULE_LICENSE("GPL v2"); 1056MODULE_LICENSE("GPL v2");
1057MODULE_DESCRIPTION(DRV_DESC); 1057MODULE_DESCRIPTION(DRV_DESC);
1058MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 0a6876841c20..a4e4bee35710 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,6 +13,35 @@ config CAN_ESD_USB2
13 This driver supports the CAN-USB/2 interface 13 This driver supports the CAN-USB/2 interface
14 from esd electronic system design gmbh (http://www.esd.eu). 14 from esd electronic system design gmbh (http://www.esd.eu).
15 15
16config CAN_KVASER_USB
17 tristate "Kvaser CAN/USB interface"
18 ---help---
19 This driver adds support for Kvaser CAN/USB devices like Kvaser
20 Leaf Light.
21
22 The driver gives support for the following devices:
23 - Kvaser Leaf Light
24 - Kvaser Leaf Professional HS
25 - Kvaser Leaf SemiPro HS
26 - Kvaser Leaf Professional LS
27 - Kvaser Leaf Professional SWC
28 - Kvaser Leaf Professional LIN
29 - Kvaser Leaf SemiPro LS
30 - Kvaser Leaf SemiPro SWC
31 - Kvaser Memorator II HS/HS
32 - Kvaser USBcan Professional HS/HS
33 - Kvaser Leaf Light GI
34 - Kvaser Leaf Professional HS (OBD-II connector)
35 - Kvaser Memorator Professional HS/LS
36 - Kvaser Leaf Light "China"
37 - Kvaser BlackBird SemiPro
38 - Kvaser USBcan R
39
40 If unsure, say N.
41
42 To compile this driver as a module, choose M here: the
43 module will be called kvaser_usb.
44
16config CAN_PEAK_USB 45config CAN_PEAK_USB
17 tristate "PEAK PCAN-USB/USB Pro interfaces" 46 tristate "PEAK PCAN-USB/USB Pro interfaces"
18 ---help--- 47 ---help---
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index da6d1d3b2969..80a2ee41fd61 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o 5obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o 6obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
7obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
7obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/ 8obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
8 9
9ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 10ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 086fa321677a..c69f0b72b352 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -245,7 +245,6 @@ struct ems_tx_urb_context {
245 245
246struct ems_usb { 246struct ems_usb {
247 struct can_priv can; /* must be the first member */ 247 struct can_priv can; /* must be the first member */
248 int open_time;
249 248
250 struct sk_buff *echo_skb[MAX_TX_URBS]; 249 struct sk_buff *echo_skb[MAX_TX_URBS];
251 250
@@ -728,7 +727,6 @@ static int ems_usb_open(struct net_device *netdev)
728 return err; 727 return err;
729 } 728 }
730 729
731 dev->open_time = jiffies;
732 730
733 netif_start_queue(netdev); 731 netif_start_queue(netdev);
734 732
@@ -878,8 +876,6 @@ static int ems_usb_close(struct net_device *netdev)
878 876
879 close_candev(netdev); 877 close_candev(netdev);
880 878
881 dev->open_time = 0;
882
883 return 0; 879 return 0;
884} 880}
885 881
@@ -905,9 +901,6 @@ static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode)
905{ 901{
906 struct ems_usb *dev = netdev_priv(netdev); 902 struct ems_usb *dev = netdev_priv(netdev);
907 903
908 if (!dev->open_time)
909 return -EINVAL;
910
911 switch (mode) { 904 switch (mode) {
912 case CAN_MODE_START: 905 case CAN_MODE_START:
913 if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL)) 906 if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL))
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index bd36e5517173..9b74d1e3ad44 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * CAN driver for esd CAN-USB/2 2 * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
3 * 3 *
4 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh 4 * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published 7 * under the terms of the GNU General Public License as published
@@ -28,14 +28,16 @@
28#include <linux/can/error.h> 28#include <linux/can/error.h>
29 29
30MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>"); 30MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
31MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 interfaces"); 31MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
32MODULE_LICENSE("GPL v2"); 32MODULE_LICENSE("GPL v2");
33 33
34/* Define these values to match your devices */ 34/* Define these values to match your devices */
35#define USB_ESDGMBH_VENDOR_ID 0x0ab4 35#define USB_ESDGMBH_VENDOR_ID 0x0ab4
36#define USB_CANUSB2_PRODUCT_ID 0x0010 36#define USB_CANUSB2_PRODUCT_ID 0x0010
37#define USB_CANUSBM_PRODUCT_ID 0x0011
37 38
38#define ESD_USB2_CAN_CLOCK 60000000 39#define ESD_USB2_CAN_CLOCK 60000000
40#define ESD_USBM_CAN_CLOCK 36000000
39#define ESD_USB2_MAX_NETS 2 41#define ESD_USB2_MAX_NETS 2
40 42
41/* USB2 commands */ 43/* USB2 commands */
@@ -69,6 +71,7 @@ MODULE_LICENSE("GPL v2");
69#define ESD_USB2_TSEG2_SHIFT 20 71#define ESD_USB2_TSEG2_SHIFT 20
70#define ESD_USB2_SJW_MAX 4 72#define ESD_USB2_SJW_MAX 4
71#define ESD_USB2_SJW_SHIFT 14 73#define ESD_USB2_SJW_SHIFT 14
74#define ESD_USBM_SJW_SHIFT 24
72#define ESD_USB2_BRP_MIN 1 75#define ESD_USB2_BRP_MIN 1
73#define ESD_USB2_BRP_MAX 1024 76#define ESD_USB2_BRP_MAX 1024
74#define ESD_USB2_BRP_INC 1 77#define ESD_USB2_BRP_INC 1
@@ -183,6 +186,7 @@ struct __attribute__ ((packed)) esd_usb2_msg {
183 186
184static struct usb_device_id esd_usb2_table[] = { 187static struct usb_device_id esd_usb2_table[] = {
185 {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)}, 188 {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
189 {USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
186 {} 190 {}
187}; 191};
188MODULE_DEVICE_TABLE(usb, esd_usb2_table); 192MODULE_DEVICE_TABLE(usb, esd_usb2_table);
@@ -213,7 +217,6 @@ struct esd_usb2_net_priv {
213 struct usb_anchor tx_submitted; 217 struct usb_anchor tx_submitted;
214 struct esd_tx_urb_context tx_contexts[MAX_TX_URBS]; 218 struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
215 219
216 int open_time;
217 struct esd_usb2 *usb2; 220 struct esd_usb2 *usb2;
218 struct net_device *netdev; 221 struct net_device *netdev;
219 int index; 222 int index;
@@ -691,8 +694,6 @@ static int esd_usb2_open(struct net_device *netdev)
691 return err; 694 return err;
692 } 695 }
693 696
694 priv->open_time = jiffies;
695
696 netif_start_queue(netdev); 697 netif_start_queue(netdev);
697 698
698 return 0; 699 return 0;
@@ -860,8 +861,6 @@ static int esd_usb2_close(struct net_device *netdev)
860 861
861 close_candev(netdev); 862 close_candev(netdev);
862 863
863 priv->open_time = 0;
864
865 return 0; 864 return 0;
866} 865}
867 866
@@ -889,11 +888,22 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
889 struct can_bittiming *bt = &priv->can.bittiming; 888 struct can_bittiming *bt = &priv->can.bittiming;
890 struct esd_usb2_msg msg; 889 struct esd_usb2_msg msg;
891 u32 canbtr; 890 u32 canbtr;
891 int sjw_shift;
892 892
893 canbtr = ESD_USB2_UBR; 893 canbtr = ESD_USB2_UBR;
894 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
895 canbtr |= ESD_USB2_LOM;
896
894 canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1); 897 canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
898
899 if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
900 USB_CANUSBM_PRODUCT_ID)
901 sjw_shift = ESD_USBM_SJW_SHIFT;
902 else
903 sjw_shift = ESD_USB2_SJW_SHIFT;
904
895 canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1)) 905 canbtr |= ((bt->sjw - 1) & (ESD_USB2_SJW_MAX - 1))
896 << ESD_USB2_SJW_SHIFT; 906 << sjw_shift;
897 canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) 907 canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1)
898 & (ESD_USB2_TSEG1_MAX - 1)) 908 & (ESD_USB2_TSEG1_MAX - 1))
899 << ESD_USB2_TSEG1_SHIFT; 909 << ESD_USB2_TSEG1_SHIFT;
@@ -926,11 +936,6 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
926 936
927static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode) 937static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
928{ 938{
929 struct esd_usb2_net_priv *priv = netdev_priv(netdev);
930
931 if (!priv->open_time)
932 return -EINVAL;
933
934 switch (mode) { 939 switch (mode) {
935 case CAN_MODE_START: 940 case CAN_MODE_START:
936 netif_wake_queue(netdev); 941 netif_wake_queue(netdev);
@@ -971,12 +976,20 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
971 priv->index = index; 976 priv->index = index;
972 977
973 priv->can.state = CAN_STATE_STOPPED; 978 priv->can.state = CAN_STATE_STOPPED;
974 priv->can.clock.freq = ESD_USB2_CAN_CLOCK; 979 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
980
981 if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
982 USB_CANUSBM_PRODUCT_ID)
983 priv->can.clock.freq = ESD_USBM_CAN_CLOCK;
984 else {
985 priv->can.clock.freq = ESD_USB2_CAN_CLOCK;
986 priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
987 }
988
975 priv->can.bittiming_const = &esd_usb2_bittiming_const; 989 priv->can.bittiming_const = &esd_usb2_bittiming_const;
976 priv->can.do_set_bittiming = esd_usb2_set_bittiming; 990 priv->can.do_set_bittiming = esd_usb2_set_bittiming;
977 priv->can.do_set_mode = esd_usb2_set_mode; 991 priv->can.do_set_mode = esd_usb2_set_mode;
978 priv->can.do_get_berr_counter = esd_usb2_get_berr_counter; 992 priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
979 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
980 993
981 netdev->flags |= IFF_ECHO; /* we support local echo */ 994 netdev->flags |= IFF_ECHO; /* we support local echo */
982 995
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
new file mode 100644
index 000000000000..5b58a4d87397
--- /dev/null
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -0,0 +1,1627 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation version 2.
5 *
6 * Parts of this driver are based on the following:
7 * - Kvaser linux leaf driver (version 4.78)
8 * - CAN driver for esd CAN-USB/2
9 *
10 * Copyright (C) 2002-2006 KVASER AB, Sweden. All rights reserved.
11 * Copyright (C) 2010 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
12 * Copyright (C) 2012 Olivier Sobrie <olivier@sobrie.be>
13 */
14
15#include <linux/init.h>
16#include <linux/completion.h>
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/usb.h>
20
21#include <linux/can.h>
22#include <linux/can/dev.h>
23#include <linux/can/error.h>
24
25#define MAX_TX_URBS 16
26#define MAX_RX_URBS 4
27#define START_TIMEOUT 1000 /* msecs */
28#define STOP_TIMEOUT 1000 /* msecs */
29#define USB_SEND_TIMEOUT 1000 /* msecs */
30#define USB_RECV_TIMEOUT 1000 /* msecs */
31#define RX_BUFFER_SIZE 3072
32#define CAN_USB_CLOCK 8000000
33#define MAX_NET_DEVICES 3
34
35/* Kvaser USB devices */
36#define KVASER_VENDOR_ID 0x0bfd
37#define USB_LEAF_DEVEL_PRODUCT_ID 10
38#define USB_LEAF_LITE_PRODUCT_ID 11
39#define USB_LEAF_PRO_PRODUCT_ID 12
40#define USB_LEAF_SPRO_PRODUCT_ID 14
41#define USB_LEAF_PRO_LS_PRODUCT_ID 15
42#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
43#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
44#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
45#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
46#define USB_MEMO2_DEVEL_PRODUCT_ID 22
47#define USB_MEMO2_HSHS_PRODUCT_ID 23
48#define USB_UPRO_HSHS_PRODUCT_ID 24
49#define USB_LEAF_LITE_GI_PRODUCT_ID 25
50#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
51#define USB_MEMO2_HSLS_PRODUCT_ID 27
52#define USB_LEAF_LITE_CH_PRODUCT_ID 28
53#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
54#define USB_OEM_MERCURY_PRODUCT_ID 34
55#define USB_OEM_LEAF_PRODUCT_ID 35
56#define USB_CAN_R_PRODUCT_ID 39
57
58/* USB devices features */
59#define KVASER_HAS_SILENT_MODE BIT(0)
60#define KVASER_HAS_TXRX_ERRORS BIT(1)
61
62/* Message header size */
63#define MSG_HEADER_LEN 2
64
65/* Can message flags */
66#define MSG_FLAG_ERROR_FRAME BIT(0)
67#define MSG_FLAG_OVERRUN BIT(1)
68#define MSG_FLAG_NERR BIT(2)
69#define MSG_FLAG_WAKEUP BIT(3)
70#define MSG_FLAG_REMOTE_FRAME BIT(4)
71#define MSG_FLAG_RESERVED BIT(5)
72#define MSG_FLAG_TX_ACK BIT(6)
73#define MSG_FLAG_TX_REQUEST BIT(7)
74
75/* Can states */
76#define M16C_STATE_BUS_RESET BIT(0)
77#define M16C_STATE_BUS_ERROR BIT(4)
78#define M16C_STATE_BUS_PASSIVE BIT(5)
79#define M16C_STATE_BUS_OFF BIT(6)
80
81/* Can msg ids */
82#define CMD_RX_STD_MESSAGE 12
83#define CMD_TX_STD_MESSAGE 13
84#define CMD_RX_EXT_MESSAGE 14
85#define CMD_TX_EXT_MESSAGE 15
86#define CMD_SET_BUS_PARAMS 16
87#define CMD_GET_BUS_PARAMS 17
88#define CMD_GET_BUS_PARAMS_REPLY 18
89#define CMD_GET_CHIP_STATE 19
90#define CMD_CHIP_STATE_EVENT 20
91#define CMD_SET_CTRL_MODE 21
92#define CMD_GET_CTRL_MODE 22
93#define CMD_GET_CTRL_MODE_REPLY 23
94#define CMD_RESET_CHIP 24
95#define CMD_RESET_CARD 25
96#define CMD_START_CHIP 26
97#define CMD_START_CHIP_REPLY 27
98#define CMD_STOP_CHIP 28
99#define CMD_STOP_CHIP_REPLY 29
100#define CMD_GET_CARD_INFO2 32
101#define CMD_GET_CARD_INFO 34
102#define CMD_GET_CARD_INFO_REPLY 35
103#define CMD_GET_SOFTWARE_INFO 38
104#define CMD_GET_SOFTWARE_INFO_REPLY 39
105#define CMD_ERROR_EVENT 45
106#define CMD_FLUSH_QUEUE 48
107#define CMD_RESET_ERROR_COUNTER 49
108#define CMD_TX_ACKNOWLEDGE 50
109#define CMD_CAN_ERROR_EVENT 51
110#define CMD_USB_THROTTLE 77
111#define CMD_LOG_MESSAGE 106
112
113/* error factors */
114#define M16C_EF_ACKE BIT(0)
115#define M16C_EF_CRCE BIT(1)
116#define M16C_EF_FORME BIT(2)
117#define M16C_EF_STFE BIT(3)
118#define M16C_EF_BITE0 BIT(4)
119#define M16C_EF_BITE1 BIT(5)
120#define M16C_EF_RCVE BIT(6)
121#define M16C_EF_TRE BIT(7)
122
123/* bittiming parameters */
124#define KVASER_USB_TSEG1_MIN 1
125#define KVASER_USB_TSEG1_MAX 16
126#define KVASER_USB_TSEG2_MIN 1
127#define KVASER_USB_TSEG2_MAX 8
128#define KVASER_USB_SJW_MAX 4
129#define KVASER_USB_BRP_MIN 1
130#define KVASER_USB_BRP_MAX 64
131#define KVASER_USB_BRP_INC 1
132
133/* ctrl modes */
134#define KVASER_CTRL_MODE_NORMAL 1
135#define KVASER_CTRL_MODE_SILENT 2
136#define KVASER_CTRL_MODE_SELFRECEPTION 3
137#define KVASER_CTRL_MODE_OFF 4
138
139struct kvaser_msg_simple {
140 u8 tid;
141 u8 channel;
142} __packed;
143
144struct kvaser_msg_cardinfo {
145 u8 tid;
146 u8 nchannels;
147 __le32 serial_number;
148 __le32 padding;
149 __le32 clock_resolution;
150 __le32 mfgdate;
151 u8 ean[8];
152 u8 hw_revision;
153 u8 usb_hs_mode;
154 __le16 padding2;
155} __packed;
156
157struct kvaser_msg_cardinfo2 {
158 u8 tid;
159 u8 channel;
160 u8 pcb_id[24];
161 __le32 oem_unlock_code;
162} __packed;
163
164struct kvaser_msg_softinfo {
165 u8 tid;
166 u8 channel;
167 __le32 sw_options;
168 __le32 fw_version;
169 __le16 max_outstanding_tx;
170 __le16 padding[9];
171} __packed;
172
173struct kvaser_msg_busparams {
174 u8 tid;
175 u8 channel;
176 __le32 bitrate;
177 u8 tseg1;
178 u8 tseg2;
179 u8 sjw;
180 u8 no_samp;
181} __packed;
182
183struct kvaser_msg_tx_can {
184 u8 channel;
185 u8 tid;
186 u8 msg[14];
187 u8 padding;
188 u8 flags;
189} __packed;
190
191struct kvaser_msg_rx_can {
192 u8 channel;
193 u8 flag;
194 __le16 time[3];
195 u8 msg[14];
196} __packed;
197
198struct kvaser_msg_chip_state_event {
199 u8 tid;
200 u8 channel;
201 __le16 time[3];
202 u8 tx_errors_count;
203 u8 rx_errors_count;
204 u8 status;
205 u8 padding[3];
206} __packed;
207
208struct kvaser_msg_tx_acknowledge {
209 u8 channel;
210 u8 tid;
211 __le16 time[3];
212 u8 flags;
213 u8 time_offset;
214} __packed;
215
216struct kvaser_msg_error_event {
217 u8 tid;
218 u8 flags;
219 __le16 time[3];
220 u8 channel;
221 u8 padding;
222 u8 tx_errors_count;
223 u8 rx_errors_count;
224 u8 status;
225 u8 error_factor;
226} __packed;
227
228struct kvaser_msg_ctrl_mode {
229 u8 tid;
230 u8 channel;
231 u8 ctrl_mode;
232 u8 padding[3];
233} __packed;
234
235struct kvaser_msg_flush_queue {
236 u8 tid;
237 u8 channel;
238 u8 flags;
239 u8 padding[3];
240} __packed;
241
242struct kvaser_msg_log_message {
243 u8 channel;
244 u8 flags;
245 __le16 time[3];
246 u8 dlc;
247 u8 time_offset;
248 __le32 id;
249 u8 data[8];
250} __packed;
251
252struct kvaser_msg {
253 u8 len;
254 u8 id;
255 union {
256 struct kvaser_msg_simple simple;
257 struct kvaser_msg_cardinfo cardinfo;
258 struct kvaser_msg_cardinfo2 cardinfo2;
259 struct kvaser_msg_softinfo softinfo;
260 struct kvaser_msg_busparams busparams;
261 struct kvaser_msg_tx_can tx_can;
262 struct kvaser_msg_rx_can rx_can;
263 struct kvaser_msg_chip_state_event chip_state_event;
264 struct kvaser_msg_tx_acknowledge tx_acknowledge;
265 struct kvaser_msg_error_event error_event;
266 struct kvaser_msg_ctrl_mode ctrl_mode;
267 struct kvaser_msg_flush_queue flush_queue;
268 struct kvaser_msg_log_message log_message;
269 } u;
270} __packed;
271
272struct kvaser_usb_tx_urb_context {
273 struct kvaser_usb_net_priv *priv;
274 u32 echo_index;
275 int dlc;
276};
277
278struct kvaser_usb {
279 struct usb_device *udev;
280 struct kvaser_usb_net_priv *nets[MAX_NET_DEVICES];
281
282 struct usb_endpoint_descriptor *bulk_in, *bulk_out;
283 struct usb_anchor rx_submitted;
284
285 u32 fw_version;
286 unsigned int nchannels;
287
288 bool rxinitdone;
289 void *rxbuf[MAX_RX_URBS];
290 dma_addr_t rxbuf_dma[MAX_RX_URBS];
291};
292
293struct kvaser_usb_net_priv {
294 struct can_priv can;
295
296 atomic_t active_tx_urbs;
297 struct usb_anchor tx_submitted;
298 struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
299
300 struct completion start_comp, stop_comp;
301
302 struct kvaser_usb *dev;
303 struct net_device *netdev;
304 int channel;
305
306 struct can_berr_counter bec;
307};
308
309static const struct usb_device_id kvaser_usb_table[] = {
310 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
311 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
312 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
313 .driver_info = KVASER_HAS_TXRX_ERRORS |
314 KVASER_HAS_SILENT_MODE },
315 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
316 .driver_info = KVASER_HAS_TXRX_ERRORS |
317 KVASER_HAS_SILENT_MODE },
318 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
319 .driver_info = KVASER_HAS_TXRX_ERRORS |
320 KVASER_HAS_SILENT_MODE },
321 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
322 .driver_info = KVASER_HAS_TXRX_ERRORS |
323 KVASER_HAS_SILENT_MODE },
324 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
325 .driver_info = KVASER_HAS_TXRX_ERRORS |
326 KVASER_HAS_SILENT_MODE },
327 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
328 .driver_info = KVASER_HAS_TXRX_ERRORS |
329 KVASER_HAS_SILENT_MODE },
330 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
331 .driver_info = KVASER_HAS_TXRX_ERRORS |
332 KVASER_HAS_SILENT_MODE },
333 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
334 .driver_info = KVASER_HAS_TXRX_ERRORS |
335 KVASER_HAS_SILENT_MODE },
336 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
337 .driver_info = KVASER_HAS_TXRX_ERRORS |
338 KVASER_HAS_SILENT_MODE },
339 { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
340 .driver_info = KVASER_HAS_TXRX_ERRORS },
341 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
342 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
343 .driver_info = KVASER_HAS_TXRX_ERRORS |
344 KVASER_HAS_SILENT_MODE },
345 { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
346 .driver_info = KVASER_HAS_TXRX_ERRORS },
347 { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
348 .driver_info = KVASER_HAS_TXRX_ERRORS },
349 { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
350 .driver_info = KVASER_HAS_TXRX_ERRORS },
351 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
352 .driver_info = KVASER_HAS_TXRX_ERRORS },
353 { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
354 .driver_info = KVASER_HAS_TXRX_ERRORS },
355 { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
356 .driver_info = KVASER_HAS_TXRX_ERRORS },
357 { }
358};
359MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
360
361static inline int kvaser_usb_send_msg(const struct kvaser_usb *dev,
362 struct kvaser_msg *msg)
363{
364 int actual_len;
365
366 return usb_bulk_msg(dev->udev,
367 usb_sndbulkpipe(dev->udev,
368 dev->bulk_out->bEndpointAddress),
369 msg, msg->len, &actual_len,
370 USB_SEND_TIMEOUT);
371}
372
373static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
374 struct kvaser_msg *msg)
375{
376 struct kvaser_msg *tmp;
377 void *buf;
378 int actual_len;
379 int err;
380 int pos = 0;
381
382 buf = kzalloc(RX_BUFFER_SIZE, GFP_KERNEL);
383 if (!buf)
384 return -ENOMEM;
385
386 err = usb_bulk_msg(dev->udev,
387 usb_rcvbulkpipe(dev->udev,
388 dev->bulk_in->bEndpointAddress),
389 buf, RX_BUFFER_SIZE, &actual_len,
390 USB_RECV_TIMEOUT);
391 if (err < 0)
392 goto end;
393
394 while (pos <= actual_len - MSG_HEADER_LEN) {
395 tmp = buf + pos;
396
397 if (!tmp->len)
398 break;
399
400 if (pos + tmp->len > actual_len) {
401 dev_err(dev->udev->dev.parent, "Format error\n");
402 break;
403 }
404
405 if (tmp->id == id) {
406 memcpy(msg, tmp, tmp->len);
407 goto end;
408 }
409
410 pos += tmp->len;
411 }
412
413 err = -EINVAL;
414
415end:
416 kfree(buf);
417
418 return err;
419}
420
421static int kvaser_usb_send_simple_msg(const struct kvaser_usb *dev,
422 u8 msg_id, int channel)
423{
424 struct kvaser_msg *msg;
425 int rc;
426
427 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
428 if (!msg)
429 return -ENOMEM;
430
431 msg->id = msg_id;
432 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
433 msg->u.simple.channel = channel;
434 msg->u.simple.tid = 0xff;
435
436 rc = kvaser_usb_send_msg(dev, msg);
437
438 kfree(msg);
439 return rc;
440}
441
442static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
443{
444 struct kvaser_msg msg;
445 int err;
446
447 err = kvaser_usb_send_simple_msg(dev, CMD_GET_SOFTWARE_INFO, 0);
448 if (err)
449 return err;
450
451 err = kvaser_usb_wait_msg(dev, CMD_GET_SOFTWARE_INFO_REPLY, &msg);
452 if (err)
453 return err;
454
455 dev->fw_version = le32_to_cpu(msg.u.softinfo.fw_version);
456
457 return 0;
458}
459
460static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
461{
462 struct kvaser_msg msg;
463 int err;
464
465 err = kvaser_usb_send_simple_msg(dev, CMD_GET_CARD_INFO, 0);
466 if (err)
467 return err;
468
469 err = kvaser_usb_wait_msg(dev, CMD_GET_CARD_INFO_REPLY, &msg);
470 if (err)
471 return err;
472
473 dev->nchannels = msg.u.cardinfo.nchannels;
474
475 return 0;
476}
477
478static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
479 const struct kvaser_msg *msg)
480{
481 struct net_device_stats *stats;
482 struct kvaser_usb_tx_urb_context *context;
483 struct kvaser_usb_net_priv *priv;
484 struct sk_buff *skb;
485 struct can_frame *cf;
486 u8 channel = msg->u.tx_acknowledge.channel;
487 u8 tid = msg->u.tx_acknowledge.tid;
488
489 if (channel >= dev->nchannels) {
490 dev_err(dev->udev->dev.parent,
491 "Invalid channel number (%d)\n", channel);
492 return;
493 }
494
495 priv = dev->nets[channel];
496
497 if (!netif_device_present(priv->netdev))
498 return;
499
500 stats = &priv->netdev->stats;
501
502 context = &priv->tx_contexts[tid % MAX_TX_URBS];
503
504 /* Sometimes the state change doesn't come after a bus-off event */
505 if (priv->can.restart_ms &&
506 (priv->can.state >= CAN_STATE_BUS_OFF)) {
507 skb = alloc_can_err_skb(priv->netdev, &cf);
508 if (skb) {
509 cf->can_id |= CAN_ERR_RESTARTED;
510 netif_rx(skb);
511
512 stats->rx_packets++;
513 stats->rx_bytes += cf->can_dlc;
514 } else {
515 netdev_err(priv->netdev,
516 "No memory left for err_skb\n");
517 }
518
519 priv->can.can_stats.restarts++;
520 netif_carrier_on(priv->netdev);
521
522 priv->can.state = CAN_STATE_ERROR_ACTIVE;
523 }
524
525 stats->tx_packets++;
526 stats->tx_bytes += context->dlc;
527 can_get_echo_skb(priv->netdev, context->echo_index);
528
529 context->echo_index = MAX_TX_URBS;
530 atomic_dec(&priv->active_tx_urbs);
531
532 netif_wake_queue(priv->netdev);
533}
534
535static void kvaser_usb_simple_msg_callback(struct urb *urb)
536{
537 struct net_device *netdev = urb->context;
538
539 kfree(urb->transfer_buffer);
540
541 if (urb->status)
542 netdev_warn(netdev, "urb status received: %d\n",
543 urb->status);
544}
545
546static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
547 u8 msg_id)
548{
549 struct kvaser_usb *dev = priv->dev;
550 struct net_device *netdev = priv->netdev;
551 struct kvaser_msg *msg;
552 struct urb *urb;
553 void *buf;
554 int err;
555
556 urb = usb_alloc_urb(0, GFP_ATOMIC);
557 if (!urb) {
558 netdev_err(netdev, "No memory left for URBs\n");
559 return -ENOMEM;
560 }
561
562 buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
563 if (!buf) {
564 netdev_err(netdev, "No memory left for USB buffer\n");
565 usb_free_urb(urb);
566 return -ENOMEM;
567 }
568
569 msg = (struct kvaser_msg *)buf;
570 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_simple);
571 msg->id = msg_id;
572 msg->u.simple.channel = priv->channel;
573
574 usb_fill_bulk_urb(urb, dev->udev,
575 usb_sndbulkpipe(dev->udev,
576 dev->bulk_out->bEndpointAddress),
577 buf, msg->len,
578 kvaser_usb_simple_msg_callback, priv);
579 usb_anchor_urb(urb, &priv->tx_submitted);
580
581 err = usb_submit_urb(urb, GFP_ATOMIC);
582 if (err) {
583 netdev_err(netdev, "Error transmitting URB\n");
584 usb_unanchor_urb(urb);
585 usb_free_urb(urb);
586 kfree(buf);
587 return err;
588 }
589
590 usb_free_urb(urb);
591
592 return 0;
593}
594
595static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
596{
597 int i;
598
599 usb_kill_anchored_urbs(&priv->tx_submitted);
600 atomic_set(&priv->active_tx_urbs, 0);
601
602 for (i = 0; i < MAX_TX_URBS; i++)
603 priv->tx_contexts[i].echo_index = MAX_TX_URBS;
604}
605
606static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
607 const struct kvaser_msg *msg)
608{
609 struct can_frame *cf;
610 struct sk_buff *skb;
611 struct net_device_stats *stats;
612 struct kvaser_usb_net_priv *priv;
613 unsigned int new_state;
614 u8 channel, status, txerr, rxerr, error_factor;
615
616 switch (msg->id) {
617 case CMD_CAN_ERROR_EVENT:
618 channel = msg->u.error_event.channel;
619 status = msg->u.error_event.status;
620 txerr = msg->u.error_event.tx_errors_count;
621 rxerr = msg->u.error_event.rx_errors_count;
622 error_factor = msg->u.error_event.error_factor;
623 break;
624 case CMD_LOG_MESSAGE:
625 channel = msg->u.log_message.channel;
626 status = msg->u.log_message.data[0];
627 txerr = msg->u.log_message.data[2];
628 rxerr = msg->u.log_message.data[3];
629 error_factor = msg->u.log_message.data[1];
630 break;
631 case CMD_CHIP_STATE_EVENT:
632 channel = msg->u.chip_state_event.channel;
633 status = msg->u.chip_state_event.status;
634 txerr = msg->u.chip_state_event.tx_errors_count;
635 rxerr = msg->u.chip_state_event.rx_errors_count;
636 error_factor = 0;
637 break;
638 default:
639 dev_err(dev->udev->dev.parent, "Invalid msg id (%d)\n",
640 msg->id);
641 return;
642 }
643
644 if (channel >= dev->nchannels) {
645 dev_err(dev->udev->dev.parent,
646 "Invalid channel number (%d)\n", channel);
647 return;
648 }
649
650 priv = dev->nets[channel];
651 stats = &priv->netdev->stats;
652
653 if (status & M16C_STATE_BUS_RESET) {
654 kvaser_usb_unlink_tx_urbs(priv);
655 return;
656 }
657
658 skb = alloc_can_err_skb(priv->netdev, &cf);
659 if (!skb) {
660 stats->rx_dropped++;
661 return;
662 }
663
664 new_state = priv->can.state;
665
666 netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
667
668 if (status & M16C_STATE_BUS_OFF) {
669 cf->can_id |= CAN_ERR_BUSOFF;
670
671 priv->can.can_stats.bus_off++;
672 if (!priv->can.restart_ms)
673 kvaser_usb_simple_msg_async(priv, CMD_STOP_CHIP);
674
675 netif_carrier_off(priv->netdev);
676
677 new_state = CAN_STATE_BUS_OFF;
678 } else if (status & M16C_STATE_BUS_PASSIVE) {
679 if (priv->can.state != CAN_STATE_ERROR_PASSIVE) {
680 cf->can_id |= CAN_ERR_CRTL;
681
682 if (txerr || rxerr)
683 cf->data[1] = (txerr > rxerr)
684 ? CAN_ERR_CRTL_TX_PASSIVE
685 : CAN_ERR_CRTL_RX_PASSIVE;
686 else
687 cf->data[1] = CAN_ERR_CRTL_TX_PASSIVE |
688 CAN_ERR_CRTL_RX_PASSIVE;
689
690 priv->can.can_stats.error_passive++;
691 }
692
693 new_state = CAN_STATE_ERROR_PASSIVE;
694 }
695
696 if (status == M16C_STATE_BUS_ERROR) {
697 if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
698 ((txerr >= 96) || (rxerr >= 96))) {
699 cf->can_id |= CAN_ERR_CRTL;
700 cf->data[1] = (txerr > rxerr)
701 ? CAN_ERR_CRTL_TX_WARNING
702 : CAN_ERR_CRTL_RX_WARNING;
703
704 priv->can.can_stats.error_warning++;
705 new_state = CAN_STATE_ERROR_WARNING;
706 } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
707 cf->can_id |= CAN_ERR_PROT;
708 cf->data[2] = CAN_ERR_PROT_ACTIVE;
709
710 new_state = CAN_STATE_ERROR_ACTIVE;
711 }
712 }
713
714 if (!status) {
715 cf->can_id |= CAN_ERR_PROT;
716 cf->data[2] = CAN_ERR_PROT_ACTIVE;
717
718 new_state = CAN_STATE_ERROR_ACTIVE;
719 }
720
721 if (priv->can.restart_ms &&
722 (priv->can.state >= CAN_STATE_BUS_OFF) &&
723 (new_state < CAN_STATE_BUS_OFF)) {
724 cf->can_id |= CAN_ERR_RESTARTED;
725 netif_carrier_on(priv->netdev);
726
727 priv->can.can_stats.restarts++;
728 }
729
730 if (error_factor) {
731 priv->can.can_stats.bus_error++;
732 stats->rx_errors++;
733
734 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
735
736 if (error_factor & M16C_EF_ACKE)
737 cf->data[3] |= (CAN_ERR_PROT_LOC_ACK);
738 if (error_factor & M16C_EF_CRCE)
739 cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
740 CAN_ERR_PROT_LOC_CRC_DEL);
741 if (error_factor & M16C_EF_FORME)
742 cf->data[2] |= CAN_ERR_PROT_FORM;
743 if (error_factor & M16C_EF_STFE)
744 cf->data[2] |= CAN_ERR_PROT_STUFF;
745 if (error_factor & M16C_EF_BITE0)
746 cf->data[2] |= CAN_ERR_PROT_BIT0;
747 if (error_factor & M16C_EF_BITE1)
748 cf->data[2] |= CAN_ERR_PROT_BIT1;
749 if (error_factor & M16C_EF_TRE)
750 cf->data[2] |= CAN_ERR_PROT_TX;
751 }
752
753 cf->data[6] = txerr;
754 cf->data[7] = rxerr;
755
756 priv->bec.txerr = txerr;
757 priv->bec.rxerr = rxerr;
758
759 priv->can.state = new_state;
760
761 netif_rx(skb);
762
763 stats->rx_packets++;
764 stats->rx_bytes += cf->can_dlc;
765}
766
767static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
768 const struct kvaser_msg *msg)
769{
770 struct can_frame *cf;
771 struct sk_buff *skb;
772 struct net_device_stats *stats = &priv->netdev->stats;
773
774 if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME |
775 MSG_FLAG_NERR)) {
776 netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
777 msg->u.rx_can.flag);
778
779 stats->rx_errors++;
780 return;
781 }
782
783 if (msg->u.rx_can.flag & MSG_FLAG_OVERRUN) {
784 skb = alloc_can_err_skb(priv->netdev, &cf);
785 if (!skb) {
786 stats->rx_dropped++;
787 return;
788 }
789
790 cf->can_id |= CAN_ERR_CRTL;
791 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
792
793 stats->rx_over_errors++;
794 stats->rx_errors++;
795
796 netif_rx(skb);
797
798 stats->rx_packets++;
799 stats->rx_bytes += cf->can_dlc;
800 }
801}
802
803static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
804 const struct kvaser_msg *msg)
805{
806 struct kvaser_usb_net_priv *priv;
807 struct can_frame *cf;
808 struct sk_buff *skb;
809 struct net_device_stats *stats;
810 u8 channel = msg->u.rx_can.channel;
811
812 if (channel >= dev->nchannels) {
813 dev_err(dev->udev->dev.parent,
814 "Invalid channel number (%d)\n", channel);
815 return;
816 }
817
818 priv = dev->nets[channel];
819 stats = &priv->netdev->stats;
820
821 if (msg->u.rx_can.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR |
822 MSG_FLAG_OVERRUN)) {
823 kvaser_usb_rx_can_err(priv, msg);
824 return;
825 } else if (msg->u.rx_can.flag & ~MSG_FLAG_REMOTE_FRAME) {
826 netdev_warn(priv->netdev,
827 "Unhandled frame (flags: 0x%02x)",
828 msg->u.rx_can.flag);
829 return;
830 }
831
832 skb = alloc_can_skb(priv->netdev, &cf);
833 if (!skb) {
834 stats->tx_dropped++;
835 return;
836 }
837
838 cf->can_id = ((msg->u.rx_can.msg[0] & 0x1f) << 6) |
839 (msg->u.rx_can.msg[1] & 0x3f);
840 cf->can_dlc = get_can_dlc(msg->u.rx_can.msg[5]);
841
842 if (msg->id == CMD_RX_EXT_MESSAGE) {
843 cf->can_id <<= 18;
844 cf->can_id |= ((msg->u.rx_can.msg[2] & 0x0f) << 14) |
845 ((msg->u.rx_can.msg[3] & 0xff) << 6) |
846 (msg->u.rx_can.msg[4] & 0x3f);
847 cf->can_id |= CAN_EFF_FLAG;
848 }
849
850 if (msg->u.rx_can.flag & MSG_FLAG_REMOTE_FRAME)
851 cf->can_id |= CAN_RTR_FLAG;
852 else
853 memcpy(cf->data, &msg->u.rx_can.msg[6], cf->can_dlc);
854
855 netif_rx(skb);
856
857 stats->rx_packets++;
858 stats->rx_bytes += cf->can_dlc;
859}
860
861static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
862 const struct kvaser_msg *msg)
863{
864 struct kvaser_usb_net_priv *priv;
865 u8 channel = msg->u.simple.channel;
866
867 if (channel >= dev->nchannels) {
868 dev_err(dev->udev->dev.parent,
869 "Invalid channel number (%d)\n", channel);
870 return;
871 }
872
873 priv = dev->nets[channel];
874
875 if (completion_done(&priv->start_comp) &&
876 netif_queue_stopped(priv->netdev)) {
877 netif_wake_queue(priv->netdev);
878 } else {
879 netif_start_queue(priv->netdev);
880 complete(&priv->start_comp);
881 }
882}
883
884static void kvaser_usb_stop_chip_reply(const struct kvaser_usb *dev,
885 const struct kvaser_msg *msg)
886{
887 struct kvaser_usb_net_priv *priv;
888 u8 channel = msg->u.simple.channel;
889
890 if (channel >= dev->nchannels) {
891 dev_err(dev->udev->dev.parent,
892 "Invalid channel number (%d)\n", channel);
893 return;
894 }
895
896 priv = dev->nets[channel];
897
898 complete(&priv->stop_comp);
899}
900
/* Dispatch one message received from the device to its handler.
 *
 * Called from the RX bulk completion callback for every well-formed
 * message found in the transfer buffer.  Unknown IDs are only logged.
 */
static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
				      const struct kvaser_msg *msg)
{
	switch (msg->id) {
	case CMD_START_CHIP_REPLY:
		kvaser_usb_start_chip_reply(dev, msg);
		break;

	case CMD_STOP_CHIP_REPLY:
		kvaser_usb_stop_chip_reply(dev, msg);
		break;

	case CMD_RX_STD_MESSAGE:
	case CMD_RX_EXT_MESSAGE:
		kvaser_usb_rx_can_msg(dev, msg);
		break;

	case CMD_CHIP_STATE_EVENT:
	case CMD_CAN_ERROR_EVENT:
		kvaser_usb_rx_error(dev, msg);
		break;

	case CMD_LOG_MESSAGE:
		/* Log messages are only interesting when they carry an
		 * error frame; everything else is dropped silently. */
		if (msg->u.log_message.flags & MSG_FLAG_ERROR_FRAME)
			kvaser_usb_rx_error(dev, msg);
		break;

	case CMD_TX_ACKNOWLEDGE:
		kvaser_usb_tx_acknowledge(dev, msg);
		break;

	default:
		dev_warn(dev->udev->dev.parent,
			 "Unhandled message (%d)\n", msg->id);
		break;
	}
}
938
/* Completion handler for RX bulk URBs.
 *
 * Parses the transfer buffer, which may contain several back-to-back
 * variable-length messages, dispatches each one, and then resubmits the
 * URB.  The URB is resubmitted even after a non-fatal error status; only
 * -ENOENT/-ESHUTDOWN (URB killed / device going away) stop the cycle.
 */
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
	struct kvaser_usb *dev = urb->context;
	struct kvaser_msg *msg;
	int pos = 0;
	int err, i;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was killed or endpoint disabled: do not resubmit. */
		return;
	default:
		dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n",
			 urb->status);
		goto resubmit_urb;
	}

	/* Walk the buffer; each message declares its own length in the
	 * header, so stop as soon as a header would not fit. */
	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
		msg = urb->transfer_buffer + pos;

		/* A zero length marks the end of valid data. */
		if (!msg->len)
			break;

		/* Truncated trailing message: discard the remainder. */
		if (pos + msg->len > urb->actual_length) {
			dev_err(dev->udev->dev.parent, "Format error\n");
			break;
		}

		kvaser_usb_handle_message(dev, msg);

		pos += msg->len;
	}

resubmit_urb:
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_rcvbulkpipe(dev->udev,
					  dev->bulk_in->bEndpointAddress),
			  urb->transfer_buffer, RX_BUFFER_SIZE,
			  kvaser_usb_read_bulk_callback, dev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == -ENODEV) {
		/* Device is gone: detach all netdevs so further TX fails
		 * fast instead of timing out. */
		for (i = 0; i < dev->nchannels; i++) {
			if (!dev->nets[i])
				continue;

			netif_device_detach(dev->nets[i]->netdev);
		}
	} else if (err) {
		dev_err(dev->udev->dev.parent,
			"Failed resubmitting read bulk urb: %d\n", err);
	}

	return;
}
996
/* Allocate, fill and submit the pool of RX bulk URBs (once per device).
 *
 * Each URB gets a DMA-coherent buffer; buffers are remembered in
 * dev->rxbuf[]/rxbuf_dma[] so kvaser_usb_unlink_all_urbs() can free them
 * at teardown.  Partial success is tolerated: as long as at least one
 * URB was submitted, the function returns 0 (with a performance warning
 * if fewer than MAX_RX_URBS made it).
 */
static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev)
{
	int i, err = 0;

	/* Idempotent: the RX machinery is shared by all channels and is
	 * only set up on the first open. */
	if (dev->rxinitdone)
		return 0;

	for (i = 0; i < MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf = NULL;
		dma_addr_t buf_dma;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			dev_warn(dev->udev->dev.parent,
				 "No memory left for URBs\n");
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			dev_warn(dev->udev->dev.parent,
				 "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		usb_fill_bulk_urb(urb, dev->udev,
				  usb_rcvbulkpipe(dev->udev,
					  dev->bulk_in->bEndpointAddress),
				  buf, RX_BUFFER_SIZE,
				  kvaser_usb_read_bulk_callback,
				  dev);
		urb->transfer_dma = buf_dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &dev->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
					  buf_dma);
			usb_free_urb(urb);
			break;
		}

		dev->rxbuf[i] = buf;
		dev->rxbuf_dma[i] = buf_dma;

		/* Drop our initial reference; the anchor keeps the URB
		 * alive while it is in flight. */
		usb_free_urb(urb);
	}

	if (i == 0) {
		dev_warn(dev->udev->dev.parent,
			 "Cannot setup read URBs, error %d\n", err);
		return err;
	} else if (i < MAX_RX_URBS) {
		dev_warn(dev->udev->dev.parent,
			 "RX performances may be slow\n");
	}

	dev->rxinitdone = true;

	return 0;
}
1065
1066static int kvaser_usb_set_opt_mode(const struct kvaser_usb_net_priv *priv)
1067{
1068 struct kvaser_msg *msg;
1069 int rc;
1070
1071 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1072 if (!msg)
1073 return -ENOMEM;
1074
1075 msg->id = CMD_SET_CTRL_MODE;
1076 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_ctrl_mode);
1077 msg->u.ctrl_mode.tid = 0xff;
1078 msg->u.ctrl_mode.channel = priv->channel;
1079
1080 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
1081 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT;
1082 else
1083 msg->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL;
1084
1085 rc = kvaser_usb_send_msg(priv->dev, msg);
1086
1087 kfree(msg);
1088 return rc;
1089}
1090
1091static int kvaser_usb_start_chip(struct kvaser_usb_net_priv *priv)
1092{
1093 int err;
1094
1095 init_completion(&priv->start_comp);
1096
1097 err = kvaser_usb_send_simple_msg(priv->dev, CMD_START_CHIP,
1098 priv->channel);
1099 if (err)
1100 return err;
1101
1102 if (!wait_for_completion_timeout(&priv->start_comp,
1103 msecs_to_jiffies(START_TIMEOUT)))
1104 return -ETIMEDOUT;
1105
1106 return 0;
1107}
1108
/* ndo_open: bring the interface up.
 *
 * Sets up the shared RX URB pool (first open only), pushes the control
 * mode, starts the chip, and transitions the CAN state to
 * ERROR_ACTIVE.  Any failure unwinds via close_candev().
 */
static int kvaser_usb_open(struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	int err;

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_usb_setup_rx_urbs(dev);
	if (err)
		goto error;

	err = kvaser_usb_set_opt_mode(priv);
	if (err)
		goto error;

	err = kvaser_usb_start_chip(priv);
	if (err) {
		netdev_warn(netdev, "Cannot start device, error %d\n", err);
		goto error;
	}

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;

error:
	close_candev(netdev);
	return err;
}
1141
1142static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev)
1143{
1144 int i;
1145
1146 usb_kill_anchored_urbs(&dev->rx_submitted);
1147
1148 for (i = 0; i < MAX_RX_URBS; i++)
1149 usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
1150 dev->rxbuf[i],
1151 dev->rxbuf_dma[i]);
1152
1153 for (i = 0; i < MAX_NET_DEVICES; i++) {
1154 struct kvaser_usb_net_priv *priv = dev->nets[i];
1155
1156 if (priv)
1157 kvaser_usb_unlink_tx_urbs(priv);
1158 }
1159}
1160
1161static int kvaser_usb_stop_chip(struct kvaser_usb_net_priv *priv)
1162{
1163 int err;
1164
1165 init_completion(&priv->stop_comp);
1166
1167 err = kvaser_usb_send_simple_msg(priv->dev, CMD_STOP_CHIP,
1168 priv->channel);
1169 if (err)
1170 return err;
1171
1172 if (!wait_for_completion_timeout(&priv->stop_comp,
1173 msecs_to_jiffies(STOP_TIMEOUT)))
1174 return -ETIMEDOUT;
1175
1176 return 0;
1177}
1178
1179static int kvaser_usb_flush_queue(struct kvaser_usb_net_priv *priv)
1180{
1181 struct kvaser_msg *msg;
1182 int rc;
1183
1184 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1185 if (!msg)
1186 return -ENOMEM;
1187
1188 msg->id = CMD_FLUSH_QUEUE;
1189 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_flush_queue);
1190 msg->u.flush_queue.channel = priv->channel;
1191 msg->u.flush_queue.flags = 0x00;
1192
1193 rc = kvaser_usb_send_msg(priv->dev, msg);
1194
1195 kfree(msg);
1196 return rc;
1197}
1198
1199static int kvaser_usb_close(struct net_device *netdev)
1200{
1201 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1202 struct kvaser_usb *dev = priv->dev;
1203 int err;
1204
1205 netif_stop_queue(netdev);
1206
1207 err = kvaser_usb_flush_queue(priv);
1208 if (err)
1209 netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
1210
1211 if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
1212 netdev_warn(netdev, "Cannot reset card, error %d\n", err);
1213
1214 err = kvaser_usb_stop_chip(priv);
1215 if (err)
1216 netdev_warn(netdev, "Cannot stop device, error %d\n", err);
1217
1218 priv->can.state = CAN_STATE_STOPPED;
1219 close_candev(priv->netdev);
1220
1221 return 0;
1222}
1223
/* Completion handler for TX bulk URBs.
 *
 * Frees the kmalloc'ed message buffer and logs aborted transfers.  The
 * echo-skb bookkeeping is not done here; it happens when the device
 * sends CMD_TX_ACKNOWLEDGE.
 */
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
	struct kvaser_usb_tx_urb_context *context = urb->context;
	struct kvaser_usb_net_priv *priv;
	struct net_device *netdev;

	if (WARN_ON(!context))
		return;

	priv = context->priv;
	netdev = priv->netdev;

	kfree(urb->transfer_buffer);

	/* Device already detached: nothing left to report against. */
	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
}
1244
/* ndo_start_xmit: encode one CAN frame into a device message and submit
 * it as a bulk-out URB.
 *
 * A free TX context is claimed to pair this frame with its later
 * CMD_TX_ACKNOWLEDGE; the skb is parked via can_put_echo_skb() until
 * then.  The queue is stopped once MAX_TX_URBS transfers are in flight.
 *
 * NOTE(review): the tx_contexts scan below runs without a lock;
 * presumably the MAX_TX_URBS queue-stop keeps concurrent claims from
 * colliding — confirm against the ack path.
 */
static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
	struct kvaser_usb *dev = priv->dev;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct kvaser_usb_tx_urb_context *context = NULL;
	struct urb *urb;
	void *buf;
	struct kvaser_msg *msg;
	int i, err;
	int ret = NETDEV_TX_OK;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		stats->tx_dropped++;
		goto nourbmem;
	}

	buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		stats->tx_dropped++;
		goto nobufmem;
	}

	msg = buf;
	msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_tx_can);
	msg->u.tx_can.flags = 0;
	msg->u.tx_can.channel = priv->channel;

	/* Split the CAN ID across the device's 6-bit-per-byte wire
	 * layout; extended IDs use five bytes, standard IDs two. */
	if (cf->can_id & CAN_EFF_FLAG) {
		msg->id = CMD_TX_EXT_MESSAGE;
		msg->u.tx_can.msg[0] = (cf->can_id >> 24) & 0x1f;
		msg->u.tx_can.msg[1] = (cf->can_id >> 18) & 0x3f;
		msg->u.tx_can.msg[2] = (cf->can_id >> 14) & 0x0f;
		msg->u.tx_can.msg[3] = (cf->can_id >> 6) & 0xff;
		msg->u.tx_can.msg[4] = cf->can_id & 0x3f;
	} else {
		msg->id = CMD_TX_STD_MESSAGE;
		msg->u.tx_can.msg[0] = (cf->can_id >> 6) & 0x1f;
		msg->u.tx_can.msg[1] = cf->can_id & 0x3f;
	}

	msg->u.tx_can.msg[5] = cf->can_dlc;
	memcpy(&msg->u.tx_can.msg[6], cf->data, cf->can_dlc);

	if (cf->can_id & CAN_RTR_FLAG)
		msg->u.tx_can.flags |= MSG_FLAG_REMOTE_FRAME;

	/* A context with echo_index == MAX_TX_URBS is free. */
	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &priv->tx_contexts[i];
			break;
		}
	}

	if (!context) {
		netdev_warn(netdev, "cannot find free context\n");
		ret = NETDEV_TX_BUSY;
		goto releasebuf;
	}

	context->priv = priv;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	/* The tid echoes back in the ack so we can find this context. */
	msg->u.tx_can.tid = context->echo_index;

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev,
					  dev->bulk_out->bEndpointAddress),
			  buf, msg->len,
			  kvaser_usb_write_bulk_callback, context);
	usb_anchor_urb(urb, &priv->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&priv->active_tx_urbs);

	if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
		netif_stop_queue(netdev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		can_free_echo_skb(netdev, context->echo_index);

		skb = NULL; /* set to NULL to avoid double free in
			     * dev_kfree_skb(skb) */

		atomic_dec(&priv->active_tx_urbs);
		usb_unanchor_urb(urb);

		stats->tx_dropped++;

		if (err == -ENODEV)
			netif_device_detach(netdev);
		else
			netdev_warn(netdev, "Failed tx_urb %d\n", err);

		goto releasebuf;
	}

	/* Drop our reference; the anchor keeps the URB until complete. */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

releasebuf:
	kfree(buf);
nobufmem:
	usb_free_urb(urb);
nourbmem:
	dev_kfree_skb(skb);
	return ret;
}
1365
/* Network device operations shared by all channels of the adapter. */
static const struct net_device_ops kvaser_usb_netdev_ops = {
	.ndo_open = kvaser_usb_open,
	.ndo_stop = kvaser_usb_close,
	.ndo_start_xmit = kvaser_usb_start_xmit,
};
1371
/* Bit-timing limits advertised to the CAN core; the actual values are
 * the KVASER_USB_* constants defined earlier in this file.
 */
static const struct can_bittiming_const kvaser_usb_bittiming_const = {
	.name = "kvaser_usb",
	.tseg1_min = KVASER_USB_TSEG1_MIN,
	.tseg1_max = KVASER_USB_TSEG1_MAX,
	.tseg2_min = KVASER_USB_TSEG2_MIN,
	.tseg2_max = KVASER_USB_TSEG2_MAX,
	.sjw_max = KVASER_USB_SJW_MAX,
	.brp_min = KVASER_USB_BRP_MIN,
	.brp_max = KVASER_USB_BRP_MAX,
	.brp_inc = KVASER_USB_BRP_INC,
};
1383
1384static int kvaser_usb_set_bittiming(struct net_device *netdev)
1385{
1386 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1387 struct can_bittiming *bt = &priv->can.bittiming;
1388 struct kvaser_usb *dev = priv->dev;
1389 struct kvaser_msg *msg;
1390 int rc;
1391
1392 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1393 if (!msg)
1394 return -ENOMEM;
1395
1396 msg->id = CMD_SET_BUS_PARAMS;
1397 msg->len = MSG_HEADER_LEN + sizeof(struct kvaser_msg_busparams);
1398 msg->u.busparams.channel = priv->channel;
1399 msg->u.busparams.tid = 0xff;
1400 msg->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
1401 msg->u.busparams.sjw = bt->sjw;
1402 msg->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
1403 msg->u.busparams.tseg2 = bt->phase_seg2;
1404
1405 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
1406 msg->u.busparams.no_samp = 3;
1407 else
1408 msg->u.busparams.no_samp = 1;
1409
1410 rc = kvaser_usb_send_msg(dev, msg);
1411
1412 kfree(msg);
1413 return rc;
1414}
1415
1416static int kvaser_usb_set_mode(struct net_device *netdev,
1417 enum can_mode mode)
1418{
1419 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1420 int err;
1421
1422 switch (mode) {
1423 case CAN_MODE_START:
1424 err = kvaser_usb_simple_msg_async(priv, CMD_START_CHIP);
1425 if (err)
1426 return err;
1427 break;
1428 default:
1429 return -EOPNOTSUPP;
1430 }
1431
1432 return 0;
1433}
1434
1435static int kvaser_usb_get_berr_counter(const struct net_device *netdev,
1436 struct can_berr_counter *bec)
1437{
1438 struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
1439
1440 *bec = priv->bec;
1441
1442 return 0;
1443}
1444
/* Tear down every registered channel of the adapter.
 *
 * Two passes on purpose: all netdevs are unregistered first, then the
 * URBs/buffers (shared across channels) are unlinked, and only then are
 * the candevs freed — freeing before the URB teardown could leave
 * callbacks touching freed priv structures.
 */
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
	int i;

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		unregister_netdev(dev->nets[i]->netdev);
	}

	kvaser_usb_unlink_all_urbs(dev);

	for (i = 0; i < dev->nchannels; i++) {
		if (!dev->nets[i])
			continue;

		free_candev(dev->nets[i]->netdev);
	}
}
1465
/* Allocate, initialize and register the CAN netdev for one channel.
 *
 * Fills in the per-channel private state (completions, TX contexts,
 * CAN core callbacks and supported ctrl modes based on the USB id's
 * driver_info flags) and registers the candev.
 *
 * Returns 0 on success or a negative errno; on failure the candev is
 * freed and dev->nets[channel] is left NULL.
 */
static int kvaser_usb_init_one(struct usb_interface *intf,
			       const struct usb_device_id *id, int channel)
{
	struct kvaser_usb *dev = usb_get_intfdata(intf);
	struct net_device *netdev;
	struct kvaser_usb_net_priv *priv;
	int i, err;

	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
	if (!netdev) {
		dev_err(&intf->dev, "Cannot alloc candev\n");
		return -ENOMEM;
	}

	priv = netdev_priv(netdev);

	init_completion(&priv->start_comp);
	init_completion(&priv->stop_comp);

	init_usb_anchor(&priv->tx_submitted);
	atomic_set(&priv->active_tx_urbs, 0);

	/* echo_index == MAX_TX_URBS marks a TX context as free. */
	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++)
		priv->tx_contexts[i].echo_index = MAX_TX_URBS;

	priv->dev = dev;
	priv->netdev = netdev;
	priv->channel = channel;

	priv->can.state = CAN_STATE_STOPPED;
	priv->can.clock.freq = CAN_USB_CLOCK;
	priv->can.bittiming_const = &kvaser_usb_bittiming_const;
	priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
	priv->can.do_set_mode = kvaser_usb_set_mode;
	if (id->driver_info & KVASER_HAS_TXRX_ERRORS)
		priv->can.do_get_berr_counter = kvaser_usb_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
	if (id->driver_info & KVASER_HAS_SILENT_MODE)
		priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;

	/* TX frames are echoed back by the CAN core on acknowledge. */
	netdev->flags |= IFF_ECHO;

	netdev->netdev_ops = &kvaser_usb_netdev_ops;

	SET_NETDEV_DEV(netdev, &intf->dev);

	dev->nets[channel] = priv;

	err = register_candev(netdev);
	if (err) {
		dev_err(&intf->dev, "Failed to register can device\n");
		free_candev(netdev);
		dev->nets[channel] = NULL;
		return err;
	}

	netdev_dbg(netdev, "device registered\n");

	return 0;
}
1526
/* Locate the bulk-in and bulk-out endpoints of the first altsetting.
 *
 * If several endpoints of a kind exist, the last one wins.  If none is
 * found, *in / *out are left untouched — the caller (kvaser_usb_probe)
 * relies on them starting out NULL and checks for that.
 */
static void kvaser_usb_get_endpoints(const struct usb_interface *intf,
				     struct usb_endpoint_descriptor **in,
				     struct usb_endpoint_descriptor **out)
{
	const struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	int i;

	iface_desc = &intf->altsetting[0];

	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(endpoint))
			*in = endpoint;

		if (usb_endpoint_is_bulk_out(endpoint))
			*out = endpoint;
	}
}
1547
/* USB probe: set up the adapter-wide state and one netdev per channel.
 *
 * Resets every possible channel, queries software and card info from
 * the firmware (which yields dev->nchannels and dev->fw_version), then
 * creates the channel interfaces.  Any channel failure tears down all
 * channels created so far.
 */
static int kvaser_usb_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct kvaser_usb *dev;
	int err = -ENOMEM;
	int i;

	/* devm allocation: freed automatically on detach/probe failure. */
	dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvaser_usb_get_endpoints(intf, &dev->bulk_in, &dev->bulk_out);
	if (!dev->bulk_in || !dev->bulk_out) {
		dev_err(&intf->dev, "Cannot get usb endpoint(s)");
		return err;
	}

	dev->udev = interface_to_usbdev(intf);

	init_usb_anchor(&dev->rx_submitted);

	usb_set_intfdata(intf, dev);

	/* Best effort: reset all channels before querying the device. */
	for (i = 0; i < MAX_NET_DEVICES; i++)
		kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);

	err = kvaser_usb_get_software_info(dev);
	if (err) {
		dev_err(&intf->dev,
			"Cannot get software infos, error %d\n", err);
		return err;
	}

	err = kvaser_usb_get_card_info(dev);
	if (err) {
		dev_err(&intf->dev,
			"Cannot get card infos, error %d\n", err);
		return err;
	}

	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
		((dev->fw_version >> 24) & 0xff),
		((dev->fw_version >> 16) & 0xff),
		(dev->fw_version & 0xffff));

	for (i = 0; i < dev->nchannels; i++) {
		err = kvaser_usb_init_one(intf, id, i);
		if (err) {
			kvaser_usb_remove_interfaces(dev);
			return err;
		}
	}

	return 0;
}
1603
1604static void kvaser_usb_disconnect(struct usb_interface *intf)
1605{
1606 struct kvaser_usb *dev = usb_get_intfdata(intf);
1607
1608 usb_set_intfdata(intf, NULL);
1609
1610 if (!dev)
1611 return;
1612
1613 kvaser_usb_remove_interfaces(dev);
1614}
1615
/* USB driver glue; kvaser_usb_table (defined earlier in this file)
 * lists the supported Kvaser device IDs.
 */
static struct usb_driver kvaser_usb_driver = {
	.name = "kvaser_usb",
	.probe = kvaser_usb_probe,
	.disconnect = kvaser_usb_disconnect,
	.id_table = kvaser_usb_table,
};
1622
1623module_usb_driver(kvaser_usb_driver);
1624
1625MODULE_AUTHOR("Olivier Sobrie <olivier@sobrie.be>");
1626MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices");
1627MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index c4643c400d46..d9290ea788e0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -520,7 +520,6 @@ static int peak_usb_ndo_open(struct net_device *netdev)
520 return err; 520 return err;
521 } 521 }
522 522
523 dev->open_time = jiffies;
524 netif_start_queue(netdev); 523 netif_start_queue(netdev);
525 524
526 return 0; 525 return 0;
@@ -576,7 +575,6 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
576 575
577 close_candev(netdev); 576 close_candev(netdev);
578 577
579 dev->open_time = 0;
580 dev->can.state = CAN_STATE_STOPPED; 578 dev->can.state = CAN_STATE_STOPPED;
581 579
582 /* can set bus off now */ 580 /* can set bus off now */
@@ -661,9 +659,6 @@ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode)
661 struct peak_usb_device *dev = netdev_priv(netdev); 659 struct peak_usb_device *dev = netdev_priv(netdev);
662 int err = 0; 660 int err = 0;
663 661
664 if (!dev->open_time)
665 return -EINVAL;
666
667 switch (mode) { 662 switch (mode) {
668 case CAN_MODE_START: 663 case CAN_MODE_START:
669 err = peak_usb_restart(dev); 664 err = peak_usb_restart(dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index c8e5e91d7cb5..073b47ff8eee 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -104,7 +104,6 @@ struct peak_usb_device {
104 struct can_priv can; 104 struct can_priv can;
105 struct peak_usb_adapter *adapter; 105 struct peak_usb_adapter *adapter;
106 unsigned int ctrl_idx; 106 unsigned int ctrl_idx;
107 int open_time;
108 u32 state; 107 u32 state;
109 108
110 struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS]; 109 struct sk_buff *echo_skb[PCAN_USB_MAX_TX_URBS];
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index dd151d53d506..b8fe808b7957 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,5 +1,5 @@
1menu "Distributed Switch Architecture drivers" 1menu "Distributed Switch Architecture drivers"
2 depends on NET_DSA 2 depends on HAVE_NET_DSA
3 3
4config NET_DSA_MV88E6XXX 4config NET_DSA_MV88E6XXX
5 tristate 5 tristate
@@ -7,6 +7,7 @@ config NET_DSA_MV88E6XXX
7 7
8config NET_DSA_MV88E6060 8config NET_DSA_MV88E6060
9 tristate "Marvell 88E6060 ethernet switch chip support" 9 tristate "Marvell 88E6060 ethernet switch chip support"
10 select NET_DSA
10 select NET_DSA_TAG_TRAILER 11 select NET_DSA_TAG_TRAILER
11 ---help--- 12 ---help---
12 This enables support for the Marvell 88E6060 ethernet switch 13 This enables support for the Marvell 88E6060 ethernet switch
@@ -18,6 +19,7 @@ config NET_DSA_MV88E6XXX_NEED_PPU
18 19
19config NET_DSA_MV88E6131 20config NET_DSA_MV88E6131
20 tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support" 21 tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
22 select NET_DSA
21 select NET_DSA_MV88E6XXX 23 select NET_DSA_MV88E6XXX
22 select NET_DSA_MV88E6XXX_NEED_PPU 24 select NET_DSA_MV88E6XXX_NEED_PPU
23 select NET_DSA_TAG_DSA 25 select NET_DSA_TAG_DSA
@@ -27,6 +29,7 @@ config NET_DSA_MV88E6131
27 29
28config NET_DSA_MV88E6123_61_65 30config NET_DSA_MV88E6123_61_65
29 tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" 31 tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
32 select NET_DSA
30 select NET_DSA_MV88E6XXX 33 select NET_DSA_MV88E6XXX
31 select NET_DSA_TAG_EDSA 34 select NET_DSA_TAG_EDSA
32 ---help--- 35 ---help---
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 1a8eef2c3d58..633c709b9d99 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -92,7 +92,7 @@
92#include <asm/io.h> 92#include <asm/io.h>
93#include <asm/irq.h> 93#include <asm/irq.h>
94 94
95static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 95static char version[] = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
96 96
97#ifdef EL3_DEBUG 97#ifdef EL3_DEBUG
98static int el3_debug = EL3_DEBUG; 98static int el3_debug = EL3_DEBUG;
@@ -184,7 +184,7 @@ static int max_interrupt_work = 10;
184static int nopnp; 184static int nopnp;
185#endif 185#endif
186 186
187static int __devinit el3_common_init(struct net_device *dev); 187static int el3_common_init(struct net_device *dev);
188static void el3_common_remove(struct net_device *dev); 188static void el3_common_remove(struct net_device *dev);
189static ushort id_read_eeprom(int index); 189static ushort id_read_eeprom(int index);
190static ushort read_eeprom(int ioaddr, int index); 190static ushort read_eeprom(int ioaddr, int index);
@@ -270,9 +270,8 @@ static int el3_isa_id_sequence(__be16 *phys_addr)
270 270
271} 271}
272 272
273static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr, 273static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
274 int ioaddr, int irq, int if_port, 274 int irq, int if_port, enum el3_cardtype type)
275 enum el3_cardtype type)
276{ 275{
277 struct el3_private *lp = netdev_priv(dev); 276 struct el3_private *lp = netdev_priv(dev);
278 277
@@ -283,8 +282,7 @@ static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
283 lp->type = type; 282 lp->type = type;
284} 283}
285 284
286static int __devinit el3_isa_match(struct device *pdev, 285static int el3_isa_match(struct device *pdev, unsigned int ndev)
287 unsigned int ndev)
288{ 286{
289 struct net_device *dev; 287 struct net_device *dev;
290 int ioaddr, isa_irq, if_port, err; 288 int ioaddr, isa_irq, if_port, err;
@@ -341,7 +339,7 @@ static int __devinit el3_isa_match(struct device *pdev,
341 return 1; 339 return 1;
342} 340}
343 341
344static int __devexit el3_isa_remove(struct device *pdev, 342static int el3_isa_remove(struct device *pdev,
345 unsigned int ndev) 343 unsigned int ndev)
346{ 344{
347 el3_device_remove(pdev); 345 el3_device_remove(pdev);
@@ -382,7 +380,7 @@ static int el3_isa_resume(struct device *dev, unsigned int n)
382 380
383static struct isa_driver el3_isa_driver = { 381static struct isa_driver el3_isa_driver = {
384 .match = el3_isa_match, 382 .match = el3_isa_match,
385 .remove = __devexit_p(el3_isa_remove), 383 .remove = el3_isa_remove,
386#ifdef CONFIG_PM 384#ifdef CONFIG_PM
387 .suspend = el3_isa_suspend, 385 .suspend = el3_isa_suspend,
388 .resume = el3_isa_resume, 386 .resume = el3_isa_resume,
@@ -406,8 +404,7 @@ static struct pnp_device_id el3_pnp_ids[] = {
406}; 404};
407MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); 405MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
408 406
409static int __devinit el3_pnp_probe(struct pnp_dev *pdev, 407static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id)
410 const struct pnp_device_id *id)
411{ 408{
412 short i; 409 short i;
413 int ioaddr, irq, if_port; 410 int ioaddr, irq, if_port;
@@ -445,7 +442,7 @@ static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
445 return 0; 442 return 0;
446} 443}
447 444
448static void __devexit el3_pnp_remove(struct pnp_dev *pdev) 445static void el3_pnp_remove(struct pnp_dev *pdev)
449{ 446{
450 el3_common_remove(pnp_get_drvdata(pdev)); 447 el3_common_remove(pnp_get_drvdata(pdev));
451 pnp_set_drvdata(pdev, NULL); 448 pnp_set_drvdata(pdev, NULL);
@@ -467,7 +464,7 @@ static struct pnp_driver el3_pnp_driver = {
467 .name = "3c509", 464 .name = "3c509",
468 .id_table = el3_pnp_ids, 465 .id_table = el3_pnp_ids,
469 .probe = el3_pnp_probe, 466 .probe = el3_pnp_probe,
470 .remove = __devexit_p(el3_pnp_remove), 467 .remove = el3_pnp_remove,
471#ifdef CONFIG_PM 468#ifdef CONFIG_PM
472 .suspend = el3_pnp_suspend, 469 .suspend = el3_pnp_suspend,
473 .resume = el3_pnp_resume, 470 .resume = el3_pnp_resume,
@@ -496,7 +493,7 @@ static struct eisa_driver el3_eisa_driver = {
496 .driver = { 493 .driver = {
497 .name = "3c579", 494 .name = "3c579",
498 .probe = el3_eisa_probe, 495 .probe = el3_eisa_probe,
499 .remove = __devexit_p (el3_device_remove), 496 .remove = el3_device_remove,
500 .suspend = el3_suspend, 497 .suspend = el3_suspend,
501 .resume = el3_resume, 498 .resume = el3_resume,
502 } 499 }
@@ -519,7 +516,7 @@ static const struct net_device_ops netdev_ops = {
519#endif 516#endif
520}; 517};
521 518
522static int __devinit el3_common_init(struct net_device *dev) 519static int el3_common_init(struct net_device *dev)
523{ 520{
524 struct el3_private *lp = netdev_priv(dev); 521 struct el3_private *lp = netdev_priv(dev);
525 int err; 522 int err;
@@ -618,7 +615,7 @@ static int __init el3_eisa_probe (struct device *device)
618/* This remove works for all device types. 615/* This remove works for all device types.
619 * 616 *
620 * The net dev must be stored in the driver data field */ 617 * The net dev must be stored in the driver data field */
621static int __devexit el3_device_remove (struct device *device) 618static int el3_device_remove(struct device *device)
622{ 619{
623 struct net_device *dev; 620 struct net_device *dev;
624 621
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index e463d1036829..ed0feb3cc6fa 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -102,7 +102,7 @@ static int vortex_debug = 1;
102#include <linux/delay.h> 102#include <linux/delay.h>
103 103
104 104
105static const char version[] __devinitconst = 105static const char version[] =
106 DRV_NAME ": Donald Becker and others.\n"; 106 DRV_NAME ": Donald Becker and others.\n";
107 107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -277,7 +277,7 @@ static struct vortex_chip_info {
277 int flags; 277 int flags;
278 int drv_flags; 278 int drv_flags;
279 int io_size; 279 int io_size;
280} vortex_info_tbl[] __devinitdata = { 280} vortex_info_tbl[] = {
281 {"3c590 Vortex 10Mbps", 281 {"3c590 Vortex 10Mbps",
282 PCI_USES_MASTER, IS_VORTEX, 32, }, 282 PCI_USES_MASTER, IS_VORTEX, 32, },
283 {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */ 283 {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
@@ -931,7 +931,7 @@ static int __init vortex_eisa_probe(struct device *device)
931 return 0; 931 return 0;
932} 932}
933 933
934static int __devexit vortex_eisa_remove(struct device *device) 934static int vortex_eisa_remove(struct device *device)
935{ 935{
936 struct eisa_device *edev; 936 struct eisa_device *edev;
937 struct net_device *dev; 937 struct net_device *dev;
@@ -962,7 +962,7 @@ static struct eisa_driver vortex_eisa_driver = {
962 .driver = { 962 .driver = {
963 .name = "3c59x", 963 .name = "3c59x",
964 .probe = vortex_eisa_probe, 964 .probe = vortex_eisa_probe,
965 .remove = __devexit_p(vortex_eisa_remove) 965 .remove = vortex_eisa_remove
966 } 966 }
967}; 967};
968 968
@@ -1000,8 +1000,8 @@ static int __init vortex_eisa_init(void)
1000} 1000}
1001 1001
1002/* returns count (>= 0), or negative on error */ 1002/* returns count (>= 0), or negative on error */
1003static int __devinit vortex_init_one(struct pci_dev *pdev, 1003static int vortex_init_one(struct pci_dev *pdev,
1004 const struct pci_device_id *ent) 1004 const struct pci_device_id *ent)
1005{ 1005{
1006 int rc, unit, pci_bar; 1006 int rc, unit, pci_bar;
1007 struct vortex_chip_info *vci; 1007 struct vortex_chip_info *vci;
@@ -1088,9 +1088,8 @@ static const struct net_device_ops vortex_netdev_ops = {
1088 * 1088 *
1089 * NOTE: pdev can be NULL, for the case of a Compaq device 1089 * NOTE: pdev can be NULL, for the case of a Compaq device
1090 */ 1090 */
1091static int __devinit vortex_probe1(struct device *gendev, 1091static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1092 void __iomem *ioaddr, int irq, 1092 int chip_idx, int card_idx)
1093 int chip_idx, int card_idx)
1094{ 1093{
1095 struct vortex_private *vp; 1094 struct vortex_private *vp;
1096 int option; 1095 int option;
@@ -3222,7 +3221,7 @@ static void acpi_set_WOL(struct net_device *dev)
3222} 3221}
3223 3222
3224 3223
3225static void __devexit vortex_remove_one(struct pci_dev *pdev) 3224static void vortex_remove_one(struct pci_dev *pdev)
3226{ 3225{
3227 struct net_device *dev = pci_get_drvdata(pdev); 3226 struct net_device *dev = pci_get_drvdata(pdev);
3228 struct vortex_private *vp; 3227 struct vortex_private *vp;
@@ -3265,7 +3264,7 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)
3265static struct pci_driver vortex_driver = { 3264static struct pci_driver vortex_driver = {
3266 .name = "3c59x", 3265 .name = "3c59x",
3267 .probe = vortex_init_one, 3266 .probe = vortex_init_one,
3268 .remove = __devexit_p(vortex_remove_one), 3267 .remove = vortex_remove_one,
3269 .id_table = vortex_pci_tbl, 3268 .id_table = vortex_pci_tbl,
3270 .driver.pm = VORTEX_PM_OPS, 3269 .driver.pm = VORTEX_PM_OPS,
3271}; 3270};
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index bad4fa6815c5..eb56174469a7 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -80,7 +80,7 @@ config PCMCIA_3C589
80 80
81config VORTEX 81config VORTEX
82 tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" 82 tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
83 depends on (PCI || EISA) 83 depends on (PCI || EISA) && HAS_IOPORT
84 select NET_CORE 84 select NET_CORE
85 select MII 85 select MII
86 ---help--- 86 ---help---
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index bb9670f29b59..27aaaf99e73e 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -168,7 +168,7 @@ enum typhoon_cards {
168}; 168};
169 169
170/* directly indexed by enum typhoon_cards, above */ 170/* directly indexed by enum typhoon_cards, above */
171static struct typhoon_card_info typhoon_card_info[] __devinitdata = { 171static struct typhoon_card_info typhoon_card_info[] = {
172 { "3Com Typhoon (3C990-TX)", 172 { "3Com Typhoon (3C990-TX)",
173 TYPHOON_CRYPTO_NONE}, 173 TYPHOON_CRYPTO_NONE},
174 { "3Com Typhoon (3CR990-TX-95)", 174 { "3Com Typhoon (3CR990-TX-95)",
@@ -2200,7 +2200,7 @@ need_resume:
2200} 2200}
2201#endif 2201#endif
2202 2202
2203static int __devinit 2203static int
2204typhoon_test_mmio(struct pci_dev *pdev) 2204typhoon_test_mmio(struct pci_dev *pdev)
2205{ 2205{
2206 void __iomem *ioaddr = pci_iomap(pdev, 1, 128); 2206 void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
@@ -2258,7 +2258,7 @@ static const struct net_device_ops typhoon_netdev_ops = {
2258 .ndo_change_mtu = eth_change_mtu, 2258 .ndo_change_mtu = eth_change_mtu,
2259}; 2259};
2260 2260
2261static int __devinit 2261static int
2262typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2262typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2263{ 2263{
2264 struct net_device *dev; 2264 struct net_device *dev;
@@ -2509,7 +2509,7 @@ error_out:
2509 return err; 2509 return err;
2510} 2510}
2511 2511
2512static void __devexit 2512static void
2513typhoon_remove_one(struct pci_dev *pdev) 2513typhoon_remove_one(struct pci_dev *pdev)
2514{ 2514{
2515 struct net_device *dev = pci_get_drvdata(pdev); 2515 struct net_device *dev = pci_get_drvdata(pdev);
@@ -2533,7 +2533,7 @@ static struct pci_driver typhoon_driver = {
2533 .name = KBUILD_MODNAME, 2533 .name = KBUILD_MODNAME,
2534 .id_table = typhoon_pci_tbl, 2534 .id_table = typhoon_pci_tbl,
2535 .probe = typhoon_init_one, 2535 .probe = typhoon_init_one,
2536 .remove = __devexit_p(typhoon_remove_one), 2536 .remove = typhoon_remove_one,
2537#ifdef CONFIG_PM 2537#ifdef CONFIG_PM
2538 .suspend = typhoon_suspend, 2538 .suspend = typhoon_suspend,
2539 .resume = typhoon_resume, 2539 .resume = typhoon_resume,
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 203ff9dccadb..0338352bc036 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -191,11 +191,11 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
191 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 191 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
192 192
193 if (ei_local->word16) 193 if (ei_local->word16)
194 readsw(nic_base + NE_DATAPORT, hdr, 194 ioread16_rep(nic_base + NE_DATAPORT, hdr,
195 sizeof(struct e8390_pkt_hdr) >> 1); 195 sizeof(struct e8390_pkt_hdr) >> 1);
196 else 196 else
197 readsb(nic_base + NE_DATAPORT, hdr, 197 ioread8_rep(nic_base + NE_DATAPORT, hdr,
198 sizeof(struct e8390_pkt_hdr)); 198 sizeof(struct e8390_pkt_hdr));
199 199
200 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 200 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
201 ei_local->dmaing &= ~0x01; 201 ei_local->dmaing &= ~0x01;
@@ -237,12 +237,12 @@ static void ax_block_input(struct net_device *dev, int count,
237 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); 237 ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
238 238
239 if (ei_local->word16) { 239 if (ei_local->word16) {
240 readsw(nic_base + NE_DATAPORT, buf, count >> 1); 240 ioread16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
241 if (count & 0x01) 241 if (count & 0x01)
242 buf[count-1] = ei_inb(nic_base + NE_DATAPORT); 242 buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
243 243
244 } else { 244 } else {
245 readsb(nic_base + NE_DATAPORT, buf, count); 245 ioread8_rep(nic_base + NE_DATAPORT, buf, count);
246 } 246 }
247 247
248 ei_local->dmaing &= ~1; 248 ei_local->dmaing &= ~1;
@@ -286,9 +286,9 @@ static void ax_block_output(struct net_device *dev, int count,
286 286
287 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); 287 ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
288 if (ei_local->word16) 288 if (ei_local->word16)
289 writesw(nic_base + NE_DATAPORT, buf, count >> 1); 289 iowrite16_rep(nic_base + NE_DATAPORT, buf, count >> 1);
290 else 290 else
291 writesb(nic_base + NE_DATAPORT, buf, count); 291 iowrite8_rep(nic_base + NE_DATAPORT, buf, count);
292 292
293 dma_start = jiffies; 293 dma_start = jiffies;
294 294
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 8322c54972f3..78c6fb4b1143 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -463,12 +463,6 @@ etherh_open(struct net_device *dev)
463{ 463{
464 struct ei_device *ei_local = netdev_priv(dev); 464 struct ei_device *ei_local = netdev_priv(dev);
465 465
466 if (!is_valid_ether_addr(dev->dev_addr)) {
467 printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
468 dev->name);
469 return -EINVAL;
470 }
471
472 if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev)) 466 if (request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev))
473 return -EAGAIN; 467 return -EAGAIN;
474 468
@@ -527,7 +521,7 @@ static void __init etherh_banner(void)
527 * Read the ethernet address string from the on board rom. 521 * Read the ethernet address string from the on board rom.
528 * This is an ascii string... 522 * This is an ascii string...
529 */ 523 */
530static int __devinit etherh_addr(char *addr, struct expansion_card *ec) 524static int etherh_addr(char *addr, struct expansion_card *ec)
531{ 525{
532 struct in_chunk_dir cd; 526 struct in_chunk_dir cd;
533 char *s; 527 char *s;
@@ -657,7 +651,7 @@ static const struct net_device_ops etherh_netdev_ops = {
657static u32 etherh_regoffsets[16]; 651static u32 etherh_regoffsets[16];
658static u32 etherm_regoffsets[16]; 652static u32 etherm_regoffsets[16];
659 653
660static int __devinit 654static int
661etherh_probe(struct expansion_card *ec, const struct ecard_id *id) 655etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
662{ 656{
663 const struct etherh_data *data = id->data; 657 const struct etherh_data *data = id->data;
@@ -775,7 +769,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
775 return ret; 769 return ret;
776} 770}
777 771
778static void __devexit etherh_remove(struct expansion_card *ec) 772static void etherh_remove(struct expansion_card *ec)
779{ 773{
780 struct net_device *dev = ecard_get_drvdata(ec); 774 struct net_device *dev = ecard_get_drvdata(ec);
781 775
@@ -839,7 +833,7 @@ static const struct ecard_id etherh_ids[] = {
839 833
840static struct ecard_driver etherh_driver = { 834static struct ecard_driver etherh_driver = {
841 .probe = etherh_probe, 835 .probe = etherh_probe,
842 .remove = __devexit_p(etherh_remove), 836 .remove = etherh_remove,
843 .id_table = etherh_ids, 837 .id_table = etherh_ids,
844 .drv = { 838 .drv = {
845 .name = DRV_NAME, 839 .name = DRV_NAME,
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 5370c884620b..fb3dd4399cf3 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -53,9 +53,9 @@ static const char version[] =
53#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8)) 53#define WORDSWAP(a) ((((a)>>8)&0xff) | ((a)<<8))
54 54
55 55
56static int __devinit hydra_init_one(struct zorro_dev *z, 56static int hydra_init_one(struct zorro_dev *z,
57 const struct zorro_device_id *ent); 57 const struct zorro_device_id *ent);
58static int __devinit hydra_init(struct zorro_dev *z); 58static int hydra_init(struct zorro_dev *z);
59static int hydra_open(struct net_device *dev); 59static int hydra_open(struct net_device *dev);
60static int hydra_close(struct net_device *dev); 60static int hydra_close(struct net_device *dev);
61static void hydra_reset_8390(struct net_device *dev); 61static void hydra_reset_8390(struct net_device *dev);
@@ -65,9 +65,9 @@ static void hydra_block_input(struct net_device *dev, int count,
65 struct sk_buff *skb, int ring_offset); 65 struct sk_buff *skb, int ring_offset);
66static void hydra_block_output(struct net_device *dev, int count, 66static void hydra_block_output(struct net_device *dev, int count,
67 const unsigned char *buf, int start_page); 67 const unsigned char *buf, int start_page);
68static void __devexit hydra_remove_one(struct zorro_dev *z); 68static void hydra_remove_one(struct zorro_dev *z);
69 69
70static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = { 70static struct zorro_device_id hydra_zorro_tbl[] = {
71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, 71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
72 { 0 } 72 { 0 }
73}; 73};
@@ -77,11 +77,11 @@ static struct zorro_driver hydra_driver = {
77 .name = "hydra", 77 .name = "hydra",
78 .id_table = hydra_zorro_tbl, 78 .id_table = hydra_zorro_tbl,
79 .probe = hydra_init_one, 79 .probe = hydra_init_one,
80 .remove = __devexit_p(hydra_remove_one), 80 .remove = hydra_remove_one,
81}; 81};
82 82
83static int __devinit hydra_init_one(struct zorro_dev *z, 83static int hydra_init_one(struct zorro_dev *z,
84 const struct zorro_device_id *ent) 84 const struct zorro_device_id *ent)
85{ 85{
86 int err; 86 int err;
87 87
@@ -110,7 +110,7 @@ static const struct net_device_ops hydra_netdev_ops = {
110#endif 110#endif
111}; 111};
112 112
113static int __devinit hydra_init(struct zorro_dev *z) 113static int hydra_init(struct zorro_dev *z)
114{ 114{
115 struct net_device *dev; 115 struct net_device *dev;
116 unsigned long board = ZTWO_VADDR(z->resource.start); 116 unsigned long board = ZTWO_VADDR(z->resource.start);
@@ -247,7 +247,7 @@ static void hydra_block_output(struct net_device *dev, int count,
247 z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count); 247 z_memcpy_toio(mem_base+((start_page - NESM_START_PG)<<8), buf, count);
248} 248}
249 249
250static void __devexit hydra_remove_one(struct zorro_dev *z) 250static void hydra_remove_one(struct zorro_dev *z)
251{ 251{
252 struct net_device *dev = zorro_get_drvdata(z); 252 struct net_device *dev = zorro_get_drvdata(z);
253 253
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 5e8845febfb8..c0c127913dec 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -61,7 +61,7 @@ static int options[MAX_UNITS];
61#include "8390.h" 61#include "8390.h"
62 62
63/* These identify the driver base version and may not be removed. */ 63/* These identify the driver base version and may not be removed. */
64static const char version[] __devinitconst = 64static const char version[] =
65 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE 65 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
66 " D. Becker/P. Gortmaker\n"; 66 " D. Becker/P. Gortmaker\n";
67 67
@@ -119,7 +119,7 @@ enum ne2k_pci_chipsets {
119static struct { 119static struct {
120 char *name; 120 char *name;
121 int flags; 121 int flags;
122} pci_clone_list[] __devinitdata = { 122} pci_clone_list[] = {
123 {"RealTek RTL-8029", REALTEK_FDX}, 123 {"RealTek RTL-8029", REALTEK_FDX},
124 {"Winbond 89C940", 0}, 124 {"Winbond 89C940", 0},
125 {"Compex RL2000", 0}, 125 {"Compex RL2000", 0},
@@ -215,8 +215,8 @@ static const struct net_device_ops ne2k_netdev_ops = {
215#endif 215#endif
216}; 216};
217 217
218static int __devinit ne2k_pci_init_one (struct pci_dev *pdev, 218static int ne2k_pci_init_one(struct pci_dev *pdev,
219 const struct pci_device_id *ent) 219 const struct pci_device_id *ent)
220{ 220{
221 struct net_device *dev; 221 struct net_device *dev;
222 int i; 222 int i;
@@ -647,7 +647,7 @@ static const struct ethtool_ops ne2k_pci_ethtool_ops = {
647 .get_drvinfo = ne2k_pci_get_drvinfo, 647 .get_drvinfo = ne2k_pci_get_drvinfo,
648}; 648};
649 649
650static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) 650static void ne2k_pci_remove_one(struct pci_dev *pdev)
651{ 651{
652 struct net_device *dev = pci_get_drvdata(pdev); 652 struct net_device *dev = pci_get_drvdata(pdev);
653 653
@@ -696,7 +696,7 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
696static struct pci_driver ne2k_driver = { 696static struct pci_driver ne2k_driver = {
697 .name = DRV_NAME, 697 .name = DRV_NAME,
698 .probe = ne2k_pci_init_one, 698 .probe = ne2k_pci_init_one,
699 .remove = __devexit_p(ne2k_pci_remove_one), 699 .remove = ne2k_pci_remove_one,
700 .id_table = ne2k_pci_tbl, 700 .id_table = ne2k_pci_tbl,
701#ifdef CONFIG_PM 701#ifdef CONFIG_PM
702 .suspend = ne2k_pci_suspend, 702 .suspend = ne2k_pci_suspend,
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index e3f57427d5c5..ebcdb52ec739 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -222,7 +222,7 @@ static int __init ne3210_eisa_probe (struct device *device)
222 return retval; 222 return retval;
223} 223}
224 224
225static int __devexit ne3210_eisa_remove (struct device *device) 225static int ne3210_eisa_remove(struct device *device)
226{ 226{
227 struct net_device *dev = dev_get_drvdata(device); 227 struct net_device *dev = dev_get_drvdata(device);
228 unsigned long ioaddr = to_eisa_device (device)->base_addr; 228 unsigned long ioaddr = to_eisa_device (device)->base_addr;
@@ -324,7 +324,7 @@ static struct eisa_driver ne3210_eisa_driver = {
324 .driver = { 324 .driver = {
325 .name = "ne3210", 325 .name = "ne3210",
326 .probe = ne3210_eisa_probe, 326 .probe = ne3210_eisa_probe,
327 .remove = __devexit_p (ne3210_eisa_remove), 327 .remove = ne3210_eisa_remove,
328 }, 328 },
329}; 329};
330 330
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 7818e6397e91..85ec4c2d2645 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -75,7 +75,7 @@ static struct card_info {
75 zorro_id id; 75 zorro_id id;
76 const char *name; 76 const char *name;
77 unsigned int offset; 77 unsigned int offset;
78} cards[] __devinitdata = { 78} cards[] = {
79 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 }, 79 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, "Ariadne II", 0x0600 },
80 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 }, 80 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, "X-Surf", 0x8600 },
81}; 81};
@@ -254,7 +254,7 @@ static int zorro8390_close(struct net_device *dev)
254 return 0; 254 return 0;
255} 255}
256 256
257static void __devexit zorro8390_remove_one(struct zorro_dev *z) 257static void zorro8390_remove_one(struct zorro_dev *z)
258{ 258{
259 struct net_device *dev = zorro_get_drvdata(z); 259 struct net_device *dev = zorro_get_drvdata(z);
260 260
@@ -264,7 +264,7 @@ static void __devexit zorro8390_remove_one(struct zorro_dev *z)
264 free_netdev(dev); 264 free_netdev(dev);
265} 265}
266 266
267static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = { 267static struct zorro_device_id zorro8390_zorro_tbl[] = {
268 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, }, 268 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2, },
269 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, 269 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
270 { 0 } 270 { 0 }
@@ -286,9 +286,8 @@ static const struct net_device_ops zorro8390_netdev_ops = {
286#endif 286#endif
287}; 287};
288 288
289static int __devinit zorro8390_init(struct net_device *dev, 289static int zorro8390_init(struct net_device *dev, unsigned long board,
290 unsigned long board, const char *name, 290 const char *name, unsigned long ioaddr)
291 unsigned long ioaddr)
292{ 291{
293 int i; 292 int i;
294 int err; 293 int err;
@@ -396,8 +395,8 @@ static int __devinit zorro8390_init(struct net_device *dev,
396 return 0; 395 return 0;
397} 396}
398 397
399static int __devinit zorro8390_init_one(struct zorro_dev *z, 398static int zorro8390_init_one(struct zorro_dev *z,
400 const struct zorro_device_id *ent) 399 const struct zorro_device_id *ent)
401{ 400{
402 struct net_device *dev; 401 struct net_device *dev;
403 unsigned long board, ioaddr; 402 unsigned long board, ioaddr;
@@ -432,7 +431,7 @@ static struct zorro_driver zorro8390_driver = {
432 .name = "zorro8390", 431 .name = "zorro8390",
433 .id_table = zorro8390_zorro_tbl, 432 .id_table = zorro8390_zorro_tbl,
434 .probe = zorro8390_init_one, 433 .probe = zorro8390_init_one,
435 .remove = __devexit_p(zorro8390_remove_one), 434 .remove = zorro8390_remove_one,
436}; 435};
437 436
438static int __init zorro8390_init_module(void) 437static int __init zorro8390_init_module(void)
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 5b65992c2a0a..549b77500579 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -166,7 +166,7 @@ static int rx_copybreak /* = 0 */;
166#define FIRMWARE_TX "adaptec/starfire_tx.bin" 166#define FIRMWARE_TX "adaptec/starfire_tx.bin"
167 167
168/* These identify the driver base version and may not be removed. */ 168/* These identify the driver base version and may not be removed. */
169static const char version[] __devinitconst = 169static const char version[] =
170KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n" 170KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
171" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 171" (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
172 172
@@ -295,7 +295,7 @@ MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
295static const struct chip_info { 295static const struct chip_info {
296 const char *name; 296 const char *name;
297 int drv_flags; 297 int drv_flags;
298} netdrv_tbl[] __devinitconst = { 298} netdrv_tbl[] = {
299 { "Adaptec Starfire 6915", CanHaveMII }, 299 { "Adaptec Starfire 6915", CanHaveMII },
300}; 300};
301 301
@@ -641,8 +641,8 @@ static const struct net_device_ops netdev_ops = {
641#endif 641#endif
642}; 642};
643 643
644static int __devinit starfire_init_one(struct pci_dev *pdev, 644static int starfire_init_one(struct pci_dev *pdev,
645 const struct pci_device_id *ent) 645 const struct pci_device_id *ent)
646{ 646{
647 struct device *d = &pdev->dev; 647 struct device *d = &pdev->dev;
648 struct netdev_private *np; 648 struct netdev_private *np;
@@ -1990,7 +1990,7 @@ static int starfire_resume(struct pci_dev *pdev)
1990#endif /* CONFIG_PM */ 1990#endif /* CONFIG_PM */
1991 1991
1992 1992
1993static void __devexit starfire_remove_one (struct pci_dev *pdev) 1993static void starfire_remove_one(struct pci_dev *pdev)
1994{ 1994{
1995 struct net_device *dev = pci_get_drvdata(pdev); 1995 struct net_device *dev = pci_get_drvdata(pdev);
1996 struct netdev_private *np = netdev_priv(dev); 1996 struct netdev_private *np = netdev_priv(dev);
@@ -2018,7 +2018,7 @@ static void __devexit starfire_remove_one (struct pci_dev *pdev)
2018static struct pci_driver starfire_driver = { 2018static struct pci_driver starfire_driver = {
2019 .name = DRV_NAME, 2019 .name = DRV_NAME,
2020 .probe = starfire_init_one, 2020 .probe = starfire_init_one,
2021 .remove = __devexit_p(starfire_remove_one), 2021 .remove = starfire_remove_one,
2022#ifdef CONFIG_PM 2022#ifdef CONFIG_PM
2023 .suspend = starfire_suspend, 2023 .suspend = starfire_suspend,
2024 .resume = starfire_resume, 2024 .resume = starfire_resume,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d37ae4a..e49c0eff040b 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -61,7 +61,7 @@ config BFIN_RX_DESC_NUM
61 61
62config BFIN_MAC_USE_HWSTAMP 62config BFIN_MAC_USE_HWSTAMP
63 bool "Use IEEE 1588 hwstamp" 63 bool "Use IEEE 1588 hwstamp"
64 depends on BFIN_MAC && BF518 64 select PTP_1588_CLOCK
65 default y 65 default y
66 ---help--- 66 ---help---
67 To support the IEEE 1588 Precision Time Protocol (PTP), select y here 67 To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426e1085..c1fdb8be8bee 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,14 +548,17 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
548 return 0; 548 return 0;
549} 549}
550 550
551#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
551static int bfin_mac_ethtool_get_ts_info(struct net_device *dev, 552static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
552 struct ethtool_ts_info *info) 553 struct ethtool_ts_info *info)
553{ 554{
555 struct bfin_mac_local *lp = netdev_priv(dev);
556
554 info->so_timestamping = 557 info->so_timestamping =
555 SOF_TIMESTAMPING_TX_HARDWARE | 558 SOF_TIMESTAMPING_TX_HARDWARE |
556 SOF_TIMESTAMPING_RX_HARDWARE | 559 SOF_TIMESTAMPING_RX_HARDWARE |
557 SOF_TIMESTAMPING_SYS_HARDWARE; 560 SOF_TIMESTAMPING_RAW_HARDWARE;
558 info->phc_index = -1; 561 info->phc_index = lp->phc_index;
559 info->tx_types = 562 info->tx_types =
560 (1 << HWTSTAMP_TX_OFF) | 563 (1 << HWTSTAMP_TX_OFF) |
561 (1 << HWTSTAMP_TX_ON); 564 (1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); 569 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
567 return 0; 570 return 0;
568} 571}
572#endif
569 573
570static const struct ethtool_ops bfin_mac_ethtool_ops = { 574static const struct ethtool_ops bfin_mac_ethtool_ops = {
571 .get_settings = bfin_mac_ethtool_getsettings, 575 .get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
574 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 578 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
575 .get_wol = bfin_mac_ethtool_getwol, 579 .get_wol = bfin_mac_ethtool_getwol,
576 .set_wol = bfin_mac_ethtool_setwol, 580 .set_wol = bfin_mac_ethtool_setwol,
581#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
577 .get_ts_info = bfin_mac_ethtool_get_ts_info, 582 .get_ts_info = bfin_mac_ethtool_get_ts_info,
583#endif
578}; 584};
579 585
580/**************************************************************************/ 586/**************************************************************************/
@@ -649,6 +655,20 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
649#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP 655#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
650#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE) 656#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
651 657
658static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
659{
660 u32 ipn = 1000000000UL / input_clk;
661 u32 ppn = 1;
662 unsigned int shift = 0;
663
664 while (ppn <= ipn) {
665 ppn <<= 1;
666 shift++;
667 }
668 *shift_result = shift;
669 return 1000000000UL / ppn;
670}
671
652static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev, 672static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
653 struct ifreq *ifr, int cmd) 673 struct ifreq *ifr, int cmd)
654{ 674{
@@ -798,19 +818,7 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
798 bfin_read_EMAC_PTP_TXSNAPLO(); 818 bfin_read_EMAC_PTP_TXSNAPLO();
799 bfin_read_EMAC_PTP_TXSNAPHI(); 819 bfin_read_EMAC_PTP_TXSNAPHI();
800 820
801 /*
802 * Set registers so that rollover occurs soon to test this.
803 */
804 bfin_write_EMAC_PTP_TIMELO(0x00000000);
805 bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
806
807 SSYNC(); 821 SSYNC();
808
809 lp->compare.last_update = 0;
810 timecounter_init(&lp->clock,
811 &lp->cycles,
812 ktime_to_ns(ktime_get_real()));
813 timecompare_update(&lp->compare, 0);
814 } 822 }
815 823
816 lp->stamp_cfg = config; 824 lp->stamp_cfg = config;
@@ -818,15 +826,6 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
818 -EFAULT : 0; 826 -EFAULT : 0;
819} 827}
820 828
821static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
822{
823 ktime_t sys = ktime_get_real();
824
825 pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
826 __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
827 sys.tv.nsec, cmp->offset, cmp->skew);
828}
829
830static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb) 829static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
831{ 830{
832 struct bfin_mac_local *lp = netdev_priv(netdev); 831 struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
857 regval = bfin_read_EMAC_PTP_TXSNAPLO(); 856 regval = bfin_read_EMAC_PTP_TXSNAPLO();
858 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32; 857 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
859 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 858 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
860 ns = timecounter_cyc2time(&lp->clock, 859 ns = regval << lp->shift;
861 regval);
862 timecompare_update(&lp->compare, ns);
863 shhwtstamps.hwtstamp = ns_to_ktime(ns); 860 shhwtstamps.hwtstamp = ns_to_ktime(ns);
864 shhwtstamps.syststamp =
865 timecompare_transform(&lp->compare, ns);
866 skb_tstamp_tx(skb, &shhwtstamps); 861 skb_tstamp_tx(skb, &shhwtstamps);
867
868 bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
869 } 862 }
870 } 863 }
871} 864}
@@ -888,55 +881,184 @@ static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
888 881
889 regval = bfin_read_EMAC_PTP_RXSNAPLO(); 882 regval = bfin_read_EMAC_PTP_RXSNAPLO();
890 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32; 883 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
891 ns = timecounter_cyc2time(&lp->clock, regval); 884 ns = regval << lp->shift;
892 timecompare_update(&lp->compare, ns);
893 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 885 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
894 shhwtstamps->hwtstamp = ns_to_ktime(ns); 886 shhwtstamps->hwtstamp = ns_to_ktime(ns);
895 shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns); 887}
888
889static void bfin_mac_hwtstamp_init(struct net_device *netdev)
890{
891 struct bfin_mac_local *lp = netdev_priv(netdev);
892 u64 addend, ppb;
893 u32 input_clk, phc_clk;
894
895 /* Initialize hardware timer */
896 input_clk = get_sclk();
897 phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
898 addend = phc_clk * (1ULL << 32);
899 do_div(addend, input_clk);
900 bfin_write_EMAC_PTP_ADDEND((u32)addend);
901
902 lp->addend = addend;
903 ppb = 1000000000ULL * input_clk;
904 do_div(ppb, phc_clk);
905 lp->max_ppb = ppb - 1000000000ULL - 1ULL;
896 906
897 bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare); 907 /* Initialize hwstamp config */
908 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
909 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
898} 910}
899 911
900/* 912static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
901 * bfin_read_clock - read raw cycle counter (to be used by time counter)
902 */
903static cycle_t bfin_read_clock(const struct cyclecounter *tc)
904{ 913{
905 u64 stamp; 914 u64 ns;
915 u32 lo, hi;
916
917 lo = bfin_read_EMAC_PTP_TIMELO();
918 hi = bfin_read_EMAC_PTP_TIMEHI();
906 919
907 stamp = bfin_read_EMAC_PTP_TIMELO(); 920 ns = ((u64) hi) << 32;
908 stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL; 921 ns |= lo;
922 ns <<= lp->shift;
909 923
910 return stamp; 924 return ns;
911} 925}
912 926
913#define PTP_CLK 25000000 927static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
928{
929 u32 hi, lo;
914 930
915static void bfin_mac_hwtstamp_init(struct net_device *netdev) 931 ns >>= lp->shift;
932 hi = ns >> 32;
933 lo = ns & 0xffffffff;
934
935 bfin_write_EMAC_PTP_TIMELO(lo);
936 bfin_write_EMAC_PTP_TIMEHI(hi);
937}
938
939/* PTP Hardware Clock operations */
940
941static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
942{
943 u64 adj;
944 u32 diff, addend;
945 int neg_adj = 0;
946 struct bfin_mac_local *lp =
947 container_of(ptp, struct bfin_mac_local, caps);
948
949 if (ppb < 0) {
950 neg_adj = 1;
951 ppb = -ppb;
952 }
953 addend = lp->addend;
954 adj = addend;
955 adj *= ppb;
956 diff = div_u64(adj, 1000000000ULL);
957
958 addend = neg_adj ? addend - diff : addend + diff;
959
960 bfin_write_EMAC_PTP_ADDEND(addend);
961
962 return 0;
963}
964
965static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
966{
967 s64 now;
968 unsigned long flags;
969 struct bfin_mac_local *lp =
970 container_of(ptp, struct bfin_mac_local, caps);
971
972 spin_lock_irqsave(&lp->phc_lock, flags);
973
974 now = bfin_ptp_time_read(lp);
975 now += delta;
976 bfin_ptp_time_write(lp, now);
977
978 spin_unlock_irqrestore(&lp->phc_lock, flags);
979
980 return 0;
981}
982
983static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
984{
985 u64 ns;
986 u32 remainder;
987 unsigned long flags;
988 struct bfin_mac_local *lp =
989 container_of(ptp, struct bfin_mac_local, caps);
990
991 spin_lock_irqsave(&lp->phc_lock, flags);
992
993 ns = bfin_ptp_time_read(lp);
994
995 spin_unlock_irqrestore(&lp->phc_lock, flags);
996
997 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
998 ts->tv_nsec = remainder;
999 return 0;
1000}
1001
1002static int bfin_ptp_settime(struct ptp_clock_info *ptp,
1003 const struct timespec *ts)
1004{
1005 u64 ns;
1006 unsigned long flags;
1007 struct bfin_mac_local *lp =
1008 container_of(ptp, struct bfin_mac_local, caps);
1009
1010 ns = ts->tv_sec * 1000000000ULL;
1011 ns += ts->tv_nsec;
1012
1013 spin_lock_irqsave(&lp->phc_lock, flags);
1014
1015 bfin_ptp_time_write(lp, ns);
1016
1017 spin_unlock_irqrestore(&lp->phc_lock, flags);
1018
1019 return 0;
1020}
1021
1022static int bfin_ptp_enable(struct ptp_clock_info *ptp,
1023 struct ptp_clock_request *rq, int on)
1024{
1025 return -EOPNOTSUPP;
1026}
1027
1028static struct ptp_clock_info bfin_ptp_caps = {
1029 .owner = THIS_MODULE,
1030 .name = "BF518 clock",
1031 .max_adj = 0,
1032 .n_alarm = 0,
1033 .n_ext_ts = 0,
1034 .n_per_out = 0,
1035 .pps = 0,
1036 .adjfreq = bfin_ptp_adjfreq,
1037 .adjtime = bfin_ptp_adjtime,
1038 .gettime = bfin_ptp_gettime,
1039 .settime = bfin_ptp_settime,
1040 .enable = bfin_ptp_enable,
1041};
1042
1043static int bfin_phc_init(struct net_device *netdev, struct device *dev)
916{ 1044{
917 struct bfin_mac_local *lp = netdev_priv(netdev); 1045 struct bfin_mac_local *lp = netdev_priv(netdev);
918 u64 append;
919 1046
920 /* Initialize hardware timer */ 1047 lp->caps = bfin_ptp_caps;
921 append = PTP_CLK * (1ULL << 32); 1048 lp->caps.max_adj = lp->max_ppb;
922 do_div(append, get_sclk()); 1049 lp->clock = ptp_clock_register(&lp->caps, dev);
923 bfin_write_EMAC_PTP_ADDEND((u32)append); 1050 if (IS_ERR(lp->clock))
924 1051 return PTR_ERR(lp->clock);
925 memset(&lp->cycles, 0, sizeof(lp->cycles));
926 lp->cycles.read = bfin_read_clock;
927 lp->cycles.mask = CLOCKSOURCE_MASK(64);
928 lp->cycles.mult = 1000000000 / PTP_CLK;
929 lp->cycles.shift = 0;
930
931 /* Synchronize our NIC clock against system wall clock */
932 memset(&lp->compare, 0, sizeof(lp->compare));
933 lp->compare.source = &lp->clock;
934 lp->compare.target = ktime_get_real;
935 lp->compare.num_samples = 10;
936 1052
937 /* Initialize hwstamp config */ 1053 lp->phc_index = ptp_clock_index(lp->clock);
938 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE; 1054 spin_lock_init(&lp->phc_lock);
939 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF; 1055
1056 return 0;
1057}
1058
1059static void bfin_phc_release(struct bfin_mac_local *lp)
1060{
1061 ptp_clock_unregister(lp->clock);
940} 1062}
941 1063
942#else 1064#else
@@ -945,6 +1067,8 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
945# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP) 1067# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
946# define bfin_rx_hwtstamp(dev, skb) 1068# define bfin_rx_hwtstamp(dev, skb)
947# define bfin_tx_hwtstamp(dev, skb) 1069# define bfin_tx_hwtstamp(dev, skb)
1070# define bfin_phc_init(netdev, dev) 0
1071# define bfin_phc_release(lp)
948#endif 1072#endif
949 1073
950static inline void _tx_reclaim_skb(void) 1074static inline void _tx_reclaim_skb(void)
@@ -1479,7 +1603,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
1479#endif 1603#endif
1480}; 1604};
1481 1605
1482static int __devinit bfin_mac_probe(struct platform_device *pdev) 1606static int bfin_mac_probe(struct platform_device *pdev)
1483{ 1607{
1484 struct net_device *ndev; 1608 struct net_device *ndev;
1485 struct bfin_mac_local *lp; 1609 struct bfin_mac_local *lp;
@@ -1579,12 +1703,17 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1579 } 1703 }
1580 1704
1581 bfin_mac_hwtstamp_init(ndev); 1705 bfin_mac_hwtstamp_init(ndev);
1706 if (bfin_phc_init(ndev, &pdev->dev)) {
1707 dev_err(&pdev->dev, "Cannot register PHC device!\n");
1708 goto out_err_phc;
1709 }
1582 1710
1583 /* now, print out the card info, in a short format.. */ 1711 /* now, print out the card info, in a short format.. */
1584 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1712 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1585 1713
1586 return 0; 1714 return 0;
1587 1715
1716out_err_phc:
1588out_err_reg_ndev: 1717out_err_reg_ndev:
1589 free_irq(IRQ_MAC_RX, ndev); 1718 free_irq(IRQ_MAC_RX, ndev);
1590out_err_request_irq: 1719out_err_request_irq:
@@ -1598,11 +1727,13 @@ out_err_probe_mac:
1598 return rc; 1727 return rc;
1599} 1728}
1600 1729
1601static int __devexit bfin_mac_remove(struct platform_device *pdev) 1730static int bfin_mac_remove(struct platform_device *pdev)
1602{ 1731{
1603 struct net_device *ndev = platform_get_drvdata(pdev); 1732 struct net_device *ndev = platform_get_drvdata(pdev);
1604 struct bfin_mac_local *lp = netdev_priv(ndev); 1733 struct bfin_mac_local *lp = netdev_priv(ndev);
1605 1734
1735 bfin_phc_release(lp);
1736
1606 platform_set_drvdata(pdev, NULL); 1737 platform_set_drvdata(pdev, NULL);
1607 1738
1608 lp->mii_bus->priv = NULL; 1739 lp->mii_bus->priv = NULL;
@@ -1655,7 +1786,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
1655#define bfin_mac_resume NULL 1786#define bfin_mac_resume NULL
1656#endif /* CONFIG_PM */ 1787#endif /* CONFIG_PM */
1657 1788
1658static int __devinit bfin_mii_bus_probe(struct platform_device *pdev) 1789static int bfin_mii_bus_probe(struct platform_device *pdev)
1659{ 1790{
1660 struct mii_bus *miibus; 1791 struct mii_bus *miibus;
1661 struct bfin_mii_bus_platform_data *mii_bus_pd; 1792 struct bfin_mii_bus_platform_data *mii_bus_pd;
@@ -1733,7 +1864,7 @@ out_err_alloc:
1733 return rc; 1864 return rc;
1734} 1865}
1735 1866
1736static int __devexit bfin_mii_bus_remove(struct platform_device *pdev) 1867static int bfin_mii_bus_remove(struct platform_device *pdev)
1737{ 1868{
1738 struct mii_bus *miibus = platform_get_drvdata(pdev); 1869 struct mii_bus *miibus = platform_get_drvdata(pdev);
1739 struct bfin_mii_bus_platform_data *mii_bus_pd = 1870 struct bfin_mii_bus_platform_data *mii_bus_pd =
@@ -1750,7 +1881,7 @@ static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
1750 1881
1751static struct platform_driver bfin_mii_bus_driver = { 1882static struct platform_driver bfin_mii_bus_driver = {
1752 .probe = bfin_mii_bus_probe, 1883 .probe = bfin_mii_bus_probe,
1753 .remove = __devexit_p(bfin_mii_bus_remove), 1884 .remove = bfin_mii_bus_remove,
1754 .driver = { 1885 .driver = {
1755 .name = "bfin_mii_bus", 1886 .name = "bfin_mii_bus",
1756 .owner = THIS_MODULE, 1887 .owner = THIS_MODULE,
@@ -1759,7 +1890,7 @@ static struct platform_driver bfin_mii_bus_driver = {
1759 1890
1760static struct platform_driver bfin_mac_driver = { 1891static struct platform_driver bfin_mac_driver = {
1761 .probe = bfin_mac_probe, 1892 .probe = bfin_mac_probe,
1762 .remove = __devexit_p(bfin_mac_remove), 1893 .remove = bfin_mac_remove,
1763 .resume = bfin_mac_resume, 1894 .resume = bfin_mac_resume,
1764 .suspend = bfin_mac_suspend, 1895 .suspend = bfin_mac_suspend,
1765 .driver = { 1896 .driver = {
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c08223..7a07ee07906b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -11,8 +11,7 @@
11#define _BFIN_MAC_H_ 11#define _BFIN_MAC_H_
12 12
13#include <linux/net_tstamp.h> 13#include <linux/net_tstamp.h>
14#include <linux/clocksource.h> 14#include <linux/ptp_clock_kernel.h>
15#include <linux/timecompare.h>
16#include <linux/timer.h> 15#include <linux/timer.h>
17#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
18#include <linux/bfin_mac.h> 17#include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@ struct bfin_mac_local {
94 struct mii_bus *mii_bus; 93 struct mii_bus *mii_bus;
95 94
96#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP) 95#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
97 struct cyclecounter cycles; 96 u32 addend;
98 struct timecounter clock; 97 unsigned int shift;
99 struct timecompare compare; 98 s32 max_ppb;
100 struct hwtstamp_config stamp_cfg; 99 struct hwtstamp_config stamp_cfg;
100 struct ptp_clock_info caps;
101 struct ptp_clock *clock;
102 int phc_index;
103 spinlock_t phc_lock; /* protects time lo/hi registers */
101#endif 104#endif
102}; 105};
103 106
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 9c77c736f171..aa53115bb38b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1376,7 +1376,7 @@ error:
1376} 1376}
1377 1377
1378/* Initialize the GRETH MAC */ 1378/* Initialize the GRETH MAC */
1379static int __devinit greth_of_probe(struct platform_device *ofdev) 1379static int greth_of_probe(struct platform_device *ofdev)
1380{ 1380{
1381 struct net_device *dev; 1381 struct net_device *dev;
1382 struct greth_private *greth; 1382 struct greth_private *greth;
@@ -1576,7 +1576,7 @@ error1:
1576 return err; 1576 return err;
1577} 1577}
1578 1578
1579static int __devexit greth_of_remove(struct platform_device *of_dev) 1579static int greth_of_remove(struct platform_device *of_dev)
1580{ 1580{
1581 struct net_device *ndev = dev_get_drvdata(&of_dev->dev); 1581 struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
1582 struct greth_private *greth = netdev_priv(ndev); 1582 struct greth_private *greth = netdev_priv(ndev);
@@ -1619,7 +1619,7 @@ static struct platform_driver greth_of_driver = {
1619 .of_match_table = greth_of_match, 1619 .of_match_table = greth_of_match,
1620 }, 1620 },
1621 .probe = greth_of_probe, 1621 .probe = greth_of_probe,
1622 .remove = __devexit_p(greth_of_remove), 1622 .remove = greth_of_remove,
1623}; 1623};
1624 1624
1625module_platform_driver(greth_of_driver); 1625module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 7219123fa0a4..c0bc41a784ca 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -426,7 +426,7 @@ MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descript
426MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)"); 426MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
427 427
428 428
429static const char version[] __devinitconst = 429static const char version[] =
430 "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n" 430 "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
431 " http://home.cern.ch/~jes/gige/acenic.html\n"; 431 " http://home.cern.ch/~jes/gige/acenic.html\n";
432 432
@@ -454,8 +454,8 @@ static const struct net_device_ops ace_netdev_ops = {
454 .ndo_change_mtu = ace_change_mtu, 454 .ndo_change_mtu = ace_change_mtu,
455}; 455};
456 456
457static int __devinit acenic_probe_one(struct pci_dev *pdev, 457static int acenic_probe_one(struct pci_dev *pdev,
458 const struct pci_device_id *id) 458 const struct pci_device_id *id)
459{ 459{
460 struct net_device *dev; 460 struct net_device *dev;
461 struct ace_private *ap; 461 struct ace_private *ap;
@@ -603,7 +603,7 @@ static int __devinit acenic_probe_one(struct pci_dev *pdev,
603 return -ENODEV; 603 return -ENODEV;
604} 604}
605 605
606static void __devexit acenic_remove_one(struct pci_dev *pdev) 606static void acenic_remove_one(struct pci_dev *pdev)
607{ 607{
608 struct net_device *dev = pci_get_drvdata(pdev); 608 struct net_device *dev = pci_get_drvdata(pdev);
609 struct ace_private *ap = netdev_priv(dev); 609 struct ace_private *ap = netdev_priv(dev);
@@ -699,7 +699,7 @@ static struct pci_driver acenic_pci_driver = {
699 .name = "acenic", 699 .name = "acenic",
700 .id_table = acenic_pci_tbl, 700 .id_table = acenic_pci_tbl,
701 .probe = acenic_probe_one, 701 .probe = acenic_probe_one,
702 .remove = __devexit_p(acenic_remove_one), 702 .remove = acenic_remove_one,
703}; 703};
704 704
705static int __init acenic_init(void) 705static int __init acenic_init(void)
@@ -871,7 +871,7 @@ static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd)
871} 871}
872 872
873 873
874static int __devinit ace_init(struct net_device *dev) 874static int ace_init(struct net_device *dev)
875{ 875{
876 struct ace_private *ap; 876 struct ace_private *ap;
877 struct ace_regs __iomem *regs; 877 struct ace_regs __iomem *regs;
@@ -2824,8 +2824,8 @@ static struct net_device_stats *ace_get_stats(struct net_device *dev)
2824} 2824}
2825 2825
2826 2826
2827static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src, 2827static void ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
2828 u32 dest, int size) 2828 u32 dest, int size)
2829{ 2829{
2830 void __iomem *tdest; 2830 void __iomem *tdest;
2831 short tsize, i; 2831 short tsize, i;
@@ -2851,7 +2851,7 @@ static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
2851} 2851}
2852 2852
2853 2853
2854static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size) 2854static void ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
2855{ 2855{
2856 void __iomem *tdest; 2856 void __iomem *tdest;
2857 short tsize = 0, i; 2857 short tsize = 0, i;
@@ -2882,7 +2882,7 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
2882 * This operation requires the NIC to be halted and is performed with 2882 * This operation requires the NIC to be halted and is performed with
2883 * interrupts disabled and with the spinlock hold. 2883 * interrupts disabled and with the spinlock hold.
2884 */ 2884 */
2885static int __devinit ace_load_firmware(struct net_device *dev) 2885static int ace_load_firmware(struct net_device *dev)
2886{ 2886{
2887 const struct firmware *fw; 2887 const struct firmware *fw;
2888 const char *fw_name = "acenic/tg2.bin"; 2888 const char *fw_name = "acenic/tg2.bin";
@@ -2962,7 +2962,7 @@ static int __devinit ace_load_firmware(struct net_device *dev)
2962 * Thanks to Stevarino Webinski for helping tracking down the bugs in the 2962 * Thanks to Stevarino Webinski for helping tracking down the bugs in the
2963 * code i2c readout code by beta testing all my hacks. 2963 * code i2c readout code by beta testing all my hacks.
2964 */ 2964 */
2965static void __devinit eeprom_start(struct ace_regs __iomem *regs) 2965static void eeprom_start(struct ace_regs __iomem *regs)
2966{ 2966{
2967 u32 local; 2967 u32 local;
2968 2968
@@ -2991,7 +2991,7 @@ static void __devinit eeprom_start(struct ace_regs __iomem *regs)
2991} 2991}
2992 2992
2993 2993
2994static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic) 2994static void eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
2995{ 2995{
2996 short i; 2996 short i;
2997 u32 local; 2997 u32 local;
@@ -3028,7 +3028,7 @@ static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
3028} 3028}
3029 3029
3030 3030
3031static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs) 3031static int eeprom_check_ack(struct ace_regs __iomem *regs)
3032{ 3032{
3033 int state; 3033 int state;
3034 u32 local; 3034 u32 local;
@@ -3056,7 +3056,7 @@ static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
3056} 3056}
3057 3057
3058 3058
3059static void __devinit eeprom_stop(struct ace_regs __iomem *regs) 3059static void eeprom_stop(struct ace_regs __iomem *regs)
3060{ 3060{
3061 u32 local; 3061 u32 local;
3062 3062
@@ -3091,8 +3091,7 @@ static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
3091/* 3091/*
3092 * Read a whole byte from the EEPROM. 3092 * Read a whole byte from the EEPROM.
3093 */ 3093 */
3094static int __devinit read_eeprom_byte(struct net_device *dev, 3094static int read_eeprom_byte(struct net_device *dev, unsigned long offset)
3095 unsigned long offset)
3096{ 3095{
3097 struct ace_private *ap = netdev_priv(dev); 3096 struct ace_private *ap = netdev_priv(dev);
3098 struct ace_regs __iomem *regs = ap->regs; 3097 struct ace_regs __iomem *regs = ap->regs;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 689dfcafc6d4..3789affbc0e5 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -639,12 +639,12 @@ static void lance_set_multicast(struct net_device *dev)
639 netif_wake_queue(dev); 639 netif_wake_queue(dev);
640} 640}
641 641
642static int __devinit a2065_init_one(struct zorro_dev *z, 642static int a2065_init_one(struct zorro_dev *z,
643 const struct zorro_device_id *ent); 643 const struct zorro_device_id *ent);
644static void __devexit a2065_remove_one(struct zorro_dev *z); 644static void a2065_remove_one(struct zorro_dev *z);
645 645
646 646
647static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = { 647static struct zorro_device_id a2065_zorro_tbl[] = {
648 { ZORRO_PROD_CBM_A2065_1 }, 648 { ZORRO_PROD_CBM_A2065_1 },
649 { ZORRO_PROD_CBM_A2065_2 }, 649 { ZORRO_PROD_CBM_A2065_2 },
650 { ZORRO_PROD_AMERISTAR_A2065 }, 650 { ZORRO_PROD_AMERISTAR_A2065 },
@@ -656,7 +656,7 @@ static struct zorro_driver a2065_driver = {
656 .name = "a2065", 656 .name = "a2065",
657 .id_table = a2065_zorro_tbl, 657 .id_table = a2065_zorro_tbl,
658 .probe = a2065_init_one, 658 .probe = a2065_init_one,
659 .remove = __devexit_p(a2065_remove_one), 659 .remove = a2065_remove_one,
660}; 660};
661 661
662static const struct net_device_ops lance_netdev_ops = { 662static const struct net_device_ops lance_netdev_ops = {
@@ -670,8 +670,8 @@ static const struct net_device_ops lance_netdev_ops = {
670 .ndo_set_mac_address = eth_mac_addr, 670 .ndo_set_mac_address = eth_mac_addr,
671}; 671};
672 672
673static int __devinit a2065_init_one(struct zorro_dev *z, 673static int a2065_init_one(struct zorro_dev *z,
674 const struct zorro_device_id *ent) 674 const struct zorro_device_id *ent)
675{ 675{
676 struct net_device *dev; 676 struct net_device *dev;
677 struct lance_private *priv; 677 struct lance_private *priv;
@@ -754,7 +754,7 @@ static int __devinit a2065_init_one(struct zorro_dev *z,
754} 754}
755 755
756 756
757static void __devexit a2065_remove_one(struct zorro_dev *z) 757static void a2065_remove_one(struct zorro_dev *z)
758{ 758{
759 struct net_device *dev = zorro_get_drvdata(z); 759 struct net_device *dev = zorro_get_drvdata(z);
760 760
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index e10ffad525a7..60e2b701afe7 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -671,7 +671,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
671#endif 671#endif
672}; 672};
673 673
674static int __devinit am79c961_probe(struct platform_device *pdev) 674static int am79c961_probe(struct platform_device *pdev)
675{ 675{
676 struct resource *res; 676 struct resource *res;
677 struct net_device *dev; 677 struct net_device *dev;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 3491d4312fc9..42d4e6ad58a5 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1702,7 +1702,7 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
1702} 1702}
1703 1703
1704 1704
1705static void __devexit amd8111e_remove_one(struct pci_dev *pdev) 1705static void amd8111e_remove_one(struct pci_dev *pdev)
1706{ 1706{
1707 struct net_device *dev = pci_get_drvdata(pdev); 1707 struct net_device *dev = pci_get_drvdata(pdev);
1708 if (dev) { 1708 if (dev) {
@@ -1774,7 +1774,7 @@ static void amd8111e_config_ipg(struct net_device* dev)
1774 1774
1775} 1775}
1776 1776
1777static void __devinit amd8111e_probe_ext_phy(struct net_device* dev) 1777static void amd8111e_probe_ext_phy(struct net_device *dev)
1778{ 1778{
1779 struct amd8111e_priv *lp = netdev_priv(dev); 1779 struct amd8111e_priv *lp = netdev_priv(dev);
1780 int i; 1780 int i;
@@ -1810,7 +1810,7 @@ static const struct net_device_ops amd8111e_netdev_ops = {
1810#endif 1810#endif
1811}; 1811};
1812 1812
1813static int __devinit amd8111e_probe_one(struct pci_dev *pdev, 1813static int amd8111e_probe_one(struct pci_dev *pdev,
1814 const struct pci_device_id *ent) 1814 const struct pci_device_id *ent)
1815{ 1815{
1816 int err,i,pm_cap; 1816 int err,i,pm_cap;
@@ -1976,7 +1976,7 @@ static struct pci_driver amd8111e_driver = {
1976 .name = MODULE_NAME, 1976 .name = MODULE_NAME,
1977 .id_table = amd8111e_pci_tbl, 1977 .id_table = amd8111e_pci_tbl,
1978 .probe = amd8111e_probe_one, 1978 .probe = amd8111e_probe_one,
1979 .remove = __devexit_p(amd8111e_remove_one), 1979 .remove = amd8111e_remove_one,
1980 .suspend = amd8111e_suspend, 1980 .suspend = amd8111e_suspend,
1981 .resume = amd8111e_resume 1981 .resume = amd8111e_resume
1982}; 1982};
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index f2958df9a1e4..98f4522fd17b 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -682,7 +682,7 @@ static void set_multicast_list(struct net_device *dev)
682} 682}
683 683
684 684
685static void __devexit ariadne_remove_one(struct zorro_dev *z) 685static void ariadne_remove_one(struct zorro_dev *z)
686{ 686{
687 struct net_device *dev = zorro_get_drvdata(z); 687 struct net_device *dev = zorro_get_drvdata(z);
688 688
@@ -692,7 +692,7 @@ static void __devexit ariadne_remove_one(struct zorro_dev *z)
692 free_netdev(dev); 692 free_netdev(dev);
693} 693}
694 694
695static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = { 695static struct zorro_device_id ariadne_zorro_tbl[] = {
696 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, 696 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
697 { 0 } 697 { 0 }
698}; 698};
@@ -710,8 +710,8 @@ static const struct net_device_ops ariadne_netdev_ops = {
710 .ndo_set_mac_address = eth_mac_addr, 710 .ndo_set_mac_address = eth_mac_addr,
711}; 711};
712 712
713static int __devinit ariadne_init_one(struct zorro_dev *z, 713static int ariadne_init_one(struct zorro_dev *z,
714 const struct zorro_device_id *ent) 714 const struct zorro_device_id *ent)
715{ 715{
716 unsigned long board = z->resource.start; 716 unsigned long board = z->resource.start;
717 unsigned long base_addr = board + ARIADNE_LANCE; 717 unsigned long base_addr = board + ARIADNE_LANCE;
@@ -774,7 +774,7 @@ static struct zorro_driver ariadne_driver = {
774 .name = "ariadne", 774 .name = "ariadne",
775 .id_table = ariadne_zorro_tbl, 775 .id_table = ariadne_zorro_tbl,
776 .probe = ariadne_init_one, 776 .probe = ariadne_init_one,
777 .remove = __devexit_p(ariadne_remove_one), 777 .remove = ariadne_remove_one,
778}; 778};
779 779
780static int __init ariadne_init_module(void) 780static int __init ariadne_init_module(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index f195acfa2df7..2ea221ed4777 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1016,7 +1016,7 @@ static const struct net_device_ops au1000_netdev_ops = {
1016 .ndo_change_mtu = eth_change_mtu, 1016 .ndo_change_mtu = eth_change_mtu,
1017}; 1017};
1018 1018
1019static int __devinit au1000_probe(struct platform_device *pdev) 1019static int au1000_probe(struct platform_device *pdev)
1020{ 1020{
1021 static unsigned version_printed; 1021 static unsigned version_printed;
1022 struct au1000_private *aup = NULL; 1022 struct au1000_private *aup = NULL;
@@ -1295,7 +1295,7 @@ out:
1295 return err; 1295 return err;
1296} 1296}
1297 1297
1298static int __devexit au1000_remove(struct platform_device *pdev) 1298static int au1000_remove(struct platform_device *pdev)
1299{ 1299{
1300 struct net_device *dev = platform_get_drvdata(pdev); 1300 struct net_device *dev = platform_get_drvdata(pdev);
1301 struct au1000_private *aup = netdev_priv(dev); 1301 struct au1000_private *aup = netdev_priv(dev);
@@ -1340,7 +1340,7 @@ static int __devexit au1000_remove(struct platform_device *pdev)
1340 1340
1341static struct platform_driver au1000_eth_driver = { 1341static struct platform_driver au1000_eth_driver = {
1342 .probe = au1000_probe, 1342 .probe = au1000_probe,
1343 .remove = __devexit_p(au1000_remove), 1343 .remove = au1000_remove,
1344 .driver = { 1344 .driver = {
1345 .name = "au1000-eth", 1345 .name = "au1000-eth",
1346 .owner = THIS_MODULE, 1346 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 7203b522f234..baca0bd1b393 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -72,7 +72,7 @@
72#include <asm/dec/machtype.h> 72#include <asm/dec/machtype.h>
73#include <asm/dec/system.h> 73#include <asm/dec/system.h>
74 74
75static char version[] __devinitdata = 75static char version[] =
76"declance.c: v0.011 by Linux MIPS DECstation task force\n"; 76"declance.c: v0.011 by Linux MIPS DECstation task force\n";
77 77
78MODULE_AUTHOR("Linux MIPS DECstation task force"); 78MODULE_AUTHOR("Linux MIPS DECstation task force");
@@ -1020,7 +1020,7 @@ static const struct net_device_ops lance_netdev_ops = {
1020 .ndo_set_mac_address = eth_mac_addr, 1020 .ndo_set_mac_address = eth_mac_addr,
1021}; 1021};
1022 1022
1023static int __devinit dec_lance_probe(struct device *bdev, const int type) 1023static int dec_lance_probe(struct device *bdev, const int type)
1024{ 1024{
1025 static unsigned version_printed; 1025 static unsigned version_printed;
1026 static const char fmt[] = "declance%d"; 1026 static const char fmt[] = "declance%d";
@@ -1322,7 +1322,7 @@ static void __exit dec_lance_platform_remove(void)
1322} 1322}
1323 1323
1324#ifdef CONFIG_TC 1324#ifdef CONFIG_TC
1325static int __devinit dec_lance_tc_probe(struct device *dev); 1325static int dec_lance_tc_probe(struct device *dev);
1326static int __exit dec_lance_tc_remove(struct device *dev); 1326static int __exit dec_lance_tc_remove(struct device *dev);
1327 1327
1328static const struct tc_device_id dec_lance_tc_table[] = { 1328static const struct tc_device_id dec_lance_tc_table[] = {
@@ -1341,7 +1341,7 @@ static struct tc_driver dec_lance_tc_driver = {
1341 }, 1341 },
1342}; 1342};
1343 1343
1344static int __devinit dec_lance_tc_probe(struct device *dev) 1344static int dec_lance_tc_probe(struct device *dev)
1345{ 1345{
1346 int status = dec_lance_probe(dev, PMAD_LANCE); 1346 int status = dec_lance_probe(dev, PMAD_LANCE);
1347 if (!status) 1347 if (!status)
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
index c771de71612a..34a485363d5b 100644
--- a/drivers/net/ethernet/amd/depca.c
+++ b/drivers/net/ethernet/amd/depca.c
@@ -338,21 +338,21 @@ static struct eisa_driver depca_eisa_driver = {
338 .driver = { 338 .driver = {
339 .name = depca_string, 339 .name = depca_string,
340 .probe = depca_eisa_probe, 340 .probe = depca_eisa_probe,
341 .remove = __devexit_p (depca_device_remove) 341 .remove = depca_device_remove
342 } 342 }
343}; 343};
344#endif 344#endif
345 345
346static int depca_isa_probe (struct platform_device *); 346static int depca_isa_probe (struct platform_device *);
347 347
348static int __devexit depca_isa_remove(struct platform_device *pdev) 348static int depca_isa_remove(struct platform_device *pdev)
349{ 349{
350 return depca_device_remove(&pdev->dev); 350 return depca_device_remove(&pdev->dev);
351} 351}
352 352
353static struct platform_driver depca_isa_driver = { 353static struct platform_driver depca_isa_driver = {
354 .probe = depca_isa_probe, 354 .probe = depca_isa_probe,
355 .remove = __devexit_p(depca_isa_remove), 355 .remove = depca_isa_remove,
356 .driver = { 356 .driver = {
357 .name = depca_string, 357 .name = depca_string,
358 }, 358 },
@@ -1320,7 +1320,7 @@ static enum depca_type __init depca_shmem_probe (ulong *mem_start)
1320 return adapter; 1320 return adapter;
1321} 1321}
1322 1322
1323static int __devinit depca_isa_probe (struct platform_device *device) 1323static int depca_isa_probe(struct platform_device *device)
1324{ 1324{
1325 struct net_device *dev; 1325 struct net_device *dev;
1326 struct depca_private *lp; 1326 struct depca_private *lp;
@@ -1412,7 +1412,7 @@ static int __init depca_eisa_probe (struct device *device)
1412} 1412}
1413#endif 1413#endif
1414 1414
1415static int __devexit depca_device_remove (struct device *device) 1415static int depca_device_remove(struct device *device)
1416{ 1416{
1417 struct net_device *dev; 1417 struct net_device *dev;
1418 struct depca_private *lp; 1418 struct depca_private *lp;
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 8baff4e5d964..0c61fd50d882 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -46,11 +46,9 @@ struct hplance_private {
46 * plus board-specific init, open and close actions. 46 * plus board-specific init, open and close actions.
47 * Oh, and we need to tell the generic code how to read and write LANCE registers... 47 * Oh, and we need to tell the generic code how to read and write LANCE registers...
48 */ 48 */
49static int __devinit hplance_init_one(struct dio_dev *d, 49static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent);
50 const struct dio_device_id *ent); 50static void hplance_init(struct net_device *dev, struct dio_dev *d);
51static void __devinit hplance_init(struct net_device *dev, 51static void hplance_remove_one(struct dio_dev *d);
52 struct dio_dev *d);
53static void __devexit hplance_remove_one(struct dio_dev *d);
54static void hplance_writerap(void *priv, unsigned short value); 52static void hplance_writerap(void *priv, unsigned short value);
55static void hplance_writerdp(void *priv, unsigned short value); 53static void hplance_writerdp(void *priv, unsigned short value);
56static unsigned short hplance_readrdp(void *priv); 54static unsigned short hplance_readrdp(void *priv);
@@ -66,7 +64,7 @@ static struct dio_driver hplance_driver = {
66 .name = "hplance", 64 .name = "hplance",
67 .id_table = hplance_dio_tbl, 65 .id_table = hplance_dio_tbl,
68 .probe = hplance_init_one, 66 .probe = hplance_init_one,
69 .remove = __devexit_p(hplance_remove_one), 67 .remove = hplance_remove_one,
70}; 68};
71 69
72static const struct net_device_ops hplance_netdev_ops = { 70static const struct net_device_ops hplance_netdev_ops = {
@@ -83,8 +81,7 @@ static const struct net_device_ops hplance_netdev_ops = {
83}; 81};
84 82
85/* Find all the HP Lance boards and initialise them... */ 83/* Find all the HP Lance boards and initialise them... */
86static int __devinit hplance_init_one(struct dio_dev *d, 84static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent)
87 const struct dio_device_id *ent)
88{ 85{
89 struct net_device *dev; 86 struct net_device *dev;
90 int err = -ENOMEM; 87 int err = -ENOMEM;
@@ -118,7 +115,7 @@ static int __devinit hplance_init_one(struct dio_dev *d,
118 return err; 115 return err;
119} 116}
120 117
121static void __devexit hplance_remove_one(struct dio_dev *d) 118static void hplance_remove_one(struct dio_dev *d)
122{ 119{
123 struct net_device *dev = dio_get_drvdata(d); 120 struct net_device *dev = dio_get_drvdata(d);
124 121
@@ -128,7 +125,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
128} 125}
129 126
130/* Initialise a single lance board at the given DIO device */ 127/* Initialise a single lance board at the given DIO device */
131static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d) 128static void hplance_init(struct net_device *dev, struct dio_dev *d)
132{ 129{
133 unsigned long va = (d->resource.start + DIO_VIRADDRBASE); 130 unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
134 struct hplance_private *lp; 131 struct hplance_private *lp;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 86b6d8e4e6cd..a227ccdcb9b5 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1443,7 +1443,7 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
1443/* only probes for non-PCI devices, the rest are handled by 1443/* only probes for non-PCI devices, the rest are handled by
1444 * pci_register_driver via pcnet32_probe_pci */ 1444 * pci_register_driver via pcnet32_probe_pci */
1445 1445
1446static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist) 1446static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1447{ 1447{
1448 unsigned int *port, ioaddr; 1448 unsigned int *port, ioaddr;
1449 1449
@@ -1462,7 +1462,7 @@ static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1462 } 1462 }
1463} 1463}
1464 1464
1465static int __devinit 1465static int
1466pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) 1466pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1467{ 1467{
1468 unsigned long ioaddr; 1468 unsigned long ioaddr;
@@ -1521,7 +1521,7 @@ static const struct net_device_ops pcnet32_netdev_ops = {
1521 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. 1521 * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
1522 * pdev will be NULL when called from pcnet32_probe_vlbus. 1522 * pdev will be NULL when called from pcnet32_probe_vlbus.
1523 */ 1523 */
1524static int __devinit 1524static int
1525pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) 1525pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1526{ 1526{
1527 struct pcnet32_private *lp; 1527 struct pcnet32_private *lp;
@@ -2823,7 +2823,7 @@ static int pcnet32_pm_resume(struct pci_dev *pdev)
2823 return 0; 2823 return 0;
2824} 2824}
2825 2825
2826static void __devexit pcnet32_remove_one(struct pci_dev *pdev) 2826static void pcnet32_remove_one(struct pci_dev *pdev)
2827{ 2827{
2828 struct net_device *dev = pci_get_drvdata(pdev); 2828 struct net_device *dev = pci_get_drvdata(pdev);
2829 2829
@@ -2844,7 +2844,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2844static struct pci_driver pcnet32_driver = { 2844static struct pci_driver pcnet32_driver = {
2845 .name = DRV_NAME, 2845 .name = DRV_NAME,
2846 .probe = pcnet32_probe_pci, 2846 .probe = pcnet32_probe_pci,
2847 .remove = __devexit_p(pcnet32_remove_one), 2847 .remove = pcnet32_remove_one,
2848 .id_table = pcnet32_pci_tbl, 2848 .id_table = pcnet32_pci_tbl,
2849 .suspend = pcnet32_pm_suspend, 2849 .suspend = pcnet32_pm_suspend,
2850 .resume = pcnet32_pm_resume, 2850 .resume = pcnet32_pm_resume,
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index d7a3533d990b..c2d696c88e46 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1304,9 +1304,9 @@ static const struct net_device_ops sparc_lance_ops = {
1304 .ndo_validate_addr = eth_validate_addr, 1304 .ndo_validate_addr = eth_validate_addr,
1305}; 1305};
1306 1306
1307static int __devinit sparc_lance_probe_one(struct platform_device *op, 1307static int sparc_lance_probe_one(struct platform_device *op,
1308 struct platform_device *ledma, 1308 struct platform_device *ledma,
1309 struct platform_device *lebuffer) 1309 struct platform_device *lebuffer)
1310{ 1310{
1311 struct device_node *dp = op->dev.of_node; 1311 struct device_node *dp = op->dev.of_node;
1312 static unsigned version_printed; 1312 static unsigned version_printed;
@@ -1488,7 +1488,7 @@ fail:
1488 return -ENODEV; 1488 return -ENODEV;
1489} 1489}
1490 1490
1491static int __devinit sunlance_sbus_probe(struct platform_device *op) 1491static int sunlance_sbus_probe(struct platform_device *op)
1492{ 1492{
1493 struct platform_device *parent = to_platform_device(op->dev.parent); 1493 struct platform_device *parent = to_platform_device(op->dev.parent);
1494 struct device_node *parent_dp = parent->dev.of_node; 1494 struct device_node *parent_dp = parent->dev.of_node;
@@ -1504,7 +1504,7 @@ static int __devinit sunlance_sbus_probe(struct platform_device *op)
1504 return err; 1504 return err;
1505} 1505}
1506 1506
1507static int __devexit sunlance_sbus_remove(struct platform_device *op) 1507static int sunlance_sbus_remove(struct platform_device *op)
1508{ 1508{
1509 struct lance_private *lp = dev_get_drvdata(&op->dev); 1509 struct lance_private *lp = dev_get_drvdata(&op->dev);
1510 struct net_device *net_dev = lp->dev; 1510 struct net_device *net_dev = lp->dev;
@@ -1536,7 +1536,7 @@ static struct platform_driver sunlance_sbus_driver = {
1536 .of_match_table = sunlance_sbus_match, 1536 .of_match_table = sunlance_sbus_match,
1537 }, 1537 },
1538 .probe = sunlance_sbus_probe, 1538 .probe = sunlance_sbus_probe,
1539 .remove = __devexit_p(sunlance_sbus_remove), 1539 .remove = sunlance_sbus_remove,
1540}; 1540};
1541 1541
1542module_platform_driver(sunlance_sbus_driver); 1542module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 855bdafb1a87..f36bbd6d5085 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1244,7 +1244,7 @@ static const struct net_device_ops bmac_netdev_ops = {
1244 .ndo_validate_addr = eth_validate_addr, 1244 .ndo_validate_addr = eth_validate_addr,
1245}; 1245};
1246 1246
1247static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) 1247static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
1248{ 1248{
1249 int j, rev, ret; 1249 int j, rev, ret;
1250 struct bmac_data *bp; 1250 struct bmac_data *bp;
@@ -1602,7 +1602,7 @@ bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1602} 1602}
1603#endif 1603#endif
1604 1604
1605static int __devexit bmac_remove(struct macio_dev *mdev) 1605static int bmac_remove(struct macio_dev *mdev)
1606{ 1606{
1607 struct net_device *dev = macio_get_drvdata(mdev); 1607 struct net_device *dev = macio_get_drvdata(mdev);
1608 struct bmac_data *bp = netdev_priv(dev); 1608 struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index e1df4b76c885..842fe7684904 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -106,7 +106,7 @@ static const struct net_device_ops mace_netdev_ops = {
106 .ndo_validate_addr = eth_validate_addr, 106 .ndo_validate_addr = eth_validate_addr,
107}; 107};
108 108
109static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match) 109static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
110{ 110{
111 struct device_node *mace = macio_get_of_node(mdev); 111 struct device_node *mace = macio_get_of_node(mdev);
112 struct net_device *dev; 112 struct net_device *dev;
@@ -271,7 +271,7 @@ static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_i
271 return rc; 271 return rc;
272} 272}
273 273
274static int __devexit mace_remove(struct macio_dev *mdev) 274static int mace_remove(struct macio_dev *mdev)
275{ 275{
276 struct net_device *dev = macio_get_drvdata(mdev); 276 struct net_device *dev = macio_get_drvdata(mdev);
277 struct mace_data *mp; 277 struct mace_data *mp;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a92ddee7f665..a206779c68cf 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -195,7 +195,7 @@ static const struct net_device_ops mace_netdev_ops = {
195 * model of Macintrash has a MACE (AV macintoshes) 195 * model of Macintrash has a MACE (AV macintoshes)
196 */ 196 */
197 197
198static int __devinit mace_probe(struct platform_device *pdev) 198static int mace_probe(struct platform_device *pdev)
199{ 199{
200 int j; 200 int j;
201 struct mace_data *mp; 201 struct mace_data *mp;
@@ -746,7 +746,7 @@ MODULE_LICENSE("GPL");
746MODULE_DESCRIPTION("Macintosh MACE ethernet driver"); 746MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
747MODULE_ALIAS("platform:macmace"); 747MODULE_ALIAS("platform:macmace");
748 748
749static int __devexit mac_mace_device_remove (struct platform_device *pdev) 749static int mac_mace_device_remove(struct platform_device *pdev)
750{ 750{
751 struct net_device *dev = platform_get_drvdata(pdev); 751 struct net_device *dev = platform_get_drvdata(pdev);
752 struct mace_data *mp = netdev_priv(dev); 752 struct mace_data *mp = netdev_priv(dev);
@@ -768,7 +768,7 @@ static int __devexit mac_mace_device_remove (struct platform_device *pdev)
768 768
769static struct platform_driver mac_mace_driver = { 769static struct platform_driver mac_mace_driver = {
770 .probe = mace_probe, 770 .probe = mace_probe,
771 .remove = __devexit_p(mac_mace_device_remove), 771 .remove = mac_mace_device_remove,
772 .driver = { 772 .driver = {
773 .name = mac_mace_string, 773 .name = mac_mace_string,
774 .owner = THIS_MODULE, 774 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d19f82f7597a..56d3f697e0c7 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -643,7 +643,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
643 * @adapter: board private structure to initialize 643 * @adapter: board private structure to initialize
644 * 644 *
645 */ 645 */
646static int __devinit atl1c_alloc_queues(struct atl1c_adapter *adapter) 646static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
647{ 647{
648 return 0; 648 return 0;
649} 649}
@@ -702,7 +702,7 @@ struct atl1c_platform_patch {
702 u32 patch_flag; 702 u32 patch_flag;
703#define ATL1C_LINK_PATCH 0x1 703#define ATL1C_LINK_PATCH 0x1
704}; 704};
705static const struct atl1c_platform_patch plats[] __devinitconst = { 705static const struct atl1c_platform_patch plats[] = {
706{0x2060, 0xC1, 0x1019, 0x8152, 0x1}, 706{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
707{0x2060, 0xC1, 0x1019, 0x2060, 0x1}, 707{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
708{0x2060, 0xC1, 0x1019, 0xE000, 0x1}, 708{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
@@ -725,7 +725,7 @@ static const struct atl1c_platform_patch plats[] __devinitconst = {
725{0}, 725{0},
726}; 726};
727 727
728static void __devinit atl1c_patch_assign(struct atl1c_hw *hw) 728static void atl1c_patch_assign(struct atl1c_hw *hw)
729{ 729{
730 struct pci_dev *pdev = hw->adapter->pdev; 730 struct pci_dev *pdev = hw->adapter->pdev;
731 u32 misc_ctrl; 731 u32 misc_ctrl;
@@ -764,7 +764,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
764 * Fields are initialized based on PCI device information and 764 * Fields are initialized based on PCI device information and
765 * OS network device settings (MTU size). 765 * OS network device settings (MTU size).
766 */ 766 */
767static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter) 767static int atl1c_sw_init(struct atl1c_adapter *adapter)
768{ 768{
769 struct atl1c_hw *hw = &adapter->hw; 769 struct atl1c_hw *hw = &adapter->hw;
770 struct pci_dev *pdev = adapter->pdev; 770 struct pci_dev *pdev = adapter->pdev;
@@ -2442,8 +2442,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2442 * The OS initialization, configuring of the adapter private structure, 2442 * The OS initialization, configuring of the adapter private structure,
2443 * and a hardware reset occur. 2443 * and a hardware reset occur.
2444 */ 2444 */
2445static int __devinit atl1c_probe(struct pci_dev *pdev, 2445static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2446 const struct pci_device_id *ent)
2447{ 2446{
2448 struct net_device *netdev; 2447 struct net_device *netdev;
2449 struct atl1c_adapter *adapter; 2448 struct atl1c_adapter *adapter;
@@ -2587,7 +2586,7 @@ err_dma:
2587 * Hot-Plug event, or because the driver is going to be removed from 2586 * Hot-Plug event, or because the driver is going to be removed from
2588 * memory. 2587 * memory.
2589 */ 2588 */
2590static void __devexit atl1c_remove(struct pci_dev *pdev) 2589static void atl1c_remove(struct pci_dev *pdev)
2591{ 2590{
2592 struct net_device *netdev = pci_get_drvdata(pdev); 2591 struct net_device *netdev = pci_get_drvdata(pdev);
2593 struct atl1c_adapter *adapter = netdev_priv(netdev); 2592 struct atl1c_adapter *adapter = netdev_priv(netdev);
@@ -2697,7 +2696,7 @@ static struct pci_driver atl1c_driver = {
2697 .name = atl1c_driver_name, 2696 .name = atl1c_driver_name,
2698 .id_table = atl1c_pci_tbl, 2697 .id_table = atl1c_pci_tbl,
2699 .probe = atl1c_probe, 2698 .probe = atl1c_probe,
2700 .remove = __devexit_p(atl1c_remove), 2699 .remove = atl1c_remove,
2701 .shutdown = atl1c_shutdown, 2700 .shutdown = atl1c_shutdown,
2702 .err_handler = &atl1c_err_handler, 2701 .err_handler = &atl1c_err_handler,
2703 .driver.pm = &atl1c_pm_ops, 2702 .driver.pm = &atl1c_pm_ops,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e213da29e73d..e4466a36d106 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -534,7 +534,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
534 * @adapter: board private structure to initialize 534 * @adapter: board private structure to initialize
535 * 535 *
536 */ 536 */
537static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter) 537static int atl1e_alloc_queues(struct atl1e_adapter *adapter)
538{ 538{
539 return 0; 539 return 0;
540} 540}
@@ -547,7 +547,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
547 * Fields are initialized based on PCI device information and 547 * Fields are initialized based on PCI device information and
548 * OS network device settings (MTU size). 548 * OS network device settings (MTU size).
549 */ 549 */
550static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter) 550static int atl1e_sw_init(struct atl1e_adapter *adapter)
551{ 551{
552 struct atl1e_hw *hw = &adapter->hw; 552 struct atl1e_hw *hw = &adapter->hw;
553 struct pci_dev *pdev = adapter->pdev; 553 struct pci_dev *pdev = adapter->pdev;
@@ -2235,8 +2235,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2235 * The OS initialization, configuring of the adapter private structure, 2235 * The OS initialization, configuring of the adapter private structure,
2236 * and a hardware reset occur. 2236 * and a hardware reset occur.
2237 */ 2237 */
2238static int __devinit atl1e_probe(struct pci_dev *pdev, 2238static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2239 const struct pci_device_id *ent)
2240{ 2239{
2241 struct net_device *netdev; 2240 struct net_device *netdev;
2242 struct atl1e_adapter *adapter = NULL; 2241 struct atl1e_adapter *adapter = NULL;
@@ -2387,7 +2386,7 @@ err_dma:
2387 * Hot-Plug event, or because the driver is going to be removed from 2386 * Hot-Plug event, or because the driver is going to be removed from
2388 * memory. 2387 * memory.
2389 */ 2388 */
2390static void __devexit atl1e_remove(struct pci_dev *pdev) 2389static void atl1e_remove(struct pci_dev *pdev)
2391{ 2390{
2392 struct net_device *netdev = pci_get_drvdata(pdev); 2391 struct net_device *netdev = pci_get_drvdata(pdev);
2393 struct atl1e_adapter *adapter = netdev_priv(netdev); 2392 struct atl1e_adapter *adapter = netdev_priv(netdev);
@@ -2499,7 +2498,7 @@ static struct pci_driver atl1e_driver = {
2499 .name = atl1e_driver_name, 2498 .name = atl1e_driver_name,
2500 .id_table = atl1e_pci_tbl, 2499 .id_table = atl1e_pci_tbl,
2501 .probe = atl1e_probe, 2500 .probe = atl1e_probe,
2502 .remove = __devexit_p(atl1e_remove), 2501 .remove = atl1e_remove,
2503 /* Power Management Hooks */ 2502 /* Power Management Hooks */
2504#ifdef CONFIG_PM 2503#ifdef CONFIG_PM
2505 .suspend = atl1e_suspend, 2504 .suspend = atl1e_suspend,
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index b5086f1e637f..fa314282c9ad 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -40,7 +40,7 @@
40#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET } 40#define ATL1E_PARAM_INIT { [0 ... ATL1E_MAX_NIC] = OPTION_UNSET }
41 41
42#define ATL1E_PARAM(x, desc) \ 42#define ATL1E_PARAM(x, desc) \
43 static int __devinitdata x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ 43 static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
44 static unsigned int num_##x; \ 44 static unsigned int num_##x; \
45 module_param_array_named(x, x, int, &num_##x, 0); \ 45 module_param_array_named(x, x, int, &num_##x, 0); \
46 MODULE_PARM_DESC(x, desc); 46 MODULE_PARM_DESC(x, desc);
@@ -116,7 +116,8 @@ struct atl1e_option {
116 } arg; 116 } arg;
117}; 117};
118 118
119static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter) 119static int atl1e_validate_option(int *value, struct atl1e_option *opt,
120 struct atl1e_adapter *adapter)
120{ 121{
121 if (*value == OPTION_UNSET) { 122 if (*value == OPTION_UNSET) {
122 *value = opt->def; 123 *value = opt->def;
@@ -177,7 +178,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
177 * value exists, a default value is used. The final value is stored 178 * value exists, a default value is used. The final value is stored
178 * in a variable in the adapter structure. 179 * in a variable in the adapter structure.
179 */ 180 */
180void __devinit atl1e_check_options(struct atl1e_adapter *adapter) 181void atl1e_check_options(struct atl1e_adapter *adapter)
181{ 182{
182 int bd = adapter->bd_number; 183 int bd = adapter->bd_number;
183 184
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7bae2ad7a7c0..71b3d7daa21d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -113,7 +113,7 @@ static const struct ethtool_ops atl1_ethtool_ops;
113 * 113 *
114 * Default Value: 100 (200us) 114 * Default Value: 100 (200us)
115 */ 115 */
116static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; 116static int int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
117static unsigned int num_int_mod_timer; 117static unsigned int num_int_mod_timer;
118module_param_array_named(int_mod_timer, int_mod_timer, int, 118module_param_array_named(int_mod_timer, int_mod_timer, int,
119 &num_int_mod_timer, 0); 119 &num_int_mod_timer, 0);
@@ -143,8 +143,8 @@ struct atl1_option {
143 } arg; 143 } arg;
144}; 144};
145 145
146static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, 146static int atl1_validate_option(int *value, struct atl1_option *opt,
147 struct pci_dev *pdev) 147 struct pci_dev *pdev)
148{ 148{
149 if (*value == OPTION_UNSET) { 149 if (*value == OPTION_UNSET) {
150 *value = opt->def; 150 *value = opt->def;
@@ -204,7 +204,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
204 * value exists, a default value is used. The final value is stored 204 * value exists, a default value is used. The final value is stored
205 * in a variable in the adapter structure. 205 * in a variable in the adapter structure.
206 */ 206 */
207static void __devinit atl1_check_options(struct atl1_adapter *adapter) 207static void atl1_check_options(struct atl1_adapter *adapter)
208{ 208{
209 struct pci_dev *pdev = adapter->pdev; 209 struct pci_dev *pdev = adapter->pdev;
210 int bd = adapter->bd_number; 210 int bd = adapter->bd_number;
@@ -945,7 +945,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
945 * Fields are initialized based on PCI device information and 945 * Fields are initialized based on PCI device information and
946 * OS network device settings (MTU size). 946 * OS network device settings (MTU size).
947 */ 947 */
948static int __devinit atl1_sw_init(struct atl1_adapter *adapter) 948static int atl1_sw_init(struct atl1_adapter *adapter)
949{ 949{
950 struct atl1_hw *hw = &adapter->hw; 950 struct atl1_hw *hw = &adapter->hw;
951 struct net_device *netdev = adapter->netdev; 951 struct net_device *netdev = adapter->netdev;
@@ -2934,8 +2934,7 @@ static const struct net_device_ops atl1_netdev_ops = {
2934 * The OS initialization, configuring of the adapter private structure, 2934 * The OS initialization, configuring of the adapter private structure,
2935 * and a hardware reset occur. 2935 * and a hardware reset occur.
2936 */ 2936 */
2937static int __devinit atl1_probe(struct pci_dev *pdev, 2937static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2938 const struct pci_device_id *ent)
2939{ 2938{
2940 struct net_device *netdev; 2939 struct net_device *netdev;
2941 struct atl1_adapter *adapter; 2940 struct atl1_adapter *adapter;
@@ -3113,7 +3112,7 @@ err_request_regions:
3113 * Hot-Plug event, or because the driver is going to be removed from 3112 * Hot-Plug event, or because the driver is going to be removed from
3114 * memory. 3113 * memory.
3115 */ 3114 */
3116static void __devexit atl1_remove(struct pci_dev *pdev) 3115static void atl1_remove(struct pci_dev *pdev)
3117{ 3116{
3118 struct net_device *netdev = pci_get_drvdata(pdev); 3117 struct net_device *netdev = pci_get_drvdata(pdev);
3119 struct atl1_adapter *adapter; 3118 struct atl1_adapter *adapter;
@@ -3146,7 +3145,7 @@ static struct pci_driver atl1_driver = {
3146 .name = ATLX_DRIVER_NAME, 3145 .name = ATLX_DRIVER_NAME,
3147 .id_table = atl1_pci_tbl, 3146 .id_table = atl1_pci_tbl,
3148 .probe = atl1_probe, 3147 .probe = atl1_probe,
3149 .remove = __devexit_p(atl1_remove), 3148 .remove = atl1_remove,
3150 .shutdown = atl1_shutdown, 3149 .shutdown = atl1_shutdown,
3151 .driver.pm = ATL1_PM_OPS, 3150 .driver.pm = ATL1_PM_OPS,
3152}; 3151};
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 623dd8635c46..aab83a2d4e07 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -83,7 +83,7 @@ static void atl2_check_options(struct atl2_adapter *adapter);
83 * Fields are initialized based on PCI device information and 83 * Fields are initialized based on PCI device information and
84 * OS network device settings (MTU size). 84 * OS network device settings (MTU size).
85 */ 85 */
86static int __devinit atl2_sw_init(struct atl2_adapter *adapter) 86static int atl2_sw_init(struct atl2_adapter *adapter)
87{ 87{
88 struct atl2_hw *hw = &adapter->hw; 88 struct atl2_hw *hw = &adapter->hw;
89 struct pci_dev *pdev = adapter->pdev; 89 struct pci_dev *pdev = adapter->pdev;
@@ -1338,8 +1338,7 @@ static const struct net_device_ops atl2_netdev_ops = {
1338 * The OS initialization, configuring of the adapter private structure, 1338 * The OS initialization, configuring of the adapter private structure,
1339 * and a hardware reset occur. 1339 * and a hardware reset occur.
1340 */ 1340 */
1341static int __devinit atl2_probe(struct pci_dev *pdev, 1341static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1342 const struct pci_device_id *ent)
1343{ 1342{
1344 struct net_device *netdev; 1343 struct net_device *netdev;
1345 struct atl2_adapter *adapter; 1344 struct atl2_adapter *adapter;
@@ -1498,7 +1497,7 @@ err_dma:
1498 */ 1497 */
1499/* FIXME: write the original MAC address back in case it was changed from a 1498/* FIXME: write the original MAC address back in case it was changed from a
1500 * BIOS-set value, as in atl1 -- CHS */ 1499 * BIOS-set value, as in atl1 -- CHS */
1501static void __devexit atl2_remove(struct pci_dev *pdev) 1500static void atl2_remove(struct pci_dev *pdev)
1502{ 1501{
1503 struct net_device *netdev = pci_get_drvdata(pdev); 1502 struct net_device *netdev = pci_get_drvdata(pdev);
1504 struct atl2_adapter *adapter = netdev_priv(netdev); 1503 struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1705,7 +1704,7 @@ static struct pci_driver atl2_driver = {
1705 .name = atl2_driver_name, 1704 .name = atl2_driver_name,
1706 .id_table = atl2_pci_tbl, 1705 .id_table = atl2_pci_tbl,
1707 .probe = atl2_probe, 1706 .probe = atl2_probe,
1708 .remove = __devexit_p(atl2_remove), 1707 .remove = atl2_remove,
1709 /* Power Management Hooks */ 1708 /* Power Management Hooks */
1710 .suspend = atl2_suspend, 1709 .suspend = atl2_suspend,
1711#ifdef CONFIG_PM 1710#ifdef CONFIG_PM
@@ -2845,12 +2844,12 @@ static void atl2_force_ps(struct atl2_hw *hw)
2845 */ 2844 */
2846 2845
2847#define ATL2_PARAM(X, desc) \ 2846#define ATL2_PARAM(X, desc) \
2848 static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \ 2847 static const int X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
2849 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \ 2848 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
2850 MODULE_PARM_DESC(X, desc); 2849 MODULE_PARM_DESC(X, desc);
2851#else 2850#else
2852#define ATL2_PARAM(X, desc) \ 2851#define ATL2_PARAM(X, desc) \
2853 static int __devinitdata X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \ 2852 static int X[ATL2_MAX_NIC+1] = ATL2_PARAM_INIT; \
2854 static unsigned int num_##X; \ 2853 static unsigned int num_##X; \
2855 module_param_array_named(X, X, int, &num_##X, 0); \ 2854 module_param_array_named(X, X, int, &num_##X, 0); \
2856 MODULE_PARM_DESC(X, desc); 2855 MODULE_PARM_DESC(X, desc);
@@ -2934,7 +2933,7 @@ struct atl2_option {
2934 } arg; 2933 } arg;
2935}; 2934};
2936 2935
2937static int __devinit atl2_validate_option(int *value, struct atl2_option *opt) 2936static int atl2_validate_option(int *value, struct atl2_option *opt)
2938{ 2937{
2939 int i; 2938 int i;
2940 struct atl2_opt_list *ent; 2939 struct atl2_opt_list *ent;
@@ -2992,7 +2991,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
2992 * value exists, a default value is used. The final value is stored 2991 * value exists, a default value is used. The final value is stored
2993 * in a variable in the adapter structure. 2992 * in a variable in the adapter structure.
2994 */ 2993 */
2995static void __devinit atl2_check_options(struct atl2_adapter *adapter) 2994static void atl2_check_options(struct atl2_adapter *adapter)
2996{ 2995{
2997 int val; 2996 int val;
2998 struct atl2_option opt; 2997 struct atl2_option opt;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 4bd416b72e65..f55267363f35 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -102,6 +102,7 @@ config TIGON3
102 depends on PCI 102 depends on PCI
103 select PHYLIB 103 select PHYLIB
104 select HWMON 104 select HWMON
105 select PTP_1588_CLOCK
105 ---help--- 106 ---help---
106 This driver supports Broadcom Tigon3 based gigabit Ethernet cards. 107 This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
107 108
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 9786c0e9890e..219f6226fcb1 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2083,7 +2083,7 @@ out:
2083 return err; 2083 return err;
2084} 2084}
2085 2085
2086static int __devinit b44_get_invariants(struct b44 *bp) 2086static int b44_get_invariants(struct b44 *bp)
2087{ 2087{
2088 struct ssb_device *sdev = bp->sdev; 2088 struct ssb_device *sdev = bp->sdev;
2089 int err = 0; 2089 int err = 0;
@@ -2141,8 +2141,8 @@ static const struct net_device_ops b44_netdev_ops = {
2141#endif 2141#endif
2142}; 2142};
2143 2143
2144static int __devinit b44_init_one(struct ssb_device *sdev, 2144static int b44_init_one(struct ssb_device *sdev,
2145 const struct ssb_device_id *ent) 2145 const struct ssb_device_id *ent)
2146{ 2146{
2147 struct net_device *dev; 2147 struct net_device *dev;
2148 struct b44 *bp; 2148 struct b44 *bp;
@@ -2249,7 +2249,7 @@ out:
2249 return err; 2249 return err;
2250} 2250}
2251 2251
2252static void __devexit b44_remove_one(struct ssb_device *sdev) 2252static void b44_remove_one(struct ssb_device *sdev)
2253{ 2253{
2254 struct net_device *dev = ssb_get_drvdata(sdev); 2254 struct net_device *dev = ssb_get_drvdata(sdev);
2255 2255
@@ -2340,7 +2340,7 @@ static struct ssb_driver b44_ssb_driver = {
2340 .name = DRV_MODULE_NAME, 2340 .name = DRV_MODULE_NAME,
2341 .id_table = b44_ssb_tbl, 2341 .id_table = b44_ssb_tbl,
2342 .probe = b44_init_one, 2342 .probe = b44_init_one,
2343 .remove = __devexit_p(b44_remove_one), 2343 .remove = b44_remove_one,
2344 .suspend = b44_suspend, 2344 .suspend = b44_suspend,
2345 .resume = b44_resume, 2345 .resume = b44_resume,
2346}; 2346};
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index c7ca7ec065ee..39387d67b722 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1612,7 +1612,7 @@ static const struct net_device_ops bcm_enet_ops = {
1612/* 1612/*
1613 * allocate netdevice, request register memory and register device. 1613 * allocate netdevice, request register memory and register device.
1614 */ 1614 */
1615static int __devinit bcm_enet_probe(struct platform_device *pdev) 1615static int bcm_enet_probe(struct platform_device *pdev)
1616{ 1616{
1617 struct bcm_enet_priv *priv; 1617 struct bcm_enet_priv *priv;
1618 struct net_device *dev; 1618 struct net_device *dev;
@@ -1830,7 +1830,7 @@ out:
1830/* 1830/*
1831 * exit func, stops hardware and unregisters netdevice 1831 * exit func, stops hardware and unregisters netdevice
1832 */ 1832 */
1833static int __devexit bcm_enet_remove(struct platform_device *pdev) 1833static int bcm_enet_remove(struct platform_device *pdev)
1834{ 1834{
1835 struct bcm_enet_priv *priv; 1835 struct bcm_enet_priv *priv;
1836 struct net_device *dev; 1836 struct net_device *dev;
@@ -1877,7 +1877,7 @@ static int __devexit bcm_enet_remove(struct platform_device *pdev)
1877 1877
1878struct platform_driver bcm63xx_enet_driver = { 1878struct platform_driver bcm63xx_enet_driver = {
1879 .probe = bcm_enet_probe, 1879 .probe = bcm_enet_probe,
1880 .remove = __devexit_p(bcm_enet_remove), 1880 .remove = bcm_enet_remove,
1881 .driver = { 1881 .driver = {
1882 .name = "bcm63xx_enet", 1882 .name = "bcm63xx_enet",
1883 .owner = THIS_MODULE, 1883 .owner = THIS_MODULE,
@@ -1887,7 +1887,7 @@ struct platform_driver bcm63xx_enet_driver = {
1887/* 1887/*
1888 * reserve & remap memory space shared between all macs 1888 * reserve & remap memory space shared between all macs
1889 */ 1889 */
1890static int __devinit bcm_enet_shared_probe(struct platform_device *pdev) 1890static int bcm_enet_shared_probe(struct platform_device *pdev)
1891{ 1891{
1892 struct resource *res; 1892 struct resource *res;
1893 unsigned int iomem_size; 1893 unsigned int iomem_size;
@@ -1908,7 +1908,7 @@ static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
1908 return 0; 1908 return 0;
1909} 1909}
1910 1910
1911static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) 1911static int bcm_enet_shared_remove(struct platform_device *pdev)
1912{ 1912{
1913 struct resource *res; 1913 struct resource *res;
1914 1914
@@ -1924,7 +1924,7 @@ static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
1924 */ 1924 */
1925struct platform_driver bcm63xx_enet_shared_driver = { 1925struct platform_driver bcm63xx_enet_shared_driver = {
1926 .probe = bcm_enet_shared_probe, 1926 .probe = bcm_enet_shared_probe,
1927 .remove = __devexit_p(bcm_enet_shared_remove), 1927 .remove = bcm_enet_shared_remove,
1928 .driver = { 1928 .driver = {
1929 .name = "bcm63xx_enet_shared", 1929 .name = "bcm63xx_enet_shared",
1930 .owner = THIS_MODULE, 1930 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index d4310700c7a7..a1adfaf87f49 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -71,7 +71,7 @@
71/* Time in jiffies before concluding the transmitter is hung. */ 71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT (5*HZ) 72#define TX_TIMEOUT (5*HZ)
73 73
74static char version[] __devinitdata = 74static char version[] =
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76 76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>"); 77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
@@ -106,7 +106,7 @@ typedef enum {
106/* indexed by board_t, above */ 106/* indexed by board_t, above */
107static struct { 107static struct {
108 char *name; 108 char *name;
109} board_info[] __devinitdata = { 109} board_info[] = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" }, 110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" }, 111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" }, 112 { "HP NC370i Multifunction Gigabit Server Adapter" },
@@ -260,10 +260,10 @@ static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
260 * needs to be skipped. 260 * needs to be skipped.
261 */ 261 */
262 diff = txr->tx_prod - txr->tx_cons; 262 diff = txr->tx_prod - txr->tx_cons;
263 if (unlikely(diff >= TX_DESC_CNT)) { 263 if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
264 diff &= 0xffff; 264 diff &= 0xffff;
265 if (diff == TX_DESC_CNT) 265 if (diff == BNX2_TX_DESC_CNT)
266 diff = MAX_TX_DESC_CNT; 266 diff = BNX2_MAX_TX_DESC_CNT;
267 } 267 }
268 return bp->tx_ring_size - diff; 268 return bp->tx_ring_size - diff;
269} 269}
@@ -274,8 +274,8 @@ bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
274 u32 val; 274 u32 val;
275 275
276 spin_lock_bh(&bp->indirect_lock); 276 spin_lock_bh(&bp->indirect_lock);
277 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); 277 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW); 278 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
279 spin_unlock_bh(&bp->indirect_lock); 279 spin_unlock_bh(&bp->indirect_lock);
280 return val; 280 return val;
281} 281}
@@ -284,8 +284,8 @@ static void
284bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val) 284bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285{ 285{
286 spin_lock_bh(&bp->indirect_lock); 286 spin_lock_bh(&bp->indirect_lock);
287 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset); 287 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val); 288 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289 spin_unlock_bh(&bp->indirect_lock); 289 spin_unlock_bh(&bp->indirect_lock);
290} 290}
291 291
@@ -306,21 +306,21 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306{ 306{
307 offset += cid_addr; 307 offset += cid_addr;
308 spin_lock_bh(&bp->indirect_lock); 308 spin_lock_bh(&bp->indirect_lock);
309 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 309 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
310 int i; 310 int i;
311 311
312 REG_WR(bp, BNX2_CTX_CTX_DATA, val); 312 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
313 REG_WR(bp, BNX2_CTX_CTX_CTRL, 313 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
314 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ); 314 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
315 for (i = 0; i < 5; i++) { 315 for (i = 0; i < 5; i++) {
316 val = REG_RD(bp, BNX2_CTX_CTX_CTRL); 316 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
317 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0) 317 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
318 break; 318 break;
319 udelay(5); 319 udelay(5);
320 } 320 }
321 } else { 321 } else {
322 REG_WR(bp, BNX2_CTX_DATA_ADR, offset); 322 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
323 REG_WR(bp, BNX2_CTX_DATA, val); 323 BNX2_WR(bp, BNX2_CTX_DATA, val);
324 } 324 }
325 spin_unlock_bh(&bp->indirect_lock); 325 spin_unlock_bh(&bp->indirect_lock);
326} 326}
@@ -434,7 +434,6 @@ struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
434 434
435 return cp; 435 return cp;
436} 436}
437EXPORT_SYMBOL(bnx2_cnic_probe);
438 437
439static void 438static void
440bnx2_cnic_stop(struct bnx2 *bp) 439bnx2_cnic_stop(struct bnx2 *bp)
@@ -494,11 +493,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
494 int i, ret; 493 int i, ret;
495 494
496 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { 495 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
497 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); 496 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
498 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; 497 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
499 498
500 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); 499 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
501 REG_RD(bp, BNX2_EMAC_MDIO_MODE); 500 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
502 501
503 udelay(40); 502 udelay(40);
504 } 503 }
@@ -506,16 +505,16 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
506 val1 = (bp->phy_addr << 21) | (reg << 16) | 505 val1 = (bp->phy_addr << 21) | (reg << 16) |
507 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT | 506 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
508 BNX2_EMAC_MDIO_COMM_START_BUSY; 507 BNX2_EMAC_MDIO_COMM_START_BUSY;
509 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); 508 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
510 509
511 for (i = 0; i < 50; i++) { 510 for (i = 0; i < 50; i++) {
512 udelay(10); 511 udelay(10);
513 512
514 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); 513 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
515 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { 514 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
516 udelay(5); 515 udelay(5);
517 516
518 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); 517 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
519 val1 &= BNX2_EMAC_MDIO_COMM_DATA; 518 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
520 519
521 break; 520 break;
@@ -532,11 +531,11 @@ bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
532 } 531 }
533 532
534 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { 533 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
535 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); 534 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
536 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; 535 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
537 536
538 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); 537 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
539 REG_RD(bp, BNX2_EMAC_MDIO_MODE); 538 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
540 539
541 udelay(40); 540 udelay(40);
542 } 541 }
@@ -551,11 +550,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
551 int i, ret; 550 int i, ret;
552 551
553 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { 552 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
554 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); 553 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
555 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL; 554 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
556 555
557 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); 556 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
558 REG_RD(bp, BNX2_EMAC_MDIO_MODE); 557 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
559 558
560 udelay(40); 559 udelay(40);
561 } 560 }
@@ -563,12 +562,12 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
563 val1 = (bp->phy_addr << 21) | (reg << 16) | val | 562 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
564 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE | 563 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
565 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT; 564 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
566 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1); 565 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
567 566
568 for (i = 0; i < 50; i++) { 567 for (i = 0; i < 50; i++) {
569 udelay(10); 568 udelay(10);
570 569
571 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM); 570 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
572 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) { 571 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
573 udelay(5); 572 udelay(5);
574 break; 573 break;
@@ -581,11 +580,11 @@ bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
581 ret = 0; 580 ret = 0;
582 581
583 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) { 582 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
584 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE); 583 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
585 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL; 584 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
586 585
587 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1); 586 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
588 REG_RD(bp, BNX2_EMAC_MDIO_MODE); 587 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
589 588
590 udelay(40); 589 udelay(40);
591 } 590 }
@@ -601,10 +600,10 @@ bnx2_disable_int(struct bnx2 *bp)
601 600
602 for (i = 0; i < bp->irq_nvecs; i++) { 601 for (i = 0; i < bp->irq_nvecs; i++) {
603 bnapi = &bp->bnx2_napi[i]; 602 bnapi = &bp->bnx2_napi[i];
604 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 603 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
605 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 604 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
606 } 605 }
607 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); 606 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
608} 607}
609 608
610static void 609static void
@@ -616,16 +615,16 @@ bnx2_enable_int(struct bnx2 *bp)
616 for (i = 0; i < bp->irq_nvecs; i++) { 615 for (i = 0; i < bp->irq_nvecs; i++) {
617 bnapi = &bp->bnx2_napi[i]; 616 bnapi = &bp->bnx2_napi[i];
618 617
619 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 618 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
620 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 619 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
621 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | 620 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
622 bnapi->last_status_idx); 621 bnapi->last_status_idx);
623 622
624 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 623 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
625 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 624 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
626 bnapi->last_status_idx); 625 bnapi->last_status_idx);
627 } 626 }
628 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); 627 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
629} 628}
630 629
631static void 630static void
@@ -824,7 +823,7 @@ bnx2_free_mem(struct bnx2 *bp)
824 823
825 for (i = 0; i < bp->ctx_pages; i++) { 824 for (i = 0; i < bp->ctx_pages; i++) {
826 if (bp->ctx_blk[i]) { 825 if (bp->ctx_blk[i]) {
827 dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE, 826 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
828 bp->ctx_blk[i], 827 bp->ctx_blk[i],
829 bp->ctx_blk_mapping[i]); 828 bp->ctx_blk_mapping[i]);
830 bp->ctx_blk[i] = NULL; 829 bp->ctx_blk[i] = NULL;
@@ -887,13 +886,13 @@ bnx2_alloc_mem(struct bnx2 *bp)
887 886
888 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size; 887 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
889 888
890 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 889 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
891 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE; 890 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
892 if (bp->ctx_pages == 0) 891 if (bp->ctx_pages == 0)
893 bp->ctx_pages = 1; 892 bp->ctx_pages = 1;
894 for (i = 0; i < bp->ctx_pages; i++) { 893 for (i = 0; i < bp->ctx_pages; i++) {
895 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev, 894 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
896 BCM_PAGE_SIZE, 895 BNX2_PAGE_SIZE,
897 &bp->ctx_blk_mapping[i], 896 &bp->ctx_blk_mapping[i],
898 GFP_KERNEL); 897 GFP_KERNEL);
899 if (bp->ctx_blk[i] == NULL) 898 if (bp->ctx_blk[i] == NULL)
@@ -1034,7 +1033,7 @@ bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1034 } 1033 }
1035 1034
1036 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1035 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1037 (CHIP_NUM(bp) == CHIP_NUM_5708)) { 1036 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1038 u32 val; 1037 u32 val;
1039 1038
1040 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val); 1039 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
@@ -1294,14 +1293,14 @@ bnx2_set_mac_link(struct bnx2 *bp)
1294{ 1293{
1295 u32 val; 1294 u32 val;
1296 1295
1297 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620); 1296 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1298 if (bp->link_up && (bp->line_speed == SPEED_1000) && 1297 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1299 (bp->duplex == DUPLEX_HALF)) { 1298 (bp->duplex == DUPLEX_HALF)) {
1300 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff); 1299 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1301 } 1300 }
1302 1301
1303 /* Configure the EMAC mode register. */ 1302 /* Configure the EMAC mode register. */
1304 val = REG_RD(bp, BNX2_EMAC_MODE); 1303 val = BNX2_RD(bp, BNX2_EMAC_MODE);
1305 1304
1306 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | 1305 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1307 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | 1306 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
@@ -1310,7 +1309,7 @@ bnx2_set_mac_link(struct bnx2 *bp)
1310 if (bp->link_up) { 1309 if (bp->link_up) {
1311 switch (bp->line_speed) { 1310 switch (bp->line_speed) {
1312 case SPEED_10: 1311 case SPEED_10:
1313 if (CHIP_NUM(bp) != CHIP_NUM_5706) { 1312 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1314 val |= BNX2_EMAC_MODE_PORT_MII_10M; 1313 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1315 break; 1314 break;
1316 } 1315 }
@@ -1333,25 +1332,25 @@ bnx2_set_mac_link(struct bnx2 *bp)
1333 /* Set the MAC to operate in the appropriate duplex mode. */ 1332 /* Set the MAC to operate in the appropriate duplex mode. */
1334 if (bp->duplex == DUPLEX_HALF) 1333 if (bp->duplex == DUPLEX_HALF)
1335 val |= BNX2_EMAC_MODE_HALF_DUPLEX; 1334 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1336 REG_WR(bp, BNX2_EMAC_MODE, val); 1335 BNX2_WR(bp, BNX2_EMAC_MODE, val);
1337 1336
1338 /* Enable/disable rx PAUSE. */ 1337 /* Enable/disable rx PAUSE. */
1339 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN; 1338 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1340 1339
1341 if (bp->flow_ctrl & FLOW_CTRL_RX) 1340 if (bp->flow_ctrl & FLOW_CTRL_RX)
1342 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN; 1341 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1343 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode); 1342 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1344 1343
1345 /* Enable/disable tx PAUSE. */ 1344 /* Enable/disable tx PAUSE. */
1346 val = REG_RD(bp, BNX2_EMAC_TX_MODE); 1345 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1347 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN; 1346 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1348 1347
1349 if (bp->flow_ctrl & FLOW_CTRL_TX) 1348 if (bp->flow_ctrl & FLOW_CTRL_TX)
1350 val |= BNX2_EMAC_TX_MODE_FLOW_EN; 1349 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1351 REG_WR(bp, BNX2_EMAC_TX_MODE, val); 1350 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1352 1351
1353 /* Acknowledge the interrupt. */ 1352 /* Acknowledge the interrupt. */
1354 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE); 1353 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1355 1354
1356 bnx2_init_all_rx_contexts(bp); 1355 bnx2_init_all_rx_contexts(bp);
1357} 1356}
@@ -1360,7 +1359,7 @@ static void
1360bnx2_enable_bmsr1(struct bnx2 *bp) 1359bnx2_enable_bmsr1(struct bnx2 *bp)
1361{ 1360{
1362 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1361 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1363 (CHIP_NUM(bp) == CHIP_NUM_5709)) 1362 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1364 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1363 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1365 MII_BNX2_BLK_ADDR_GP_STATUS); 1364 MII_BNX2_BLK_ADDR_GP_STATUS);
1366} 1365}
@@ -1369,7 +1368,7 @@ static void
1369bnx2_disable_bmsr1(struct bnx2 *bp) 1368bnx2_disable_bmsr1(struct bnx2 *bp)
1370{ 1369{
1371 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1370 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1372 (CHIP_NUM(bp) == CHIP_NUM_5709)) 1371 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1373 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1372 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1374 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1373 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1375} 1374}
@@ -1386,7 +1385,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp)
1386 if (bp->autoneg & AUTONEG_SPEED) 1385 if (bp->autoneg & AUTONEG_SPEED)
1387 bp->advertising |= ADVERTISED_2500baseX_Full; 1386 bp->advertising |= ADVERTISED_2500baseX_Full;
1388 1387
1389 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1388 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1390 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); 1389 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1391 1390
1392 bnx2_read_phy(bp, bp->mii_up1, &up1); 1391 bnx2_read_phy(bp, bp->mii_up1, &up1);
@@ -1396,7 +1395,7 @@ bnx2_test_and_enable_2g5(struct bnx2 *bp)
1396 ret = 0; 1395 ret = 0;
1397 } 1396 }
1398 1397
1399 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1398 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1400 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1399 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1401 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1400 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1402 1401
@@ -1412,7 +1411,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
1412 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1411 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1413 return 0; 1412 return 0;
1414 1413
1415 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1414 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1416 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G); 1415 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1417 1416
1418 bnx2_read_phy(bp, bp->mii_up1, &up1); 1417 bnx2_read_phy(bp, bp->mii_up1, &up1);
@@ -1422,7 +1421,7 @@ bnx2_test_and_disable_2g5(struct bnx2 *bp)
1422 ret = 1; 1421 ret = 1;
1423 } 1422 }
1424 1423
1425 if (CHIP_NUM(bp) == CHIP_NUM_5709) 1424 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1426 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1425 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1427 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1426 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1428 1427
@@ -1438,7 +1437,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1438 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1437 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1439 return; 1438 return;
1440 1439
1441 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 1440 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1442 u32 val; 1441 u32 val;
1443 1442
1444 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1443 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
@@ -1454,7 +1453,7 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1454 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1453 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1455 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1454 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1456 1455
1457 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1456 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1458 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1457 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1459 if (!err) 1458 if (!err)
1460 bmcr |= BCM5708S_BMCR_FORCE_2500; 1459 bmcr |= BCM5708S_BMCR_FORCE_2500;
@@ -1482,7 +1481,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1482 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)) 1481 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483 return; 1482 return;
1484 1483
1485 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 1484 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1486 u32 val; 1485 u32 val;
1487 1486
1488 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, 1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
@@ -1496,7 +1495,7 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1496 MII_BNX2_BLK_ADDR_COMBO_IEEEB0); 1495 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1497 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1496 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1498 1497
1499 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1498 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1500 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1499 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1501 if (!err) 1500 if (!err)
1502 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1501 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
@@ -1547,14 +1546,14 @@ bnx2_set_link(struct bnx2 *bp)
1547 bnx2_disable_bmsr1(bp); 1546 bnx2_disable_bmsr1(bp);
1548 1547
1549 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) && 1548 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1550 (CHIP_NUM(bp) == CHIP_NUM_5706)) { 1549 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1551 u32 val, an_dbg; 1550 u32 val, an_dbg;
1552 1551
1553 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) { 1552 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1554 bnx2_5706s_force_link_dn(bp, 0); 1553 bnx2_5706s_force_link_dn(bp, 0);
1555 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN; 1554 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1556 } 1555 }
1557 val = REG_RD(bp, BNX2_EMAC_STATUS); 1556 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1558 1557
1559 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG); 1558 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1560 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg); 1559 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
@@ -1571,11 +1570,11 @@ bnx2_set_link(struct bnx2 *bp)
1571 bp->link_up = 1; 1570 bp->link_up = 1;
1572 1571
1573 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 1572 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1574 if (CHIP_NUM(bp) == CHIP_NUM_5706) 1573 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1575 bnx2_5706s_linkup(bp); 1574 bnx2_5706s_linkup(bp);
1576 else if (CHIP_NUM(bp) == CHIP_NUM_5708) 1575 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1577 bnx2_5708s_linkup(bp); 1576 bnx2_5708s_linkup(bp);
1578 else if (CHIP_NUM(bp) == CHIP_NUM_5709) 1577 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1579 bnx2_5709s_linkup(bp); 1578 bnx2_5709s_linkup(bp);
1580 } 1579 }
1581 else { 1580 else {
@@ -1757,7 +1756,7 @@ __acquires(&bp->phy_lock)
1757 new_bmcr = bmcr & ~BMCR_ANENABLE; 1756 new_bmcr = bmcr & ~BMCR_ANENABLE;
1758 new_bmcr |= BMCR_SPEED1000; 1757 new_bmcr |= BMCR_SPEED1000;
1759 1758
1760 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 1759 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1761 if (bp->req_line_speed == SPEED_2500) 1760 if (bp->req_line_speed == SPEED_2500)
1762 bnx2_enable_forced_2g5(bp); 1761 bnx2_enable_forced_2g5(bp);
1763 else if (bp->req_line_speed == SPEED_1000) { 1762 else if (bp->req_line_speed == SPEED_1000) {
@@ -1765,7 +1764,7 @@ __acquires(&bp->phy_lock)
1765 new_bmcr &= ~0x2000; 1764 new_bmcr &= ~0x2000;
1766 } 1765 }
1767 1766
1768 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1767 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1769 if (bp->req_line_speed == SPEED_2500) 1768 if (bp->req_line_speed == SPEED_2500)
1770 new_bmcr |= BCM5708S_BMCR_FORCE_2500; 1769 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1771 else 1770 else
@@ -1942,8 +1941,8 @@ bnx2_send_heart_beat(struct bnx2 *bp)
1942 spin_lock(&bp->indirect_lock); 1941 spin_lock(&bp->indirect_lock);
1943 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK); 1942 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1944 addr = bp->shmem_base + BNX2_DRV_PULSE_MB; 1943 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1945 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr); 1944 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1946 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg); 1945 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1947 spin_unlock(&bp->indirect_lock); 1946 spin_unlock(&bp->indirect_lock);
1948} 1947}
1949 1948
@@ -2230,9 +2229,9 @@ bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2230 bnx2_write_phy(bp, BCM5708S_UP1, val); 2229 bnx2_write_phy(bp, BCM5708S_UP1, val);
2231 } 2230 }
2232 2231
2233 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 2232 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2234 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 2233 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2235 (CHIP_ID(bp) == CHIP_ID_5708_B1)) { 2234 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2236 /* increase tx signal amplitude */ 2235 /* increase tx signal amplitude */
2237 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, 2236 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2238 BCM5708S_BLK_ADDR_TX_MISC); 2237 BCM5708S_BLK_ADDR_TX_MISC);
@@ -2268,8 +2267,8 @@ bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2268 2267
2269 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT; 2268 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2270 2269
2271 if (CHIP_NUM(bp) == CHIP_NUM_5706) 2270 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2272 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300); 2271 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2273 2272
2274 if (bp->dev->mtu > 1500) { 2273 if (bp->dev->mtu > 1500) {
2275 u32 val; 2274 u32 val;
@@ -2368,7 +2367,7 @@ __acquires(&bp->phy_lock)
2368 bp->mii_adv = MII_ADVERTISE; 2367 bp->mii_adv = MII_ADVERTISE;
2369 bp->mii_lpa = MII_LPA; 2368 bp->mii_lpa = MII_LPA;
2370 2369
2371 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); 2370 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2372 2371
2373 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) 2372 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2374 goto setup_phy; 2373 goto setup_phy;
@@ -2379,11 +2378,11 @@ __acquires(&bp->phy_lock)
2379 bp->phy_id |= val & 0xffff; 2378 bp->phy_id |= val & 0xffff;
2380 2379
2381 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 2380 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2382 if (CHIP_NUM(bp) == CHIP_NUM_5706) 2381 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2383 rc = bnx2_init_5706s_phy(bp, reset_phy); 2382 rc = bnx2_init_5706s_phy(bp, reset_phy);
2384 else if (CHIP_NUM(bp) == CHIP_NUM_5708) 2383 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2385 rc = bnx2_init_5708s_phy(bp, reset_phy); 2384 rc = bnx2_init_5708s_phy(bp, reset_phy);
2386 else if (CHIP_NUM(bp) == CHIP_NUM_5709) 2385 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2387 rc = bnx2_init_5709s_phy(bp, reset_phy); 2386 rc = bnx2_init_5709s_phy(bp, reset_phy);
2388 } 2387 }
2389 else { 2388 else {
@@ -2402,10 +2401,10 @@ bnx2_set_mac_loopback(struct bnx2 *bp)
2402{ 2401{
2403 u32 mac_mode; 2402 u32 mac_mode;
2404 2403
2405 mac_mode = REG_RD(bp, BNX2_EMAC_MODE); 2404 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2406 mac_mode &= ~BNX2_EMAC_MODE_PORT; 2405 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2407 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK; 2406 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2408 REG_WR(bp, BNX2_EMAC_MODE, mac_mode); 2407 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2409 bp->link_up = 1; 2408 bp->link_up = 1;
2410 return 0; 2409 return 0;
2411} 2410}
@@ -2431,13 +2430,13 @@ bnx2_set_phy_loopback(struct bnx2 *bp)
2431 msleep(100); 2430 msleep(100);
2432 } 2431 }
2433 2432
2434 mac_mode = REG_RD(bp, BNX2_EMAC_MODE); 2433 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2435 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX | 2434 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2436 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK | 2435 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2437 BNX2_EMAC_MODE_25G_MODE); 2436 BNX2_EMAC_MODE_25G_MODE);
2438 2437
2439 mac_mode |= BNX2_EMAC_MODE_PORT_GMII; 2438 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2440 REG_WR(bp, BNX2_EMAC_MODE, mac_mode); 2439 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2441 bp->link_up = 1; 2440 bp->link_up = 1;
2442 return 0; 2441 return 0;
2443} 2442}
@@ -2449,7 +2448,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
2449 u32 mcp_p0, mcp_p1; 2448 u32 mcp_p0, mcp_p1;
2450 2449
2451 netdev_err(dev, "<--- start MCP states dump --->\n"); 2450 netdev_err(dev, "<--- start MCP states dump --->\n");
2452 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 2451 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2453 mcp_p0 = BNX2_MCP_STATE_P0; 2452 mcp_p0 = BNX2_MCP_STATE_P0;
2454 mcp_p1 = BNX2_MCP_STATE_P1; 2453 mcp_p1 = BNX2_MCP_STATE_P1;
2455 } else { 2454 } else {
@@ -2538,10 +2537,10 @@ bnx2_init_5709_context(struct bnx2 *bp)
2538 u32 val; 2537 u32 val;
2539 2538
2540 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12); 2539 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2541 val |= (BCM_PAGE_BITS - 8) << 16; 2540 val |= (BNX2_PAGE_BITS - 8) << 16;
2542 REG_WR(bp, BNX2_CTX_COMMAND, val); 2541 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2543 for (i = 0; i < 10; i++) { 2542 for (i = 0; i < 10; i++) {
2544 val = REG_RD(bp, BNX2_CTX_COMMAND); 2543 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2545 if (!(val & BNX2_CTX_COMMAND_MEM_INIT)) 2544 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2546 break; 2545 break;
2547 udelay(2); 2546 udelay(2);
@@ -2553,20 +2552,20 @@ bnx2_init_5709_context(struct bnx2 *bp)
2553 int j; 2552 int j;
2554 2553
2555 if (bp->ctx_blk[i]) 2554 if (bp->ctx_blk[i])
2556 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE); 2555 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2557 else 2556 else
2558 return -ENOMEM; 2557 return -ENOMEM;
2559 2558
2560 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0, 2559 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2561 (bp->ctx_blk_mapping[i] & 0xffffffff) | 2560 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2562 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID); 2561 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2563 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1, 2562 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2564 (u64) bp->ctx_blk_mapping[i] >> 32); 2563 (u64) bp->ctx_blk_mapping[i] >> 32);
2565 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i | 2564 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2566 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 2565 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2567 for (j = 0; j < 10; j++) { 2566 for (j = 0; j < 10; j++) {
2568 2567
2569 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL); 2568 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2570 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 2569 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2571 break; 2570 break;
2572 udelay(5); 2571 udelay(5);
@@ -2591,7 +2590,7 @@ bnx2_init_context(struct bnx2 *bp)
2591 2590
2592 vcid--; 2591 vcid--;
2593 2592
2594 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 2593 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2595 u32 new_vcid; 2594 u32 new_vcid;
2596 2595
2597 vcid_addr = GET_PCID_ADDR(vcid); 2596 vcid_addr = GET_PCID_ADDR(vcid);
@@ -2612,8 +2611,8 @@ bnx2_init_context(struct bnx2 *bp)
2612 vcid_addr += (i << PHY_CTX_SHIFT); 2611 vcid_addr += (i << PHY_CTX_SHIFT);
2613 pcid_addr += (i << PHY_CTX_SHIFT); 2612 pcid_addr += (i << PHY_CTX_SHIFT);
2614 2613
2615 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr); 2614 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2616 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr); 2615 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2617 2616
2618 /* Zero out the context. */ 2617 /* Zero out the context. */
2619 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) 2618 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
@@ -2633,7 +2632,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2633 if (good_mbuf == NULL) 2632 if (good_mbuf == NULL)
2634 return -ENOMEM; 2633 return -ENOMEM;
2635 2634
2636 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 2635 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2637 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE); 2636 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2638 2637
2639 good_mbuf_cnt = 0; 2638 good_mbuf_cnt = 0;
@@ -2678,21 +2677,21 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2678 2677
2679 val = (mac_addr[0] << 8) | mac_addr[1]; 2678 val = (mac_addr[0] << 8) | mac_addr[1];
2680 2679
2681 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val); 2680 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2682 2681
2683 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | 2682 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2684 (mac_addr[4] << 8) | mac_addr[5]; 2683 (mac_addr[4] << 8) | mac_addr[5];
2685 2684
2686 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val); 2685 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2687} 2686}
2688 2687
2689static inline int 2688static inline int
2690bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) 2689bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2691{ 2690{
2692 dma_addr_t mapping; 2691 dma_addr_t mapping;
2693 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; 2692 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2694 struct rx_bd *rxbd = 2693 struct bnx2_rx_bd *rxbd =
2695 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)]; 2694 &rxr->rx_pg_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2696 struct page *page = alloc_page(gfp); 2695 struct page *page = alloc_page(gfp);
2697 2696
2698 if (!page) 2697 if (!page)
@@ -2714,7 +2713,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf
2714static void 2713static void
2715bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) 2714bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2716{ 2715{
2717 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index]; 2716 struct bnx2_sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2718 struct page *page = rx_pg->page; 2717 struct page *page = rx_pg->page;
2719 2718
2720 if (!page) 2719 if (!page)
@@ -2731,9 +2730,10 @@ static inline int
2731bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp) 2730bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2732{ 2731{
2733 u8 *data; 2732 u8 *data;
2734 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index]; 2733 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2735 dma_addr_t mapping; 2734 dma_addr_t mapping;
2736 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; 2735 struct bnx2_rx_bd *rxbd =
2736 &rxr->rx_desc_ring[BNX2_RX_RING(index)][BNX2_RX_IDX(index)];
2737 2737
2738 data = kmalloc(bp->rx_buf_size, gfp); 2738 data = kmalloc(bp->rx_buf_size, gfp);
2739 if (!data) 2739 if (!data)
@@ -2770,9 +2770,9 @@ bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2770 old_link_state = sblk->status_attn_bits_ack & event; 2770 old_link_state = sblk->status_attn_bits_ack & event;
2771 if (new_link_state != old_link_state) { 2771 if (new_link_state != old_link_state) {
2772 if (new_link_state) 2772 if (new_link_state)
2773 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event); 2773 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2774 else 2774 else
2775 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event); 2775 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2776 } else 2776 } else
2777 is_set = 0; 2777 is_set = 0;
2778 2778
@@ -2802,7 +2802,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2802 barrier(); 2802 barrier();
2803 cons = *bnapi->hw_tx_cons_ptr; 2803 cons = *bnapi->hw_tx_cons_ptr;
2804 barrier(); 2804 barrier();
2805 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT)) 2805 if (unlikely((cons & BNX2_MAX_TX_DESC_CNT) == BNX2_MAX_TX_DESC_CNT))
2806 cons++; 2806 cons++;
2807 return cons; 2807 return cons;
2808} 2808}
@@ -2823,11 +2823,11 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2823 sw_cons = txr->tx_cons; 2823 sw_cons = txr->tx_cons;
2824 2824
2825 while (sw_cons != hw_cons) { 2825 while (sw_cons != hw_cons) {
2826 struct sw_tx_bd *tx_buf; 2826 struct bnx2_sw_tx_bd *tx_buf;
2827 struct sk_buff *skb; 2827 struct sk_buff *skb;
2828 int i, last; 2828 int i, last;
2829 2829
2830 sw_ring_cons = TX_RING_IDX(sw_cons); 2830 sw_ring_cons = BNX2_TX_RING_IDX(sw_cons);
2831 2831
2832 tx_buf = &txr->tx_buf_ring[sw_ring_cons]; 2832 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2833 skb = tx_buf->skb; 2833 skb = tx_buf->skb;
@@ -2841,7 +2841,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2841 2841
2842 last_idx = sw_cons + tx_buf->nr_frags + 1; 2842 last_idx = sw_cons + tx_buf->nr_frags + 1;
2843 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1; 2843 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2844 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) { 2844 if (unlikely(last_ring_idx >= BNX2_MAX_TX_DESC_CNT)) {
2845 last_idx++; 2845 last_idx++;
2846 } 2846 }
2847 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) { 2847 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
@@ -2856,17 +2856,18 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2856 last = tx_buf->nr_frags; 2856 last = tx_buf->nr_frags;
2857 2857
2858 for (i = 0; i < last; i++) { 2858 for (i = 0; i < last; i++) {
2859 sw_cons = NEXT_TX_BD(sw_cons); 2859 struct bnx2_sw_tx_bd *tx_buf;
2860 2860
2861 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2862
2863 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(sw_cons)];
2861 dma_unmap_page(&bp->pdev->dev, 2864 dma_unmap_page(&bp->pdev->dev,
2862 dma_unmap_addr( 2865 dma_unmap_addr(tx_buf, mapping),
2863 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2864 mapping),
2865 skb_frag_size(&skb_shinfo(skb)->frags[i]), 2866 skb_frag_size(&skb_shinfo(skb)->frags[i]),
2866 PCI_DMA_TODEVICE); 2867 PCI_DMA_TODEVICE);
2867 } 2868 }
2868 2869
2869 sw_cons = NEXT_TX_BD(sw_cons); 2870 sw_cons = BNX2_NEXT_TX_BD(sw_cons);
2870 2871
2871 tx_bytes += skb->len; 2872 tx_bytes += skb->len;
2872 dev_kfree_skb(skb); 2873 dev_kfree_skb(skb);
@@ -2905,8 +2906,8 @@ static void
2905bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, 2906bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2906 struct sk_buff *skb, int count) 2907 struct sk_buff *skb, int count)
2907{ 2908{
2908 struct sw_pg *cons_rx_pg, *prod_rx_pg; 2909 struct bnx2_sw_pg *cons_rx_pg, *prod_rx_pg;
2909 struct rx_bd *cons_bd, *prod_bd; 2910 struct bnx2_rx_bd *cons_bd, *prod_bd;
2910 int i; 2911 int i;
2911 u16 hw_prod, prod; 2912 u16 hw_prod, prod;
2912 u16 cons = rxr->rx_pg_cons; 2913 u16 cons = rxr->rx_pg_cons;
@@ -2933,12 +2934,14 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2933 hw_prod = rxr->rx_pg_prod; 2934 hw_prod = rxr->rx_pg_prod;
2934 2935
2935 for (i = 0; i < count; i++) { 2936 for (i = 0; i < count; i++) {
2936 prod = RX_PG_RING_IDX(hw_prod); 2937 prod = BNX2_RX_PG_RING_IDX(hw_prod);
2937 2938
2938 prod_rx_pg = &rxr->rx_pg_ring[prod]; 2939 prod_rx_pg = &rxr->rx_pg_ring[prod];
2939 cons_rx_pg = &rxr->rx_pg_ring[cons]; 2940 cons_rx_pg = &rxr->rx_pg_ring[cons];
2940 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2941 cons_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(cons)]
2941 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2942 [BNX2_RX_IDX(cons)];
2943 prod_bd = &rxr->rx_pg_desc_ring[BNX2_RX_RING(prod)]
2944 [BNX2_RX_IDX(prod)];
2942 2945
2943 if (prod != cons) { 2946 if (prod != cons) {
2944 prod_rx_pg->page = cons_rx_pg->page; 2947 prod_rx_pg->page = cons_rx_pg->page;
@@ -2950,8 +2953,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2950 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2953 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2951 2954
2952 } 2955 }
2953 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons)); 2956 cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(cons));
2954 hw_prod = NEXT_RX_BD(hw_prod); 2957 hw_prod = BNX2_NEXT_RX_BD(hw_prod);
2955 } 2958 }
2956 rxr->rx_pg_prod = hw_prod; 2959 rxr->rx_pg_prod = hw_prod;
2957 rxr->rx_pg_cons = cons; 2960 rxr->rx_pg_cons = cons;
@@ -2961,8 +2964,8 @@ static inline void
2961bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, 2964bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2962 u8 *data, u16 cons, u16 prod) 2965 u8 *data, u16 cons, u16 prod)
2963{ 2966{
2964 struct sw_bd *cons_rx_buf, *prod_rx_buf; 2967 struct bnx2_sw_bd *cons_rx_buf, *prod_rx_buf;
2965 struct rx_bd *cons_bd, *prod_bd; 2968 struct bnx2_rx_bd *cons_bd, *prod_bd;
2966 2969
2967 cons_rx_buf = &rxr->rx_buf_ring[cons]; 2970 cons_rx_buf = &rxr->rx_buf_ring[cons];
2968 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2971 prod_rx_buf = &rxr->rx_buf_ring[prod];
@@ -2981,8 +2984,8 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2981 dma_unmap_addr_set(prod_rx_buf, mapping, 2984 dma_unmap_addr_set(prod_rx_buf, mapping,
2982 dma_unmap_addr(cons_rx_buf, mapping)); 2985 dma_unmap_addr(cons_rx_buf, mapping));
2983 2986
2984 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2987 cons_bd = &rxr->rx_desc_ring[BNX2_RX_RING(cons)][BNX2_RX_IDX(cons)];
2985 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2988 prod_bd = &rxr->rx_desc_ring[BNX2_RX_RING(prod)][BNX2_RX_IDX(prod)];
2986 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; 2989 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2987 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2990 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2988} 2991}
@@ -3022,7 +3025,7 @@ error:
3022 return skb; 3025 return skb;
3023 } else { 3026 } else {
3024 unsigned int i, frag_len, frag_size, pages; 3027 unsigned int i, frag_len, frag_size, pages;
3025 struct sw_pg *rx_pg; 3028 struct bnx2_sw_pg *rx_pg;
3026 u16 pg_cons = rxr->rx_pg_cons; 3029 u16 pg_cons = rxr->rx_pg_cons;
3027 u16 pg_prod = rxr->rx_pg_prod; 3030 u16 pg_prod = rxr->rx_pg_prod;
3028 3031
@@ -3065,7 +3068,7 @@ error:
3065 rx_pg->page = NULL; 3068 rx_pg->page = NULL;
3066 3069
3067 err = bnx2_alloc_rx_page(bp, rxr, 3070 err = bnx2_alloc_rx_page(bp, rxr,
3068 RX_PG_RING_IDX(pg_prod), 3071 BNX2_RX_PG_RING_IDX(pg_prod),
3069 GFP_ATOMIC); 3072 GFP_ATOMIC);
3070 if (unlikely(err)) { 3073 if (unlikely(err)) {
3071 rxr->rx_pg_cons = pg_cons; 3074 rxr->rx_pg_cons = pg_cons;
@@ -3083,8 +3086,8 @@ error:
3083 skb->truesize += PAGE_SIZE; 3086 skb->truesize += PAGE_SIZE;
3084 skb->len += frag_len; 3087 skb->len += frag_len;
3085 3088
3086 pg_prod = NEXT_RX_BD(pg_prod); 3089 pg_prod = BNX2_NEXT_RX_BD(pg_prod);
3087 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons)); 3090 pg_cons = BNX2_RX_PG_RING_IDX(BNX2_NEXT_RX_BD(pg_cons));
3088 } 3091 }
3089 rxr->rx_pg_prod = pg_prod; 3092 rxr->rx_pg_prod = pg_prod;
3090 rxr->rx_pg_cons = pg_cons; 3093 rxr->rx_pg_cons = pg_cons;
@@ -3101,7 +3104,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3101 barrier(); 3104 barrier();
3102 cons = *bnapi->hw_rx_cons_ptr; 3105 cons = *bnapi->hw_rx_cons_ptr;
3103 barrier(); 3106 barrier();
3104 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)) 3107 if (unlikely((cons & BNX2_MAX_RX_DESC_CNT) == BNX2_MAX_RX_DESC_CNT))
3105 cons++; 3108 cons++;
3106 return cons; 3109 return cons;
3107} 3110}
@@ -3125,13 +3128,14 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3125 while (sw_cons != hw_cons) { 3128 while (sw_cons != hw_cons) {
3126 unsigned int len, hdr_len; 3129 unsigned int len, hdr_len;
3127 u32 status; 3130 u32 status;
3128 struct sw_bd *rx_buf, *next_rx_buf; 3131 struct bnx2_sw_bd *rx_buf, *next_rx_buf;
3129 struct sk_buff *skb; 3132 struct sk_buff *skb;
3130 dma_addr_t dma_addr; 3133 dma_addr_t dma_addr;
3131 u8 *data; 3134 u8 *data;
3135 u16 next_ring_idx;
3132 3136
3133 sw_ring_cons = RX_RING_IDX(sw_cons); 3137 sw_ring_cons = BNX2_RX_RING_IDX(sw_cons);
3134 sw_ring_prod = RX_RING_IDX(sw_prod); 3138 sw_ring_prod = BNX2_RX_RING_IDX(sw_prod);
3135 3139
3136 rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; 3140 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3137 data = rx_buf->data; 3141 data = rx_buf->data;
@@ -3146,8 +3150,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3146 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3150 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3147 PCI_DMA_FROMDEVICE); 3151 PCI_DMA_FROMDEVICE);
3148 3152
3149 next_rx_buf = 3153 next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons));
3150 &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))]; 3154 next_rx_buf = &rxr->rx_buf_ring[next_ring_idx];
3151 prefetch(get_l2_fhdr(next_rx_buf->data)); 3155 prefetch(get_l2_fhdr(next_rx_buf->data));
3152 3156
3153 len = rx_hdr->l2_fhdr_pkt_len; 3157 len = rx_hdr->l2_fhdr_pkt_len;
@@ -3239,8 +3243,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3239 rx_pkt++; 3243 rx_pkt++;
3240 3244
3241next_rx: 3245next_rx:
3242 sw_cons = NEXT_RX_BD(sw_cons); 3246 sw_cons = BNX2_NEXT_RX_BD(sw_cons);
3243 sw_prod = NEXT_RX_BD(sw_prod); 3247 sw_prod = BNX2_NEXT_RX_BD(sw_prod);
3244 3248
3245 if ((rx_pkt == budget)) 3249 if ((rx_pkt == budget))
3246 break; 3250 break;
@@ -3255,11 +3259,11 @@ next_rx:
3255 rxr->rx_prod = sw_prod; 3259 rxr->rx_prod = sw_prod;
3256 3260
3257 if (pg_ring_used) 3261 if (pg_ring_used)
3258 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); 3262 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3259 3263
3260 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod); 3264 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3261 3265
3262 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); 3266 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3263 3267
3264 mmiowb(); 3268 mmiowb();
3265 3269
@@ -3277,7 +3281,7 @@ bnx2_msi(int irq, void *dev_instance)
3277 struct bnx2 *bp = bnapi->bp; 3281 struct bnx2 *bp = bnapi->bp;
3278 3282
3279 prefetch(bnapi->status_blk.msi); 3283 prefetch(bnapi->status_blk.msi);
3280 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3284 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3281 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 3285 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3282 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 3286 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3283 3287
@@ -3321,18 +3325,18 @@ bnx2_interrupt(int irq, void *dev_instance)
3321 * the status block write. 3325 * the status block write.
3322 */ 3326 */
3323 if ((sblk->status_idx == bnapi->last_status_idx) && 3327 if ((sblk->status_idx == bnapi->last_status_idx) &&
3324 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) & 3328 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3325 BNX2_PCICFG_MISC_STATUS_INTA_VALUE)) 3329 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3326 return IRQ_NONE; 3330 return IRQ_NONE;
3327 3331
3328 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3332 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3329 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM | 3333 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3330 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 3334 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3331 3335
3332 /* Read back to deassert IRQ immediately to avoid too many 3336 /* Read back to deassert IRQ immediately to avoid too many
3333 * spurious interrupts. 3337 * spurious interrupts.
3334 */ 3338 */
3335 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD); 3339 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3336 3340
3337 /* Return here if interrupt is shared and is disabled. */ 3341 /* Return here if interrupt is shared and is disabled. */
3338 if (unlikely(atomic_read(&bp->intr_sem) != 0)) 3342 if (unlikely(atomic_read(&bp->intr_sem) != 0))
@@ -3388,14 +3392,14 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
3388 u32 msi_ctrl; 3392 u32 msi_ctrl;
3389 3393
3390 if (bnx2_has_work(bnapi)) { 3394 if (bnx2_has_work(bnapi)) {
3391 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL); 3395 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3392 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE)) 3396 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3393 return; 3397 return;
3394 3398
3395 if (bnapi->last_status_idx == bp->idle_chk_status_idx) { 3399 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3396 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl & 3400 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3397 ~BNX2_PCICFG_MSI_CONTROL_ENABLE); 3401 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3398 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl); 3402 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3399 bnx2_msi(bp->irq_tbl[0].vector, bnapi); 3403 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3400 } 3404 }
3401 } 3405 }
@@ -3434,9 +3438,9 @@ static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3434 /* This is needed to take care of transient status 3438 /* This is needed to take care of transient status
3435 * during link changes. 3439 * during link changes.
3436 */ 3440 */
3437 REG_WR(bp, BNX2_HC_COMMAND, 3441 BNX2_WR(bp, BNX2_HC_COMMAND,
3438 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 3442 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3439 REG_RD(bp, BNX2_HC_COMMAND); 3443 BNX2_RD(bp, BNX2_HC_COMMAND);
3440 } 3444 }
3441} 3445}
3442 3446
@@ -3473,9 +3477,9 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3473 if (likely(!bnx2_has_fast_work(bnapi))) { 3477 if (likely(!bnx2_has_fast_work(bnapi))) {
3474 3478
3475 napi_complete(napi); 3479 napi_complete(napi);
3476 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num | 3480 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3477 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3481 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3478 bnapi->last_status_idx); 3482 bnapi->last_status_idx);
3479 break; 3483 break;
3480 } 3484 }
3481 } 3485 }
@@ -3511,19 +3515,19 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
3511 if (likely(!bnx2_has_work(bnapi))) { 3515 if (likely(!bnx2_has_work(bnapi))) {
3512 napi_complete(napi); 3516 napi_complete(napi);
3513 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) { 3517 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3514 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3518 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3515 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3519 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3516 bnapi->last_status_idx); 3520 bnapi->last_status_idx);
3517 break; 3521 break;
3518 } 3522 }
3519 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3523 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3520 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3524 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3521 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | 3525 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3522 bnapi->last_status_idx); 3526 bnapi->last_status_idx);
3523 3527
3524 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, 3528 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3525 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | 3529 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3526 bnapi->last_status_idx); 3530 bnapi->last_status_idx);
3527 break; 3531 break;
3528 } 3532 }
3529 } 3533 }
@@ -3561,8 +3565,8 @@ bnx2_set_rx_mode(struct net_device *dev)
3561 } 3565 }
3562 else if (dev->flags & IFF_ALLMULTI) { 3566 else if (dev->flags & IFF_ALLMULTI) {
3563 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 3567 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3564 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), 3568 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3565 0xffffffff); 3569 0xffffffff);
3566 } 3570 }
3567 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN; 3571 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3568 } 3572 }
@@ -3584,8 +3588,8 @@ bnx2_set_rx_mode(struct net_device *dev)
3584 } 3588 }
3585 3589
3586 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 3590 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3587 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), 3591 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3588 mc_filter[i]); 3592 mc_filter[i]);
3589 } 3593 }
3590 3594
3591 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN; 3595 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
@@ -3610,12 +3614,12 @@ bnx2_set_rx_mode(struct net_device *dev)
3610 3614
3611 if (rx_mode != bp->rx_mode) { 3615 if (rx_mode != bp->rx_mode) {
3612 bp->rx_mode = rx_mode; 3616 bp->rx_mode = rx_mode;
3613 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode); 3617 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3614 } 3618 }
3615 3619
3616 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); 3620 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3617 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode); 3621 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3618 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA); 3622 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3619 3623
3620 spin_unlock_bh(&bp->phy_lock); 3624 spin_unlock_bh(&bp->phy_lock);
3621} 3625}
@@ -3663,10 +3667,10 @@ static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3663 const struct bnx2_rv2p_fw_file *rv2p_fw; 3667 const struct bnx2_rv2p_fw_file *rv2p_fw;
3664 int rc; 3668 int rc;
3665 3669
3666 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 3670 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3667 mips_fw_file = FW_MIPS_FILE_09; 3671 mips_fw_file = FW_MIPS_FILE_09;
3668 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) || 3672 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3669 (CHIP_ID(bp) == CHIP_ID_5709_A1)) 3673 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3670 rv2p_fw_file = FW_RV2P_FILE_09_Ax; 3674 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3671 else 3675 else
3672 rv2p_fw_file = FW_RV2P_FILE_09; 3676 rv2p_fw_file = FW_RV2P_FILE_09;
@@ -3756,13 +3760,13 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3756 } 3760 }
3757 3761
3758 for (i = 0; i < rv2p_code_len; i += 8) { 3762 for (i = 0; i < rv2p_code_len; i += 8) {
3759 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code)); 3763 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3760 rv2p_code++; 3764 rv2p_code++;
3761 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code)); 3765 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3762 rv2p_code++; 3766 rv2p_code++;
3763 3767
3764 val = (i / 8) | cmd; 3768 val = (i / 8) | cmd;
3765 REG_WR(bp, addr, val); 3769 BNX2_WR(bp, addr, val);
3766 } 3770 }
3767 3771
3768 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset); 3772 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
@@ -3772,22 +3776,22 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3772 loc = be32_to_cpu(fw_entry->fixup[i]); 3776 loc = be32_to_cpu(fw_entry->fixup[i]);
3773 if (loc && ((loc * 4) < rv2p_code_len)) { 3777 if (loc && ((loc * 4) < rv2p_code_len)) {
3774 code = be32_to_cpu(*(rv2p_code + loc - 1)); 3778 code = be32_to_cpu(*(rv2p_code + loc - 1));
3775 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code); 3779 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3776 code = be32_to_cpu(*(rv2p_code + loc)); 3780 code = be32_to_cpu(*(rv2p_code + loc));
3777 code = rv2p_fw_fixup(rv2p_proc, i, loc, code); 3781 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3778 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code); 3782 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3779 3783
3780 val = (loc / 2) | cmd; 3784 val = (loc / 2) | cmd;
3781 REG_WR(bp, addr, val); 3785 BNX2_WR(bp, addr, val);
3782 } 3786 }
3783 } 3787 }
3784 3788
3785 /* Reset the processor, un-stall is done later. */ 3789 /* Reset the processor, un-stall is done later. */
3786 if (rv2p_proc == RV2P_PROC1) { 3790 if (rv2p_proc == RV2P_PROC1) {
3787 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET); 3791 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3788 } 3792 }
3789 else { 3793 else {
3790 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET); 3794 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3791 } 3795 }
3792 3796
3793 return 0; 3797 return 0;
@@ -3924,14 +3928,14 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3924 /* delay required during transition out of D3hot */ 3928 /* delay required during transition out of D3hot */
3925 msleep(20); 3929 msleep(20);
3926 3930
3927 val = REG_RD(bp, BNX2_EMAC_MODE); 3931 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3928 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD; 3932 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3929 val &= ~BNX2_EMAC_MODE_MPKT; 3933 val &= ~BNX2_EMAC_MODE_MPKT;
3930 REG_WR(bp, BNX2_EMAC_MODE, val); 3934 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3931 3935
3932 val = REG_RD(bp, BNX2_RPM_CONFIG); 3936 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3933 val &= ~BNX2_RPM_CONFIG_ACPI_ENA; 3937 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3934 REG_WR(bp, BNX2_RPM_CONFIG, val); 3938 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
3935 break; 3939 break;
3936 } 3940 }
3937 case PCI_D3hot: { 3941 case PCI_D3hot: {
@@ -3963,7 +3967,7 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3963 3967
3964 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); 3968 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3965 3969
3966 val = REG_RD(bp, BNX2_EMAC_MODE); 3970 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3967 3971
3968 /* Enable port mode. */ 3972 /* Enable port mode. */
3969 val &= ~BNX2_EMAC_MODE_PORT; 3973 val &= ~BNX2_EMAC_MODE_PORT;
@@ -3978,32 +3982,32 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3978 val |= BNX2_EMAC_MODE_25G_MODE; 3982 val |= BNX2_EMAC_MODE_25G_MODE;
3979 } 3983 }
3980 3984
3981 REG_WR(bp, BNX2_EMAC_MODE, val); 3985 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3982 3986
3983 /* receive all multicast */ 3987 /* receive all multicast */
3984 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) { 3988 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3985 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4), 3989 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3986 0xffffffff); 3990 0xffffffff);
3987 } 3991 }
3988 REG_WR(bp, BNX2_EMAC_RX_MODE, 3992 BNX2_WR(bp, BNX2_EMAC_RX_MODE,
3989 BNX2_EMAC_RX_MODE_SORT_MODE); 3993 BNX2_EMAC_RX_MODE_SORT_MODE);
3990 3994
3991 val = 1 | BNX2_RPM_SORT_USER0_BC_EN | 3995 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3992 BNX2_RPM_SORT_USER0_MC_EN; 3996 BNX2_RPM_SORT_USER0_MC_EN;
3993 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0); 3997 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3994 REG_WR(bp, BNX2_RPM_SORT_USER0, val); 3998 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3995 REG_WR(bp, BNX2_RPM_SORT_USER0, val | 3999 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
3996 BNX2_RPM_SORT_USER0_ENA); 4000 BNX2_RPM_SORT_USER0_ENA);
3997 4001
3998 /* Need to enable EMAC and RPM for WOL. */ 4002 /* Need to enable EMAC and RPM for WOL. */
3999 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 4003 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4000 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE | 4004 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
4001 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE | 4005 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
4002 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE); 4006 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
4003 4007
4004 val = REG_RD(bp, BNX2_RPM_CONFIG); 4008 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4005 val &= ~BNX2_RPM_CONFIG_ACPI_ENA; 4009 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
4006 REG_WR(bp, BNX2_RPM_CONFIG, val); 4010 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4007 4011
4008 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 4012 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4009 } 4013 }
@@ -4016,8 +4020,8 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4016 1, 0); 4020 1, 0);
4017 4021
4018 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4022 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4019 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || 4023 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4020 (CHIP_ID(bp) == CHIP_ID_5706_A1)) { 4024 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4021 4025
4022 if (bp->wol) 4026 if (bp->wol)
4023 pmcsr |= 3; 4027 pmcsr |= 3;
@@ -4050,9 +4054,9 @@ bnx2_acquire_nvram_lock(struct bnx2 *bp)
4050 int j; 4054 int j;
4051 4055
4052 /* Request access to the flash interface. */ 4056 /* Request access to the flash interface. */
4053 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2); 4057 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4054 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4058 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4055 val = REG_RD(bp, BNX2_NVM_SW_ARB); 4059 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4056 if (val & BNX2_NVM_SW_ARB_ARB_ARB2) 4060 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4057 break; 4061 break;
4058 4062
@@ -4072,10 +4076,10 @@ bnx2_release_nvram_lock(struct bnx2 *bp)
4072 u32 val; 4076 u32 val;
4073 4077
4074 /* Relinquish nvram interface. */ 4078 /* Relinquish nvram interface. */
4075 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2); 4079 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4076 4080
4077 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4081 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4078 val = REG_RD(bp, BNX2_NVM_SW_ARB); 4082 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4079 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2)) 4083 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4080 break; 4084 break;
4081 4085
@@ -4094,20 +4098,20 @@ bnx2_enable_nvram_write(struct bnx2 *bp)
4094{ 4098{
4095 u32 val; 4099 u32 val;
4096 4100
4097 val = REG_RD(bp, BNX2_MISC_CFG); 4101 val = BNX2_RD(bp, BNX2_MISC_CFG);
4098 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI); 4102 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4099 4103
4100 if (bp->flash_info->flags & BNX2_NV_WREN) { 4104 if (bp->flash_info->flags & BNX2_NV_WREN) {
4101 int j; 4105 int j;
4102 4106
4103 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); 4107 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4104 REG_WR(bp, BNX2_NVM_COMMAND, 4108 BNX2_WR(bp, BNX2_NVM_COMMAND,
4105 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT); 4109 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4106 4110
4107 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4111 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4108 udelay(5); 4112 udelay(5);
4109 4113
4110 val = REG_RD(bp, BNX2_NVM_COMMAND); 4114 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4111 if (val & BNX2_NVM_COMMAND_DONE) 4115 if (val & BNX2_NVM_COMMAND_DONE)
4112 break; 4116 break;
4113 } 4117 }
@@ -4123,8 +4127,8 @@ bnx2_disable_nvram_write(struct bnx2 *bp)
4123{ 4127{
4124 u32 val; 4128 u32 val;
4125 4129
4126 val = REG_RD(bp, BNX2_MISC_CFG); 4130 val = BNX2_RD(bp, BNX2_MISC_CFG);
4127 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN); 4131 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4128} 4132}
4129 4133
4130 4134
@@ -4133,10 +4137,10 @@ bnx2_enable_nvram_access(struct bnx2 *bp)
4133{ 4137{
4134 u32 val; 4138 u32 val;
4135 4139
4136 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); 4140 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4137 /* Enable both bits, even on read. */ 4141 /* Enable both bits, even on read. */
4138 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 4142 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4139 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN); 4143 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4140} 4144}
4141 4145
4142static void 4146static void
@@ -4144,9 +4148,9 @@ bnx2_disable_nvram_access(struct bnx2 *bp)
4144{ 4148{
4145 u32 val; 4149 u32 val;
4146 4150
4147 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE); 4151 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4148 /* Disable both bits, even after read. */ 4152 /* Disable both bits, even after read. */
4149 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, 4153 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4150 val & ~(BNX2_NVM_ACCESS_ENABLE_EN | 4154 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4151 BNX2_NVM_ACCESS_ENABLE_WR_EN)); 4155 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4152} 4156}
@@ -4166,13 +4170,13 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4166 BNX2_NVM_COMMAND_DOIT; 4170 BNX2_NVM_COMMAND_DOIT;
4167 4171
4168 /* Need to clear DONE bit separately. */ 4172 /* Need to clear DONE bit separately. */
4169 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); 4173 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4170 4174
4171 /* Address of the NVRAM to read from. */ 4175 /* Address of the NVRAM to read from. */
4172 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); 4176 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4173 4177
4174 /* Issue an erase command. */ 4178 /* Issue an erase command. */
4175 REG_WR(bp, BNX2_NVM_COMMAND, cmd); 4179 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4176 4180
4177 /* Wait for completion. */ 4181 /* Wait for completion. */
4178 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4182 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4180,7 +4184,7 @@ bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4180 4184
4181 udelay(5); 4185 udelay(5);
4182 4186
4183 val = REG_RD(bp, BNX2_NVM_COMMAND); 4187 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4184 if (val & BNX2_NVM_COMMAND_DONE) 4188 if (val & BNX2_NVM_COMMAND_DONE)
4185 break; 4189 break;
4186 } 4190 }
@@ -4208,13 +4212,13 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4208 } 4212 }
4209 4213
4210 /* Need to clear DONE bit separately. */ 4214 /* Need to clear DONE bit separately. */
4211 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); 4215 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4212 4216
4213 /* Address of the NVRAM to read from. */ 4217 /* Address of the NVRAM to read from. */
4214 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); 4218 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4215 4219
4216 /* Issue a read command. */ 4220 /* Issue a read command. */
4217 REG_WR(bp, BNX2_NVM_COMMAND, cmd); 4221 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4218 4222
4219 /* Wait for completion. */ 4223 /* Wait for completion. */
4220 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4224 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
@@ -4222,9 +4226,9 @@ bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4222 4226
4223 udelay(5); 4227 udelay(5);
4224 4228
4225 val = REG_RD(bp, BNX2_NVM_COMMAND); 4229 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4226 if (val & BNX2_NVM_COMMAND_DONE) { 4230 if (val & BNX2_NVM_COMMAND_DONE) {
4227 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ)); 4231 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4228 memcpy(ret_val, &v, 4); 4232 memcpy(ret_val, &v, 4);
4229 break; 4233 break;
4230 } 4234 }
@@ -4254,24 +4258,24 @@ bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4254 } 4258 }
4255 4259
4256 /* Need to clear DONE bit separately. */ 4260 /* Need to clear DONE bit separately. */
4257 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE); 4261 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4258 4262
4259 memcpy(&val32, val, 4); 4263 memcpy(&val32, val, 4);
4260 4264
4261 /* Write the data. */ 4265 /* Write the data. */
4262 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32)); 4266 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4263 4267
4264 /* Address of the NVRAM to write to. */ 4268 /* Address of the NVRAM to write to. */
4265 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE); 4269 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4266 4270
4267 /* Issue the write command. */ 4271 /* Issue the write command. */
4268 REG_WR(bp, BNX2_NVM_COMMAND, cmd); 4272 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4269 4273
4270 /* Wait for completion. */ 4274 /* Wait for completion. */
4271 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) { 4275 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4272 udelay(5); 4276 udelay(5);
4273 4277
4274 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE) 4278 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4275 break; 4279 break;
4276 } 4280 }
4277 if (j >= NVRAM_TIMEOUT_COUNT) 4281 if (j >= NVRAM_TIMEOUT_COUNT)
@@ -4287,13 +4291,13 @@ bnx2_init_nvram(struct bnx2 *bp)
4287 int j, entry_count, rc = 0; 4291 int j, entry_count, rc = 0;
4288 const struct flash_spec *flash; 4292 const struct flash_spec *flash;
4289 4293
4290 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 4294 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4291 bp->flash_info = &flash_5709; 4295 bp->flash_info = &flash_5709;
4292 goto get_flash_size; 4296 goto get_flash_size;
4293 } 4297 }
4294 4298
4295 /* Determine the selected interface. */ 4299 /* Determine the selected interface. */
4296 val = REG_RD(bp, BNX2_NVM_CFG1); 4300 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4297 4301
4298 entry_count = ARRAY_SIZE(flash_table); 4302 entry_count = ARRAY_SIZE(flash_table);
4299 4303
@@ -4332,10 +4336,10 @@ bnx2_init_nvram(struct bnx2 *bp)
4332 bnx2_enable_nvram_access(bp); 4336 bnx2_enable_nvram_access(bp);
4333 4337
4334 /* Reconfigure the flash interface */ 4338 /* Reconfigure the flash interface */
4335 REG_WR(bp, BNX2_NVM_CFG1, flash->config1); 4339 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4336 REG_WR(bp, BNX2_NVM_CFG2, flash->config2); 4340 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4337 REG_WR(bp, BNX2_NVM_CFG3, flash->config3); 4341 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4338 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1); 4342 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4339 4343
4340 /* Disable access to flash interface */ 4344 /* Disable access to flash interface */
4341 bnx2_disable_nvram_access(bp); 4345 bnx2_disable_nvram_access(bp);
@@ -4696,10 +4700,10 @@ bnx2_init_fw_cap(struct bnx2 *bp)
4696static void 4700static void
4697bnx2_setup_msix_tbl(struct bnx2 *bp) 4701bnx2_setup_msix_tbl(struct bnx2 *bp)
4698{ 4702{
4699 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN); 4703 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4700 4704
4701 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR); 4705 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4702 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); 4706 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4703} 4707}
4704 4708
4705static int 4709static int
@@ -4711,24 +4715,24 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4711 4715
4712 /* Wait for the current PCI transaction to complete before 4716 /* Wait for the current PCI transaction to complete before
4713 * issuing a reset. */ 4717 * issuing a reset. */
4714 if ((CHIP_NUM(bp) == CHIP_NUM_5706) || 4718 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4715 (CHIP_NUM(bp) == CHIP_NUM_5708)) { 4719 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4716 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4720 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4717 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4721 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4718 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4722 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4719 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4723 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4720 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4724 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4721 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); 4725 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4722 udelay(5); 4726 udelay(5);
4723 } else { /* 5709 */ 4727 } else { /* 5709 */
4724 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); 4728 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4725 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; 4729 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4726 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); 4730 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4727 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); 4731 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4728 4732
4729 for (i = 0; i < 100; i++) { 4733 for (i = 0; i < 100; i++) {
4730 msleep(1); 4734 msleep(1);
4731 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL); 4735 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4732 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND)) 4736 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4733 break; 4737 break;
4734 } 4738 }
@@ -4744,17 +4748,17 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4744 4748
4745 /* Do a dummy read to force the chip to complete all current transaction 4749 /* Do a dummy read to force the chip to complete all current transaction
4746 * before we issue a reset. */ 4750 * before we issue a reset. */
4747 val = REG_RD(bp, BNX2_MISC_ID); 4751 val = BNX2_RD(bp, BNX2_MISC_ID);
4748 4752
4749 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 4753 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4750 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET); 4754 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4751 REG_RD(bp, BNX2_MISC_COMMAND); 4755 BNX2_RD(bp, BNX2_MISC_COMMAND);
4752 udelay(5); 4756 udelay(5);
4753 4757
4754 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4758 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4755 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4759 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4756 4760
4757 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); 4761 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4758 4762
4759 } else { 4763 } else {
4760 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4764 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -4762,19 +4766,19 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4762 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4766 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4763 4767
4764 /* Chip reset. */ 4768 /* Chip reset. */
4765 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val); 4769 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4766 4770
4767 /* Reading back any register after chip reset will hang the 4771 /* Reading back any register after chip reset will hang the
4768 * bus on 5706 A0 and A1. The msleep below provides plenty 4772 * bus on 5706 A0 and A1. The msleep below provides plenty
4769 * of margin for write posting. 4773 * of margin for write posting.
4770 */ 4774 */
4771 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || 4775 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4772 (CHIP_ID(bp) == CHIP_ID_5706_A1)) 4776 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4773 msleep(20); 4777 msleep(20);
4774 4778
4775 /* Reset takes approximate 30 usec */ 4779 /* Reset takes approximate 30 usec */
4776 for (i = 0; i < 10; i++) { 4780 for (i = 0; i < 10; i++) {
4777 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG); 4781 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4778 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4782 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4779 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) 4783 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4780 break; 4784 break;
@@ -4789,7 +4793,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4789 } 4793 }
4790 4794
4791 /* Make sure byte swapping is properly configured. */ 4795 /* Make sure byte swapping is properly configured. */
4792 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0); 4796 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4793 if (val != 0x01020304) { 4797 if (val != 0x01020304) {
4794 pr_err("Chip not in correct endian mode\n"); 4798 pr_err("Chip not in correct endian mode\n");
4795 return -ENODEV; 4799 return -ENODEV;
@@ -4808,10 +4812,10 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4808 bnx2_set_default_remote_link(bp); 4812 bnx2_set_default_remote_link(bp);
4809 spin_unlock_bh(&bp->phy_lock); 4813 spin_unlock_bh(&bp->phy_lock);
4810 4814
4811 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 4815 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4812 /* Adjust the voltage regular to two steps lower. The default 4816 /* Adjust the voltage regular to two steps lower. The default
4813 * of this register is 0x0000000e. */ 4817 * of this register is 0x0000000e. */
4814 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa); 4818 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4815 4819
4816 /* Remove bad rbuf memory from the free pool. */ 4820 /* Remove bad rbuf memory from the free pool. */
4817 rc = bnx2_alloc_bad_rbuf(bp); 4821 rc = bnx2_alloc_bad_rbuf(bp);
@@ -4820,7 +4824,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4820 if (bp->flags & BNX2_FLAG_USING_MSIX) { 4824 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4821 bnx2_setup_msix_tbl(bp); 4825 bnx2_setup_msix_tbl(bp);
4822 /* Prevent MSIX table reads and write from timing out */ 4826 /* Prevent MSIX table reads and write from timing out */
4823 REG_WR(bp, BNX2_MISC_ECO_HW_CTL, 4827 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4824 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN); 4828 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4825 } 4829 }
4826 4830
@@ -4834,7 +4838,7 @@ bnx2_init_chip(struct bnx2 *bp)
4834 int rc, i; 4838 int rc, i;
4835 4839
4836 /* Make sure the interrupt is not active. */ 4840 /* Make sure the interrupt is not active. */
4837 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4841 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4838 4842
4839 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP | 4843 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4840 BNX2_DMA_CONFIG_DATA_WORD_SWAP | 4844 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
@@ -4850,16 +4854,17 @@ bnx2_init_chip(struct bnx2 *bp)
4850 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133)) 4854 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4851 val |= (1 << 23); 4855 val |= (1 << 23);
4852 4856
4853 if ((CHIP_NUM(bp) == CHIP_NUM_5706) && 4857 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4854 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX)) 4858 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4859 !(bp->flags & BNX2_FLAG_PCIX))
4855 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA; 4860 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4856 4861
4857 REG_WR(bp, BNX2_DMA_CONFIG, val); 4862 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4858 4863
4859 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 4864 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4860 val = REG_RD(bp, BNX2_TDMA_CONFIG); 4865 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4861 val |= BNX2_TDMA_CONFIG_ONE_DMA; 4866 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4862 REG_WR(bp, BNX2_TDMA_CONFIG, val); 4867 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4863 } 4868 }
4864 4869
4865 if (bp->flags & BNX2_FLAG_PCIX) { 4870 if (bp->flags & BNX2_FLAG_PCIX) {
@@ -4871,14 +4876,14 @@ bnx2_init_chip(struct bnx2 *bp)
4871 val16 & ~PCI_X_CMD_ERO); 4876 val16 & ~PCI_X_CMD_ERO);
4872 } 4877 }
4873 4878
4874 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 4879 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4875 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE | 4880 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4876 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE | 4881 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4877 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE); 4882 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4878 4883
4879 /* Initialize context mapping and zero out the quick contexts. The 4884 /* Initialize context mapping and zero out the quick contexts. The
4880 * context block must have already been enabled. */ 4885 * context block must have already been enabled. */
4881 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 4886 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4882 rc = bnx2_init_5709_context(bp); 4887 rc = bnx2_init_5709_context(bp);
4883 if (rc) 4888 if (rc)
4884 return rc; 4889 return rc;
@@ -4892,29 +4897,29 @@ bnx2_init_chip(struct bnx2 *bp)
4892 4897
4893 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0); 4898 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4894 4899
4895 val = REG_RD(bp, BNX2_MQ_CONFIG); 4900 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4896 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4901 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4897 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256; 4902 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4898 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 4903 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4899 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE; 4904 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4900 if (CHIP_REV(bp) == CHIP_REV_Ax) 4905 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4901 val |= BNX2_MQ_CONFIG_HALT_DIS; 4906 val |= BNX2_MQ_CONFIG_HALT_DIS;
4902 } 4907 }
4903 4908
4904 REG_WR(bp, BNX2_MQ_CONFIG, val); 4909 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4905 4910
4906 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE); 4911 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4907 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val); 4912 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4908 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val); 4913 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4909 4914
4910 val = (BCM_PAGE_BITS - 8) << 24; 4915 val = (BNX2_PAGE_BITS - 8) << 24;
4911 REG_WR(bp, BNX2_RV2P_CONFIG, val); 4916 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4912 4917
4913 /* Configure page size. */ 4918 /* Configure page size. */
4914 val = REG_RD(bp, BNX2_TBDR_CONFIG); 4919 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4915 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE; 4920 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4916 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40; 4921 val |= (BNX2_PAGE_BITS - 8) << 24 | 0x40;
4917 REG_WR(bp, BNX2_TBDR_CONFIG, val); 4922 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4918 4923
4919 val = bp->mac_addr[0] + 4924 val = bp->mac_addr[0] +
4920 (bp->mac_addr[1] << 8) + 4925 (bp->mac_addr[1] << 8) +
@@ -4922,14 +4927,14 @@ bnx2_init_chip(struct bnx2 *bp)
4922 bp->mac_addr[3] + 4927 bp->mac_addr[3] +
4923 (bp->mac_addr[4] << 8) + 4928 (bp->mac_addr[4] << 8) +
4924 (bp->mac_addr[5] << 16); 4929 (bp->mac_addr[5] << 16);
4925 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val); 4930 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4926 4931
4927 /* Program the MTU. Also include 4 bytes for CRC32. */ 4932 /* Program the MTU. Also include 4 bytes for CRC32. */
4928 mtu = bp->dev->mtu; 4933 mtu = bp->dev->mtu;
4929 val = mtu + ETH_HLEN + ETH_FCS_LEN; 4934 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4930 if (val > (MAX_ETHERNET_PACKET_SIZE + 4)) 4935 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4931 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA; 4936 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4932 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val); 4937 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4933 4938
4934 if (mtu < 1500) 4939 if (mtu < 1500)
4935 mtu = 1500; 4940 mtu = 1500;
@@ -4947,43 +4952,43 @@ bnx2_init_chip(struct bnx2 *bp)
4947 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE; 4952 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4948 4953
4949 /* Set up how to generate a link change interrupt. */ 4954 /* Set up how to generate a link change interrupt. */
4950 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK); 4955 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4951 4956
4952 REG_WR(bp, BNX2_HC_STATUS_ADDR_L, 4957 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
4953 (u64) bp->status_blk_mapping & 0xffffffff); 4958 (u64) bp->status_blk_mapping & 0xffffffff);
4954 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32); 4959 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4955 4960
4956 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L, 4961 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4957 (u64) bp->stats_blk_mapping & 0xffffffff); 4962 (u64) bp->stats_blk_mapping & 0xffffffff);
4958 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H, 4963 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4959 (u64) bp->stats_blk_mapping >> 32); 4964 (u64) bp->stats_blk_mapping >> 32);
4960 4965
4961 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP, 4966 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4962 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip); 4967 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4963 4968
4964 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP, 4969 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4965 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip); 4970 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4966 4971
4967 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP, 4972 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4968 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip); 4973 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4969 4974
4970 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks); 4975 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4971 4976
4972 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks); 4977 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4973 4978
4974 REG_WR(bp, BNX2_HC_COM_TICKS, 4979 BNX2_WR(bp, BNX2_HC_COM_TICKS,
4975 (bp->com_ticks_int << 16) | bp->com_ticks); 4980 (bp->com_ticks_int << 16) | bp->com_ticks);
4976 4981
4977 REG_WR(bp, BNX2_HC_CMD_TICKS, 4982 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
4978 (bp->cmd_ticks_int << 16) | bp->cmd_ticks); 4983 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4979 4984
4980 if (bp->flags & BNX2_FLAG_BROKEN_STATS) 4985 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4981 REG_WR(bp, BNX2_HC_STATS_TICKS, 0); 4986 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
4982 else 4987 else
4983 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks); 4988 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4984 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */ 4989 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4985 4990
4986 if (CHIP_ID(bp) == CHIP_ID_5706_A1) 4991 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
4987 val = BNX2_HC_CONFIG_COLLECT_STATS; 4992 val = BNX2_HC_CONFIG_COLLECT_STATS;
4988 else { 4993 else {
4989 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE | 4994 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
@@ -4991,8 +4996,8 @@ bnx2_init_chip(struct bnx2 *bp)
4991 } 4996 }
4992 4997
4993 if (bp->flags & BNX2_FLAG_USING_MSIX) { 4998 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4994 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR, 4999 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4995 BNX2_HC_MSIX_BIT_VECTOR_VAL); 5000 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4996 5001
4997 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B; 5002 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4998 } 5003 }
@@ -5000,7 +5005,7 @@ bnx2_init_chip(struct bnx2 *bp)
5000 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI) 5005 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5001 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM; 5006 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
5002 5007
5003 REG_WR(bp, BNX2_HC_CONFIG, val); 5008 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5004 5009
5005 if (bp->rx_ticks < 25) 5010 if (bp->rx_ticks < 25)
5006 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1); 5011 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
@@ -5011,48 +5016,48 @@ bnx2_init_chip(struct bnx2 *bp)
5011 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) + 5016 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
5012 BNX2_HC_SB_CONFIG_1; 5017 BNX2_HC_SB_CONFIG_1;
5013 5018
5014 REG_WR(bp, base, 5019 BNX2_WR(bp, base,
5015 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE | 5020 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5016 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE | 5021 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
5017 BNX2_HC_SB_CONFIG_1_ONE_SHOT); 5022 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
5018 5023
5019 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, 5024 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5020 (bp->tx_quick_cons_trip_int << 16) | 5025 (bp->tx_quick_cons_trip_int << 16) |
5021 bp->tx_quick_cons_trip); 5026 bp->tx_quick_cons_trip);
5022 5027
5023 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF, 5028 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5024 (bp->tx_ticks_int << 16) | bp->tx_ticks); 5029 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5025 5030
5026 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF, 5031 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5027 (bp->rx_quick_cons_trip_int << 16) | 5032 (bp->rx_quick_cons_trip_int << 16) |
5028 bp->rx_quick_cons_trip); 5033 bp->rx_quick_cons_trip);
5029 5034
5030 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF, 5035 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5031 (bp->rx_ticks_int << 16) | bp->rx_ticks); 5036 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5032 } 5037 }
5033 5038
5034 /* Clear internal stats counters. */ 5039 /* Clear internal stats counters. */
5035 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW); 5040 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5036 5041
5037 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS); 5042 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5038 5043
5039 /* Initialize the receive filter. */ 5044 /* Initialize the receive filter. */
5040 bnx2_set_rx_mode(bp->dev); 5045 bnx2_set_rx_mode(bp->dev);
5041 5046
5042 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5047 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5043 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL); 5048 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5044 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE; 5049 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5045 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val); 5050 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5046 } 5051 }
5047 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET, 5052 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5048 1, 0); 5053 1, 0);
5049 5054
5050 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT); 5055 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5051 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS); 5056 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5052 5057
5053 udelay(20); 5058 udelay(20);
5054 5059
5055 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND); 5060 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5056 5061
5057 return rc; 5062 return rc;
5058} 5063}
@@ -5086,7 +5091,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5086 u32 val, offset0, offset1, offset2, offset3; 5091 u32 val, offset0, offset1, offset2, offset3;
5087 u32 cid_addr = GET_CID_ADDR(cid); 5092 u32 cid_addr = GET_CID_ADDR(cid);
5088 5093
5089 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5094 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5090 offset0 = BNX2_L2CTX_TYPE_XI; 5095 offset0 = BNX2_L2CTX_TYPE_XI;
5091 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 5096 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5092 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 5097 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
@@ -5113,7 +5118,7 @@ bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5113static void 5118static void
5114bnx2_init_tx_ring(struct bnx2 *bp, int ring_num) 5119bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5115{ 5120{
5116 struct tx_bd *txbd; 5121 struct bnx2_tx_bd *txbd;
5117 u32 cid = TX_CID; 5122 u32 cid = TX_CID;
5118 struct bnx2_napi *bnapi; 5123 struct bnx2_napi *bnapi;
5119 struct bnx2_tx_ring_info *txr; 5124 struct bnx2_tx_ring_info *txr;
@@ -5128,7 +5133,7 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5128 5133
5129 bp->tx_wake_thresh = bp->tx_ring_size / 2; 5134 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5130 5135
5131 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT]; 5136 txbd = &txr->tx_desc_ring[BNX2_MAX_TX_DESC_CNT];
5132 5137
5133 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32; 5138 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5134 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff; 5139 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
@@ -5143,17 +5148,17 @@ bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5143} 5148}
5144 5149
5145static void 5150static void
5146bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size, 5151bnx2_init_rxbd_rings(struct bnx2_rx_bd *rx_ring[], dma_addr_t dma[],
5147 int num_rings) 5152 u32 buf_size, int num_rings)
5148{ 5153{
5149 int i; 5154 int i;
5150 struct rx_bd *rxbd; 5155 struct bnx2_rx_bd *rxbd;
5151 5156
5152 for (i = 0; i < num_rings; i++) { 5157 for (i = 0; i < num_rings; i++) {
5153 int j; 5158 int j;
5154 5159
5155 rxbd = &rx_ring[i][0]; 5160 rxbd = &rx_ring[i][0];
5156 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { 5161 for (j = 0; j < BNX2_MAX_RX_DESC_CNT; j++, rxbd++) {
5157 rxbd->rx_bd_len = buf_size; 5162 rxbd->rx_bd_len = buf_size;
5158 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 5163 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5159 } 5164 }
@@ -5187,9 +5192,9 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5187 5192
5188 bnx2_init_rx_context(bp, cid); 5193 bnx2_init_rx_context(bp, cid);
5189 5194
5190 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 5195 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5191 val = REG_RD(bp, BNX2_MQ_MAP_L2_5); 5196 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5192 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM); 5197 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5193 } 5198 }
5194 5199
5195 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0); 5200 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
@@ -5208,8 +5213,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5208 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff; 5213 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5209 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val); 5214 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5210 5215
5211 if (CHIP_NUM(bp) == CHIP_NUM_5709) 5216 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5212 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT); 5217 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5213 } 5218 }
5214 5219
5215 val = (u64) rxr->rx_desc_mapping[0] >> 32; 5220 val = (u64) rxr->rx_desc_mapping[0] >> 32;
@@ -5225,8 +5230,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5225 ring_num, i, bp->rx_pg_ring_size); 5230 ring_num, i, bp->rx_pg_ring_size);
5226 break; 5231 break;
5227 } 5232 }
5228 prod = NEXT_RX_BD(prod); 5233 prod = BNX2_NEXT_RX_BD(prod);
5229 ring_prod = RX_PG_RING_IDX(prod); 5234 ring_prod = BNX2_RX_PG_RING_IDX(prod);
5230 } 5235 }
5231 rxr->rx_pg_prod = prod; 5236 rxr->rx_pg_prod = prod;
5232 5237
@@ -5237,8 +5242,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5237 ring_num, i, bp->rx_ring_size); 5242 ring_num, i, bp->rx_ring_size);
5238 break; 5243 break;
5239 } 5244 }
5240 prod = NEXT_RX_BD(prod); 5245 prod = BNX2_NEXT_RX_BD(prod);
5241 ring_prod = RX_RING_IDX(prod); 5246 ring_prod = BNX2_RX_RING_IDX(prod);
5242 } 5247 }
5243 rxr->rx_prod = prod; 5248 rxr->rx_prod = prod;
5244 5249
@@ -5246,10 +5251,10 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5246 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ; 5251 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5247 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX; 5252 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5248 5253
5249 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod); 5254 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5250 REG_WR16(bp, rxr->rx_bidx_addr, prod); 5255 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5251 5256
5252 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); 5257 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5253} 5258}
5254 5259
5255static void 5260static void
@@ -5260,15 +5265,15 @@ bnx2_init_all_rings(struct bnx2 *bp)
5260 5265
5261 bnx2_clear_ring_states(bp); 5266 bnx2_clear_ring_states(bp);
5262 5267
5263 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0); 5268 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5264 for (i = 0; i < bp->num_tx_rings; i++) 5269 for (i = 0; i < bp->num_tx_rings; i++)
5265 bnx2_init_tx_ring(bp, i); 5270 bnx2_init_tx_ring(bp, i);
5266 5271
5267 if (bp->num_tx_rings > 1) 5272 if (bp->num_tx_rings > 1)
5268 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) | 5273 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5269 (TX_TSS_CID << 7)); 5274 (TX_TSS_CID << 7));
5270 5275
5271 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0); 5276 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5272 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0); 5277 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5273 5278
5274 for (i = 0; i < bp->num_rx_rings; i++) 5279 for (i = 0; i < bp->num_rx_rings; i++)
@@ -5282,8 +5287,8 @@ bnx2_init_all_rings(struct bnx2 *bp)
5282 5287
5283 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift; 5288 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5284 if ((i % 8) == 7) { 5289 if ((i % 8) == 7) {
5285 REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32); 5290 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5286 REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) | 5291 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5287 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK | 5292 BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
5288 BNX2_RLUP_RSS_COMMAND_WRITE | 5293 BNX2_RLUP_RSS_COMMAND_WRITE |
5289 BNX2_RLUP_RSS_COMMAND_HASH_MASK); 5294 BNX2_RLUP_RSS_COMMAND_HASH_MASK);
@@ -5294,7 +5299,7 @@ bnx2_init_all_rings(struct bnx2 *bp)
5294 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI | 5299 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5295 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI; 5300 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5296 5301
5297 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val); 5302 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5298 5303
5299 } 5304 }
5300} 5305}
@@ -5303,8 +5308,8 @@ static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5303{ 5308{
5304 u32 max, num_rings = 1; 5309 u32 max, num_rings = 1;
5305 5310
5306 while (ring_size > MAX_RX_DESC_CNT) { 5311 while (ring_size > BNX2_MAX_RX_DESC_CNT) {
5307 ring_size -= MAX_RX_DESC_CNT; 5312 ring_size -= BNX2_MAX_RX_DESC_CNT;
5308 num_rings++; 5313 num_rings++;
5309 } 5314 }
5310 /* round to next power of 2 */ 5315 /* round to next power of 2 */
@@ -5337,13 +5342,14 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5337 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; 5342 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5338 5343
5339 jumbo_size = size * pages; 5344 jumbo_size = size * pages;
5340 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT) 5345 if (jumbo_size > BNX2_MAX_TOTAL_RX_PG_DESC_CNT)
5341 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT; 5346 jumbo_size = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
5342 5347
5343 bp->rx_pg_ring_size = jumbo_size; 5348 bp->rx_pg_ring_size = jumbo_size;
5344 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size, 5349 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5345 MAX_RX_PG_RINGS); 5350 BNX2_MAX_RX_PG_RINGS);
5346 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1; 5351 bp->rx_max_pg_ring_idx =
5352 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5347 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET; 5353 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5348 bp->rx_copy_thresh = 0; 5354 bp->rx_copy_thresh = 0;
5349 } 5355 }
@@ -5354,8 +5360,8 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5354 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 5360 NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5355 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; 5361 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5356 bp->rx_ring_size = size; 5362 bp->rx_ring_size = size;
5357 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS); 5363 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5358 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; 5364 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
5359} 5365}
5360 5366
5361static void 5367static void
@@ -5371,13 +5377,13 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5371 if (txr->tx_buf_ring == NULL) 5377 if (txr->tx_buf_ring == NULL)
5372 continue; 5378 continue;
5373 5379
5374 for (j = 0; j < TX_DESC_CNT; ) { 5380 for (j = 0; j < BNX2_TX_DESC_CNT; ) {
5375 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; 5381 struct bnx2_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5376 struct sk_buff *skb = tx_buf->skb; 5382 struct sk_buff *skb = tx_buf->skb;
5377 int k, last; 5383 int k, last;
5378 5384
5379 if (skb == NULL) { 5385 if (skb == NULL) {
5380 j = NEXT_TX_BD(j); 5386 j = BNX2_NEXT_TX_BD(j);
5381 continue; 5387 continue;
5382 } 5388 }
5383 5389
@@ -5389,9 +5395,9 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5389 tx_buf->skb = NULL; 5395 tx_buf->skb = NULL;
5390 5396
5391 last = tx_buf->nr_frags; 5397 last = tx_buf->nr_frags;
5392 j = NEXT_TX_BD(j); 5398 j = BNX2_NEXT_TX_BD(j);
5393 for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) { 5399 for (k = 0; k < last; k++, j = BNX2_NEXT_TX_BD(j)) {
5394 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5400 tx_buf = &txr->tx_buf_ring[BNX2_TX_RING_IDX(j)];
5395 dma_unmap_page(&bp->pdev->dev, 5401 dma_unmap_page(&bp->pdev->dev,
5396 dma_unmap_addr(tx_buf, mapping), 5402 dma_unmap_addr(tx_buf, mapping),
5397 skb_frag_size(&skb_shinfo(skb)->frags[k]), 5403 skb_frag_size(&skb_shinfo(skb)->frags[k]),
@@ -5417,7 +5423,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5417 return; 5423 return;
5418 5424
5419 for (j = 0; j < bp->rx_max_ring_idx; j++) { 5425 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5420 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j]; 5426 struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5421 u8 *data = rx_buf->data; 5427 u8 *data = rx_buf->data;
5422 5428
5423 if (data == NULL) 5429 if (data == NULL)
@@ -5615,7 +5621,7 @@ bnx2_test_registers(struct bnx2 *bp)
5615 5621
5616 ret = 0; 5622 ret = 0;
5617 is_5709 = 0; 5623 is_5709 = 0;
5618 if (CHIP_NUM(bp) == CHIP_NUM_5709) 5624 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5619 is_5709 = 1; 5625 is_5709 = 1;
5620 5626
5621 for (i = 0; reg_tbl[i].offset != 0xffff; i++) { 5627 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
@@ -5714,7 +5720,7 @@ bnx2_test_memory(struct bnx2 *bp)
5714 }; 5720 };
5715 struct mem_entry *mem_tbl; 5721 struct mem_entry *mem_tbl;
5716 5722
5717 if (CHIP_NUM(bp) == CHIP_NUM_5709) 5723 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5718 mem_tbl = mem_tbl_5709; 5724 mem_tbl = mem_tbl_5709;
5719 else 5725 else
5720 mem_tbl = mem_tbl_5706; 5726 mem_tbl = mem_tbl_5706;
@@ -5741,8 +5747,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5741 unsigned char *packet; 5747 unsigned char *packet;
5742 u16 rx_start_idx, rx_idx; 5748 u16 rx_start_idx, rx_idx;
5743 dma_addr_t map; 5749 dma_addr_t map;
5744 struct tx_bd *txbd; 5750 struct bnx2_tx_bd *txbd;
5745 struct sw_bd *rx_buf; 5751 struct bnx2_sw_bd *rx_buf;
5746 struct l2_fhdr *rx_hdr; 5752 struct l2_fhdr *rx_hdr;
5747 int ret = -ENODEV; 5753 int ret = -ENODEV;
5748 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi; 5754 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
@@ -5784,17 +5790,17 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5784 return -EIO; 5790 return -EIO;
5785 } 5791 }
5786 5792
5787 REG_WR(bp, BNX2_HC_COMMAND, 5793 BNX2_WR(bp, BNX2_HC_COMMAND,
5788 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 5794 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5789 5795
5790 REG_RD(bp, BNX2_HC_COMMAND); 5796 BNX2_RD(bp, BNX2_HC_COMMAND);
5791 5797
5792 udelay(5); 5798 udelay(5);
5793 rx_start_idx = bnx2_get_hw_rx_cons(bnapi); 5799 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5794 5800
5795 num_pkts = 0; 5801 num_pkts = 0;
5796 5802
5797 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)]; 5803 txbd = &txr->tx_desc_ring[BNX2_TX_RING_IDX(txr->tx_prod)];
5798 5804
5799 txbd->tx_bd_haddr_hi = (u64) map >> 32; 5805 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5800 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; 5806 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
@@ -5802,18 +5808,18 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5802 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END; 5808 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5803 5809
5804 num_pkts++; 5810 num_pkts++;
5805 txr->tx_prod = NEXT_TX_BD(txr->tx_prod); 5811 txr->tx_prod = BNX2_NEXT_TX_BD(txr->tx_prod);
5806 txr->tx_prod_bseq += pkt_size; 5812 txr->tx_prod_bseq += pkt_size;
5807 5813
5808 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod); 5814 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5809 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); 5815 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5810 5816
5811 udelay(100); 5817 udelay(100);
5812 5818
5813 REG_WR(bp, BNX2_HC_COMMAND, 5819 BNX2_WR(bp, BNX2_HC_COMMAND,
5814 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 5820 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5815 5821
5816 REG_RD(bp, BNX2_HC_COMMAND); 5822 BNX2_RD(bp, BNX2_HC_COMMAND);
5817 5823
5818 udelay(5); 5824 udelay(5);
5819 5825
@@ -5962,14 +5968,14 @@ bnx2_test_intr(struct bnx2 *bp)
5962 if (!netif_running(bp->dev)) 5968 if (!netif_running(bp->dev))
5963 return -ENODEV; 5969 return -ENODEV;
5964 5970
5965 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff; 5971 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5966 5972
5967 /* This register is not touched during run-time. */ 5973 /* This register is not touched during run-time. */
5968 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW); 5974 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5969 REG_RD(bp, BNX2_HC_COMMAND); 5975 BNX2_RD(bp, BNX2_HC_COMMAND);
5970 5976
5971 for (i = 0; i < 10; i++) { 5977 for (i = 0; i < 10; i++) {
5972 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) != 5978 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5973 status_idx) { 5979 status_idx) {
5974 5980
5975 break; 5981 break;
@@ -6132,11 +6138,11 @@ bnx2_timer(unsigned long data)
6132 6138
6133 /* workaround occasional corrupted counters */ 6139 /* workaround occasional corrupted counters */
6134 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks) 6140 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6135 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | 6141 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6136 BNX2_HC_COMMAND_STATS_NOW); 6142 BNX2_HC_COMMAND_STATS_NOW);
6137 6143
6138 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) { 6144 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6139 if (CHIP_NUM(bp) == CHIP_NUM_5706) 6145 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6140 bnx2_5706_serdes_timer(bp); 6146 bnx2_5706_serdes_timer(bp);
6141 else 6147 else
6142 bnx2_5708_serdes_timer(bp); 6148 bnx2_5708_serdes_timer(bp);
@@ -6205,13 +6211,13 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6205 const int len = sizeof(bp->irq_tbl[0].name); 6211 const int len = sizeof(bp->irq_tbl[0].name);
6206 6212
6207 bnx2_setup_msix_tbl(bp); 6213 bnx2_setup_msix_tbl(bp);
6208 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1); 6214 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6209 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE); 6215 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6210 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE); 6216 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6211 6217
6212 /* Need to flush the previous three writes to ensure MSI-X 6218 /* Need to flush the previous three writes to ensure MSI-X
6213 * is setup properly */ 6219 * is setup properly */
6214 REG_RD(bp, BNX2_PCI_MSIX_CONTROL); 6220 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6215 6221
6216 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) { 6222 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6217 msix_ent[i].entry = i; 6223 msix_ent[i].entry = i;
@@ -6274,7 +6280,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6274 !(bp->flags & BNX2_FLAG_USING_MSIX)) { 6280 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6275 if (pci_enable_msi(bp->pdev) == 0) { 6281 if (pci_enable_msi(bp->pdev) == 0) {
6276 bp->flags |= BNX2_FLAG_USING_MSI; 6282 bp->flags |= BNX2_FLAG_USING_MSI;
6277 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 6283 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6278 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI; 6284 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6279 bp->irq_tbl[0].handler = bnx2_msi_1shot; 6285 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6280 } else 6286 } else
@@ -6464,22 +6470,22 @@ bnx2_dump_ftq(struct bnx2 *bp)
6464 netdev_err(dev, "<--- end FTQ dump --->\n"); 6470 netdev_err(dev, "<--- end FTQ dump --->\n");
6465 netdev_err(dev, "<--- start TBDC dump --->\n"); 6471 netdev_err(dev, "<--- start TBDC dump --->\n");
6466 netdev_err(dev, "TBDC free cnt: %ld\n", 6472 netdev_err(dev, "TBDC free cnt: %ld\n",
6467 REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT); 6473 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6468 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n"); 6474 netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
6469 for (i = 0; i < 0x20; i++) { 6475 for (i = 0; i < 0x20; i++) {
6470 int j = 0; 6476 int j = 0;
6471 6477
6472 REG_WR(bp, BNX2_TBDC_BD_ADDR, i); 6478 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6473 REG_WR(bp, BNX2_TBDC_CAM_OPCODE, 6479 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6474 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ); 6480 BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
6475 REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB); 6481 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6476 while ((REG_RD(bp, BNX2_TBDC_COMMAND) & 6482 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6477 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100) 6483 BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
6478 j++; 6484 j++;
6479 6485
6480 cid = REG_RD(bp, BNX2_TBDC_CID); 6486 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6481 bdidx = REG_RD(bp, BNX2_TBDC_BIDX); 6487 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6482 valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE); 6488 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6483 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n", 6489 netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
6484 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX, 6490 i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
6485 bdidx >> 24, (valid >> 8) & 0x0ff); 6491 bdidx >> 24, (valid >> 8) & 0x0ff);
@@ -6500,15 +6506,15 @@ bnx2_dump_state(struct bnx2 *bp)
6500 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2); 6506 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6501 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2); 6507 netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6502 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n", 6508 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6503 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6509 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6504 REG_RD(bp, BNX2_EMAC_RX_STATUS)); 6510 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6505 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n", 6511 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6506 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); 6512 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6507 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", 6513 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6508 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); 6514 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6509 if (bp->flags & BNX2_FLAG_USING_MSIX) 6515 if (bp->flags & BNX2_FLAG_USING_MSIX)
6510 netdev_err(dev, "DEBUG: PBA[%08x]\n", 6516 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6511 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE)); 6517 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6512} 6518}
6513 6519
6514static void 6520static void
@@ -6533,8 +6539,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6533{ 6539{
6534 struct bnx2 *bp = netdev_priv(dev); 6540 struct bnx2 *bp = netdev_priv(dev);
6535 dma_addr_t mapping; 6541 dma_addr_t mapping;
6536 struct tx_bd *txbd; 6542 struct bnx2_tx_bd *txbd;
6537 struct sw_tx_bd *tx_buf; 6543 struct bnx2_sw_tx_bd *tx_buf;
6538 u32 len, vlan_tag_flags, last_frag, mss; 6544 u32 len, vlan_tag_flags, last_frag, mss;
6539 u16 prod, ring_prod; 6545 u16 prod, ring_prod;
6540 int i; 6546 int i;
@@ -6557,7 +6563,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6557 } 6563 }
6558 len = skb_headlen(skb); 6564 len = skb_headlen(skb);
6559 prod = txr->tx_prod; 6565 prod = txr->tx_prod;
6560 ring_prod = TX_RING_IDX(prod); 6566 ring_prod = BNX2_TX_RING_IDX(prod);
6561 6567
6562 vlan_tag_flags = 0; 6568 vlan_tag_flags = 0;
6563 if (skb->ip_summed == CHECKSUM_PARTIAL) { 6569 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -6627,8 +6633,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6627 for (i = 0; i < last_frag; i++) { 6633 for (i = 0; i < last_frag; i++) {
6628 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6634 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6629 6635
6630 prod = NEXT_TX_BD(prod); 6636 prod = BNX2_NEXT_TX_BD(prod);
6631 ring_prod = TX_RING_IDX(prod); 6637 ring_prod = BNX2_TX_RING_IDX(prod);
6632 txbd = &txr->tx_desc_ring[ring_prod]; 6638 txbd = &txr->tx_desc_ring[ring_prod];
6633 6639
6634 len = skb_frag_size(frag); 6640 len = skb_frag_size(frag);
@@ -6652,11 +6658,11 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6652 6658
6653 netdev_tx_sent_queue(txq, skb->len); 6659 netdev_tx_sent_queue(txq, skb->len);
6654 6660
6655 prod = NEXT_TX_BD(prod); 6661 prod = BNX2_NEXT_TX_BD(prod);
6656 txr->tx_prod_bseq += skb->len; 6662 txr->tx_prod_bseq += skb->len;
6657 6663
6658 REG_WR16(bp, txr->tx_bidx_addr, prod); 6664 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6659 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq); 6665 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6660 6666
6661 mmiowb(); 6667 mmiowb();
6662 6668
@@ -6682,7 +6688,7 @@ dma_error:
6682 6688
6683 /* start back at beginning and unmap skb */ 6689 /* start back at beginning and unmap skb */
6684 prod = txr->tx_prod; 6690 prod = txr->tx_prod;
6685 ring_prod = TX_RING_IDX(prod); 6691 ring_prod = BNX2_TX_RING_IDX(prod);
6686 tx_buf = &txr->tx_buf_ring[ring_prod]; 6692 tx_buf = &txr->tx_buf_ring[ring_prod];
6687 tx_buf->skb = NULL; 6693 tx_buf->skb = NULL;
6688 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), 6694 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
@@ -6690,8 +6696,8 @@ dma_error:
6690 6696
6691 /* unmap remaining mapped pages */ 6697 /* unmap remaining mapped pages */
6692 for (i = 0; i < last_frag; i++) { 6698 for (i = 0; i < last_frag; i++) {
6693 prod = NEXT_TX_BD(prod); 6699 prod = BNX2_NEXT_TX_BD(prod);
6694 ring_prod = TX_RING_IDX(prod); 6700 ring_prod = BNX2_TX_RING_IDX(prod);
6695 tx_buf = &txr->tx_buf_ring[ring_prod]; 6701 tx_buf = &txr->tx_buf_ring[ring_prod];
6696 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), 6702 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6697 skb_frag_size(&skb_shinfo(skb)->frags[i]), 6703 skb_frag_size(&skb_shinfo(skb)->frags[i]),
@@ -6810,8 +6816,8 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6810 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) + 6816 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6811 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions); 6817 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6812 6818
6813 if ((CHIP_NUM(bp) == CHIP_NUM_5706) || 6819 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6814 (CHIP_ID(bp) == CHIP_ID_5708_A0)) 6820 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6815 net_stats->tx_carrier_errors = 0; 6821 net_stats->tx_carrier_errors = 0;
6816 else { 6822 else {
6817 net_stats->tx_carrier_errors = 6823 net_stats->tx_carrier_errors =
@@ -7030,7 +7036,7 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
7030 offset = reg_boundaries[0]; 7036 offset = reg_boundaries[0];
7031 p += offset; 7037 p += offset;
7032 while (offset < BNX2_REGDUMP_LEN) { 7038 while (offset < BNX2_REGDUMP_LEN) {
7033 *p++ = REG_RD(bp, offset); 7039 *p++ = BNX2_RD(bp, offset);
7034 offset += 4; 7040 offset += 4;
7035 if (offset == reg_boundaries[i + 1]) { 7041 if (offset == reg_boundaries[i + 1]) {
7036 offset = reg_boundaries[i + 2]; 7042 offset = reg_boundaries[i + 2];
@@ -7254,13 +7260,13 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7254{ 7260{
7255 struct bnx2 *bp = netdev_priv(dev); 7261 struct bnx2 *bp = netdev_priv(dev);
7256 7262
7257 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; 7263 ering->rx_max_pending = BNX2_MAX_TOTAL_RX_DESC_CNT;
7258 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT; 7264 ering->rx_jumbo_max_pending = BNX2_MAX_TOTAL_RX_PG_DESC_CNT;
7259 7265
7260 ering->rx_pending = bp->rx_ring_size; 7266 ering->rx_pending = bp->rx_ring_size;
7261 ering->rx_jumbo_pending = bp->rx_pg_ring_size; 7267 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7262 7268
7263 ering->tx_max_pending = MAX_TX_DESC_CNT; 7269 ering->tx_max_pending = BNX2_MAX_TX_DESC_CNT;
7264 ering->tx_pending = bp->tx_ring_size; 7270 ering->tx_pending = bp->tx_ring_size;
7265} 7271}
7266 7272
@@ -7326,8 +7332,8 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7326 struct bnx2 *bp = netdev_priv(dev); 7332 struct bnx2 *bp = netdev_priv(dev);
7327 int rc; 7333 int rc;
7328 7334
7329 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || 7335 if ((ering->rx_pending > BNX2_MAX_TOTAL_RX_DESC_CNT) ||
7330 (ering->tx_pending > MAX_TX_DESC_CNT) || 7336 (ering->tx_pending > BNX2_MAX_TX_DESC_CNT) ||
7331 (ering->tx_pending <= MAX_SKB_FRAGS)) { 7337 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7332 7338
7333 return -EINVAL; 7339 return -EINVAL;
@@ -7614,10 +7620,10 @@ bnx2_get_ethtool_stats(struct net_device *dev,
7614 return; 7620 return;
7615 } 7621 }
7616 7622
7617 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) || 7623 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7618 (CHIP_ID(bp) == CHIP_ID_5706_A1) || 7624 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7619 (CHIP_ID(bp) == CHIP_ID_5706_A2) || 7625 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7620 (CHIP_ID(bp) == CHIP_ID_5708_A0)) 7626 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7621 stats_len_arr = bnx2_5706_stats_len_arr; 7627 stats_len_arr = bnx2_5706_stats_len_arr;
7622 else 7628 else
7623 stats_len_arr = bnx2_5708_stats_len_arr; 7629 stats_len_arr = bnx2_5708_stats_len_arr;
@@ -7655,26 +7661,26 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
7655 case ETHTOOL_ID_ACTIVE: 7661 case ETHTOOL_ID_ACTIVE:
7656 bnx2_set_power_state(bp, PCI_D0); 7662 bnx2_set_power_state(bp, PCI_D0);
7657 7663
7658 bp->leds_save = REG_RD(bp, BNX2_MISC_CFG); 7664 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7659 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC); 7665 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7660 return 1; /* cycle on/off once per second */ 7666 return 1; /* cycle on/off once per second */
7661 7667
7662 case ETHTOOL_ID_ON: 7668 case ETHTOOL_ID_ON:
7663 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE | 7669 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7664 BNX2_EMAC_LED_1000MB_OVERRIDE | 7670 BNX2_EMAC_LED_1000MB_OVERRIDE |
7665 BNX2_EMAC_LED_100MB_OVERRIDE | 7671 BNX2_EMAC_LED_100MB_OVERRIDE |
7666 BNX2_EMAC_LED_10MB_OVERRIDE | 7672 BNX2_EMAC_LED_10MB_OVERRIDE |
7667 BNX2_EMAC_LED_TRAFFIC_OVERRIDE | 7673 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7668 BNX2_EMAC_LED_TRAFFIC); 7674 BNX2_EMAC_LED_TRAFFIC);
7669 break; 7675 break;
7670 7676
7671 case ETHTOOL_ID_OFF: 7677 case ETHTOOL_ID_OFF:
7672 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE); 7678 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7673 break; 7679 break;
7674 7680
7675 case ETHTOOL_ID_INACTIVE: 7681 case ETHTOOL_ID_INACTIVE:
7676 REG_WR(bp, BNX2_EMAC_LED, 0); 7682 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7677 REG_WR(bp, BNX2_MISC_CFG, bp->leds_save); 7683 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7678 7684
7679 if (!netif_running(dev)) 7685 if (!netif_running(dev))
7680 bnx2_set_power_state(bp, PCI_D3hot); 7686 bnx2_set_power_state(bp, PCI_D3hot);
@@ -7896,10 +7902,10 @@ poll_bnx2(struct net_device *dev)
7896} 7902}
7897#endif 7903#endif
7898 7904
7899static void __devinit 7905static void
7900bnx2_get_5709_media(struct bnx2 *bp) 7906bnx2_get_5709_media(struct bnx2 *bp)
7901{ 7907{
7902 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL); 7908 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7903 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID; 7909 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7904 u32 strap; 7910 u32 strap;
7905 7911
@@ -7934,18 +7940,18 @@ bnx2_get_5709_media(struct bnx2 *bp)
7934 } 7940 }
7935} 7941}
7936 7942
7937static void __devinit 7943static void
7938bnx2_get_pci_speed(struct bnx2 *bp) 7944bnx2_get_pci_speed(struct bnx2 *bp)
7939{ 7945{
7940 u32 reg; 7946 u32 reg;
7941 7947
7942 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS); 7948 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7943 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) { 7949 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7944 u32 clkreg; 7950 u32 clkreg;
7945 7951
7946 bp->flags |= BNX2_FLAG_PCIX; 7952 bp->flags |= BNX2_FLAG_PCIX;
7947 7953
7948 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS); 7954 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7949 7955
7950 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET; 7956 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7951 switch (clkreg) { 7957 switch (clkreg) {
@@ -7986,7 +7992,7 @@ bnx2_get_pci_speed(struct bnx2 *bp)
7986 7992
7987} 7993}
7988 7994
7989static void __devinit 7995static void
7990bnx2_read_vpd_fw_ver(struct bnx2 *bp) 7996bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7991{ 7997{
7992 int rc, i, j; 7998 int rc, i, j;
@@ -8054,7 +8060,7 @@ vpd_done:
8054 kfree(data); 8060 kfree(data);
8055} 8061}
8056 8062
8057static int __devinit 8063static int
8058bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 8064bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8059{ 8065{
8060 struct bnx2 *bp; 8066 struct bnx2 *bp;
@@ -8131,20 +8137,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8131 * Rely on CPU to do target byte swapping on big endian systems 8137 * Rely on CPU to do target byte swapping on big endian systems
8132 * The chip's target access swapping will not swap all accesses 8138 * The chip's target access swapping will not swap all accesses
8133 */ 8139 */
8134 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, 8140 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8135 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 8141 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
8136 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 8142 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
8137 8143
8138 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 8144 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8139 8145
8140 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8146 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8141 if (!pci_is_pcie(pdev)) { 8147 if (!pci_is_pcie(pdev)) {
8142 dev_err(&pdev->dev, "Not PCIE, aborting\n"); 8148 dev_err(&pdev->dev, "Not PCIE, aborting\n");
8143 rc = -EIO; 8149 rc = -EIO;
8144 goto err_out_unmap; 8150 goto err_out_unmap;
8145 } 8151 }
8146 bp->flags |= BNX2_FLAG_PCIE; 8152 bp->flags |= BNX2_FLAG_PCIE;
8147 if (CHIP_REV(bp) == CHIP_REV_Ax) 8153 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8148 bp->flags |= BNX2_FLAG_JUMBO_BROKEN; 8154 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8149 8155
8150 /* AER (Advanced Error Reporting) hooks */ 8156 /* AER (Advanced Error Reporting) hooks */
@@ -8163,18 +8169,20 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8163 bp->flags |= BNX2_FLAG_BROKEN_STATS; 8169 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8164 } 8170 }
8165 8171
8166 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) { 8172 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8173 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8167 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) 8174 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
8168 bp->flags |= BNX2_FLAG_MSIX_CAP; 8175 bp->flags |= BNX2_FLAG_MSIX_CAP;
8169 } 8176 }
8170 8177
8171 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) { 8178 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8179 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8172 if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) 8180 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8173 bp->flags |= BNX2_FLAG_MSI_CAP; 8181 bp->flags |= BNX2_FLAG_MSI_CAP;
8174 } 8182 }
8175 8183
8176 /* 5708 cannot support DMA addresses > 40-bit. */ 8184 /* 5708 cannot support DMA addresses > 40-bit. */
8177 if (CHIP_NUM(bp) == CHIP_NUM_5708) 8185 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8178 persist_dma_mask = dma_mask = DMA_BIT_MASK(40); 8186 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8179 else 8187 else
8180 persist_dma_mask = dma_mask = DMA_BIT_MASK(64); 8188 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
@@ -8197,12 +8205,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8197 bnx2_get_pci_speed(bp); 8205 bnx2_get_pci_speed(bp);
8198 8206
8199 /* 5706A0 may falsely detect SERR and PERR. */ 8207 /* 5706A0 may falsely detect SERR and PERR. */
8200 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 8208 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8201 reg = REG_RD(bp, PCI_COMMAND); 8209 reg = BNX2_RD(bp, PCI_COMMAND);
8202 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); 8210 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8203 REG_WR(bp, PCI_COMMAND, reg); 8211 BNX2_WR(bp, PCI_COMMAND, reg);
8204 } 8212 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8205 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8206 !(bp->flags & BNX2_FLAG_PCIX)) { 8213 !(bp->flags & BNX2_FLAG_PCIX)) {
8207 8214
8208 dev_err(&pdev->dev, 8215 dev_err(&pdev->dev,
@@ -8299,7 +8306,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8299 bp->mac_addr[4] = (u8) (reg >> 8); 8306 bp->mac_addr[4] = (u8) (reg >> 8);
8300 bp->mac_addr[5] = (u8) reg; 8307 bp->mac_addr[5] = (u8) reg;
8301 8308
8302 bp->tx_ring_size = MAX_TX_DESC_CNT; 8309 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8303 bnx2_set_rx_ring_size(bp, 255); 8310 bnx2_set_rx_ring_size(bp, 255);
8304 8311
8305 bp->tx_quick_cons_trip_int = 2; 8312 bp->tx_quick_cons_trip_int = 2;
@@ -8319,9 +8326,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8319 bp->phy_addr = 1; 8326 bp->phy_addr = 1;
8320 8327
8321 /* Disable WOL support if we are running on a SERDES chip. */ 8328 /* Disable WOL support if we are running on a SERDES chip. */
8322 if (CHIP_NUM(bp) == CHIP_NUM_5709) 8329 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8323 bnx2_get_5709_media(bp); 8330 bnx2_get_5709_media(bp);
8324 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) 8331 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8325 bp->phy_flags |= BNX2_PHY_FLAG_SERDES; 8332 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8326 8333
8327 bp->phy_port = PORT_TP; 8334 bp->phy_port = PORT_TP;
@@ -8332,7 +8339,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8332 bp->flags |= BNX2_FLAG_NO_WOL; 8339 bp->flags |= BNX2_FLAG_NO_WOL;
8333 bp->wol = 0; 8340 bp->wol = 0;
8334 } 8341 }
8335 if (CHIP_NUM(bp) == CHIP_NUM_5706) { 8342 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8336 /* Don't do parallel detect on this board because of 8343 /* Don't do parallel detect on this board because of
8337 * some board problems. The link will not go down 8344 * some board problems. The link will not go down
8338 * if we do parallel detect. 8345 * if we do parallel detect.
@@ -8345,25 +8352,25 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8345 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G) 8352 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8346 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE; 8353 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8347 } 8354 }
8348 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 || 8355 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8349 CHIP_NUM(bp) == CHIP_NUM_5708) 8356 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8350 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX; 8357 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8351 else if (CHIP_NUM(bp) == CHIP_NUM_5709 && 8358 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8352 (CHIP_REV(bp) == CHIP_REV_Ax || 8359 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8353 CHIP_REV(bp) == CHIP_REV_Bx)) 8360 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8354 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC; 8361 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8355 8362
8356 bnx2_init_fw_cap(bp); 8363 bnx2_init_fw_cap(bp);
8357 8364
8358 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) || 8365 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8359 (CHIP_ID(bp) == CHIP_ID_5708_B0) || 8366 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8360 (CHIP_ID(bp) == CHIP_ID_5708_B1) || 8367 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8361 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) { 8368 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8362 bp->flags |= BNX2_FLAG_NO_WOL; 8369 bp->flags |= BNX2_FLAG_NO_WOL;
8363 bp->wol = 0; 8370 bp->wol = 0;
8364 } 8371 }
8365 8372
8366 if (CHIP_ID(bp) == CHIP_ID_5706_A0) { 8373 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8367 bp->tx_quick_cons_trip_int = 8374 bp->tx_quick_cons_trip_int =
8368 bp->tx_quick_cons_trip; 8375 bp->tx_quick_cons_trip;
8369 bp->tx_ticks_int = bp->tx_ticks; 8376 bp->tx_ticks_int = bp->tx_ticks;
@@ -8385,7 +8392,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8385 * AMD believes this incompatibility is unique to the 5706, and 8392 * AMD believes this incompatibility is unique to the 5706, and
8386 * prefers to locally disable MSI rather than globally disabling it. 8393 * prefers to locally disable MSI rather than globally disabling it.
8387 */ 8394 */
8388 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) { 8395 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8389 struct pci_dev *amd_8132 = NULL; 8396 struct pci_dev *amd_8132 = NULL;
8390 8397
8391 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD, 8398 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
@@ -8414,6 +8421,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8414 bp->cnic_eth_dev.max_iscsi_conn = 8421 bp->cnic_eth_dev.max_iscsi_conn =
8415 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) & 8422 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8416 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT; 8423 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
8424 bp->cnic_probe = bnx2_cnic_probe;
8417#endif 8425#endif
8418 pci_save_state(pdev); 8426 pci_save_state(pdev);
8419 8427
@@ -8439,7 +8447,7 @@ err_out:
8439 return rc; 8447 return rc;
8440} 8448}
8441 8449
8442static char * __devinit 8450static char *
8443bnx2_bus_string(struct bnx2 *bp, char *str) 8451bnx2_bus_string(struct bnx2 *bp, char *str)
8444{ 8452{
8445 char *s = str; 8453 char *s = str;
@@ -8505,7 +8513,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
8505#endif 8513#endif
8506}; 8514};
8507 8515
8508static int __devinit 8516static int
8509bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8517bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8510{ 8518{
8511 static int version_printed = 0; 8519 static int version_printed = 0;
@@ -8541,7 +8549,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8541 NETIF_F_TSO | NETIF_F_TSO_ECN | 8549 NETIF_F_TSO | NETIF_F_TSO_ECN |
8542 NETIF_F_RXHASH | NETIF_F_RXCSUM; 8550 NETIF_F_RXHASH | NETIF_F_RXCSUM;
8543 8551
8544 if (CHIP_NUM(bp) == CHIP_NUM_5709) 8552 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8545 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; 8553 dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
8546 8554
8547 dev->vlan_features = dev->hw_features; 8555 dev->vlan_features = dev->hw_features;
@@ -8556,8 +8564,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8556 8564
8557 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, " 8565 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8558 "node addr %pM\n", board_info[ent->driver_data].name, 8566 "node addr %pM\n", board_info[ent->driver_data].name,
8559 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 8567 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8560 ((CHIP_ID(bp) & 0x0ff0) >> 4), 8568 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8561 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0), 8569 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8562 pdev->irq, dev->dev_addr); 8570 pdev->irq, dev->dev_addr);
8563 8571
@@ -8573,7 +8581,7 @@ err_free:
8573 return rc; 8581 return rc;
8574} 8582}
8575 8583
8576static void __devexit 8584static void
8577bnx2_remove_one(struct pci_dev *pdev) 8585bnx2_remove_one(struct pci_dev *pdev)
8578{ 8586{
8579 struct net_device *dev = pci_get_drvdata(pdev); 8587 struct net_device *dev = pci_get_drvdata(pdev);
@@ -8752,7 +8760,7 @@ static struct pci_driver bnx2_pci_driver = {
8752 .name = DRV_MODULE_NAME, 8760 .name = DRV_MODULE_NAME,
8753 .id_table = bnx2_pci_tbl, 8761 .id_table = bnx2_pci_tbl,
8754 .probe = bnx2_init_one, 8762 .probe = bnx2_init_one,
8755 .remove = __devexit_p(bnx2_remove_one), 8763 .remove = bnx2_remove_one,
8756 .suspend = bnx2_suspend, 8764 .suspend = bnx2_suspend,
8757 .resume = bnx2_resume, 8765 .resume = bnx2_resume,
8758 .err_handler = &bnx2_err_handler, 8766 .err_handler = &bnx2_err_handler,
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index af6451dec295..172efbecfea2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -20,7 +20,7 @@
20/* 20/*
21 * tx_bd definition 21 * tx_bd definition
22 */ 22 */
23struct tx_bd { 23struct bnx2_tx_bd {
24 u32 tx_bd_haddr_hi; 24 u32 tx_bd_haddr_hi;
25 u32 tx_bd_haddr_lo; 25 u32 tx_bd_haddr_lo;
26 u32 tx_bd_mss_nbytes; 26 u32 tx_bd_mss_nbytes;
@@ -48,7 +48,7 @@ struct tx_bd {
48/* 48/*
49 * rx_bd definition 49 * rx_bd definition
50 */ 50 */
51struct rx_bd { 51struct bnx2_rx_bd {
52 u32 rx_bd_haddr_hi; 52 u32 rx_bd_haddr_hi;
53 u32 rx_bd_haddr_lo; 53 u32 rx_bd_haddr_lo;
54 u32 rx_bd_len; 54 u32 rx_bd_len;
@@ -6538,37 +6538,38 @@ struct l2_fhdr {
6538 6538
6539/* Use CPU native page size up to 16K for the ring sizes. */ 6539/* Use CPU native page size up to 16K for the ring sizes. */
6540#if (PAGE_SHIFT > 14) 6540#if (PAGE_SHIFT > 14)
6541#define BCM_PAGE_BITS 14 6541#define BNX2_PAGE_BITS 14
6542#else 6542#else
6543#define BCM_PAGE_BITS PAGE_SHIFT 6543#define BNX2_PAGE_BITS PAGE_SHIFT
6544#endif 6544#endif
6545#define BCM_PAGE_SIZE (1 << BCM_PAGE_BITS) 6545#define BNX2_PAGE_SIZE (1 << BNX2_PAGE_BITS)
6546 6546
6547#define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) 6547#define BNX2_TX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_tx_bd))
6548#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) 6548#define BNX2_MAX_TX_DESC_CNT (BNX2_TX_DESC_CNT - 1)
6549 6549
6550#define MAX_RX_RINGS 8 6550#define BNX2_MAX_RX_RINGS 8
6551#define MAX_RX_PG_RINGS 32 6551#define BNX2_MAX_RX_PG_RINGS 32
6552#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) 6552#define BNX2_RX_DESC_CNT (BNX2_PAGE_SIZE / sizeof(struct bnx2_rx_bd))
6553#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) 6553#define BNX2_MAX_RX_DESC_CNT (BNX2_RX_DESC_CNT - 1)
6554#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) 6554#define BNX2_MAX_TOTAL_RX_DESC_CNT (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_RINGS)
6555#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS) 6555#define BNX2_MAX_TOTAL_RX_PG_DESC_CNT \
6556 (BNX2_MAX_RX_DESC_CNT * BNX2_MAX_RX_PG_RINGS)
6556 6557
6557#define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ 6558#define BNX2_NEXT_TX_BD(x) (((x) & (BNX2_MAX_TX_DESC_CNT - 1)) == \
6558 (MAX_TX_DESC_CNT - 1)) ? \ 6559 (BNX2_MAX_TX_DESC_CNT - 1)) ? \
6559 (x) + 2 : (x) + 1 6560 (x) + 2 : (x) + 1
6560 6561
6561#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT) 6562#define BNX2_TX_RING_IDX(x) ((x) & BNX2_MAX_TX_DESC_CNT)
6562 6563
6563#define NEXT_RX_BD(x) (((x) & (MAX_RX_DESC_CNT - 1)) == \ 6564#define BNX2_NEXT_RX_BD(x) (((x) & (BNX2_MAX_RX_DESC_CNT - 1)) == \
6564 (MAX_RX_DESC_CNT - 1)) ? \ 6565 (BNX2_MAX_RX_DESC_CNT - 1)) ? \
6565 (x) + 2 : (x) + 1 6566 (x) + 2 : (x) + 1
6566 6567
6567#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) 6568#define BNX2_RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
6568#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx) 6569#define BNX2_RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
6569 6570
6570#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4)) 6571#define BNX2_RX_RING(x) (((x) & ~BNX2_MAX_RX_DESC_CNT) >> (BNX2_PAGE_BITS - 4))
6571#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT) 6572#define BNX2_RX_IDX(x) ((x) & BNX2_MAX_RX_DESC_CNT)
6572 6573
6573/* Context size. */ 6574/* Context size. */
6574#define CTX_SHIFT 7 6575#define CTX_SHIFT 7
@@ -6609,7 +6610,7 @@ struct l2_fhdr {
6609 * RX ring buffer contains pointer to kmalloc() data only, 6610 * RX ring buffer contains pointer to kmalloc() data only,
6610 * skb are built only after Hardware filled the frame. 6611 * skb are built only after Hardware filled the frame.
6611 */ 6612 */
6612struct sw_bd { 6613struct bnx2_sw_bd {
6613 u8 *data; 6614 u8 *data;
6614 DEFINE_DMA_UNMAP_ADDR(mapping); 6615 DEFINE_DMA_UNMAP_ADDR(mapping);
6615}; 6616};
@@ -6623,23 +6624,23 @@ static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
6623} 6624}
6624 6625
6625 6626
6626struct sw_pg { 6627struct bnx2_sw_pg {
6627 struct page *page; 6628 struct page *page;
6628 DEFINE_DMA_UNMAP_ADDR(mapping); 6629 DEFINE_DMA_UNMAP_ADDR(mapping);
6629}; 6630};
6630 6631
6631struct sw_tx_bd { 6632struct bnx2_sw_tx_bd {
6632 struct sk_buff *skb; 6633 struct sk_buff *skb;
6633 DEFINE_DMA_UNMAP_ADDR(mapping); 6634 DEFINE_DMA_UNMAP_ADDR(mapping);
6634 unsigned short is_gso; 6635 unsigned short is_gso;
6635 unsigned short nr_frags; 6636 unsigned short nr_frags;
6636}; 6637};
6637 6638
6638#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT) 6639#define SW_RXBD_RING_SIZE (sizeof(struct bnx2_sw_bd) * BNX2_RX_DESC_CNT)
6639#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT) 6640#define SW_RXPG_RING_SIZE (sizeof(struct bnx2_sw_pg) * BNX2_RX_DESC_CNT)
6640#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) 6641#define RXBD_RING_SIZE (sizeof(struct bnx2_rx_bd) * BNX2_RX_DESC_CNT)
6641#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT) 6642#define SW_TXBD_RING_SIZE (sizeof(struct bnx2_sw_tx_bd) * BNX2_TX_DESC_CNT)
6642#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) 6643#define TXBD_RING_SIZE (sizeof(struct bnx2_tx_bd) * BNX2_TX_DESC_CNT)
6643 6644
6644/* Buffered flash (Atmel: AT45DB011B) specific information */ 6645/* Buffered flash (Atmel: AT45DB011B) specific information */
6645#define SEEPROM_PAGE_BITS 2 6646#define SEEPROM_PAGE_BITS 2
@@ -6720,8 +6721,8 @@ struct bnx2_tx_ring_info {
6720 u32 tx_bidx_addr; 6721 u32 tx_bidx_addr;
6721 u32 tx_bseq_addr; 6722 u32 tx_bseq_addr;
6722 6723
6723 struct tx_bd *tx_desc_ring; 6724 struct bnx2_tx_bd *tx_desc_ring;
6724 struct sw_tx_bd *tx_buf_ring; 6725 struct bnx2_sw_tx_bd *tx_buf_ring;
6725 6726
6726 u16 tx_cons; 6727 u16 tx_cons;
6727 u16 hw_tx_cons; 6728 u16 hw_tx_cons;
@@ -6741,13 +6742,13 @@ struct bnx2_rx_ring_info {
6741 u16 rx_pg_prod; 6742 u16 rx_pg_prod;
6742 u16 rx_pg_cons; 6743 u16 rx_pg_cons;
6743 6744
6744 struct sw_bd *rx_buf_ring; 6745 struct bnx2_sw_bd *rx_buf_ring;
6745 struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; 6746 struct bnx2_rx_bd *rx_desc_ring[BNX2_MAX_RX_RINGS];
6746 struct sw_pg *rx_pg_ring; 6747 struct bnx2_sw_pg *rx_pg_ring;
6747 struct rx_bd *rx_pg_desc_ring[MAX_RX_PG_RINGS]; 6748 struct bnx2_rx_bd *rx_pg_desc_ring[BNX2_MAX_RX_PG_RINGS];
6748 6749
6749 dma_addr_t rx_desc_mapping[MAX_RX_RINGS]; 6750 dma_addr_t rx_desc_mapping[BNX2_MAX_RX_RINGS];
6750 dma_addr_t rx_pg_desc_mapping[MAX_RX_PG_RINGS]; 6751 dma_addr_t rx_pg_desc_mapping[BNX2_MAX_RX_PG_RINGS];
6751}; 6752};
6752 6753
6753struct bnx2_napi { 6754struct bnx2_napi {
@@ -6853,33 +6854,31 @@ struct bnx2 {
6853 6854
6854 u32 chip_id; 6855 u32 chip_id;
6855 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */ 6856 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6856#define CHIP_NUM(bp) (((bp)->chip_id) & 0xffff0000) 6857#define BNX2_CHIP(bp) (((bp)->chip_id) & 0xffff0000)
6857#define CHIP_NUM_5706 0x57060000 6858#define BNX2_CHIP_5706 0x57060000
6858#define CHIP_NUM_5708 0x57080000 6859#define BNX2_CHIP_5708 0x57080000
6859#define CHIP_NUM_5709 0x57090000 6860#define BNX2_CHIP_5709 0x57090000
6860 6861
6861#define CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000) 6862#define BNX2_CHIP_REV(bp) (((bp)->chip_id) & 0x0000f000)
6862#define CHIP_REV_Ax 0x00000000 6863#define BNX2_CHIP_REV_Ax 0x00000000
6863#define CHIP_REV_Bx 0x00001000 6864#define BNX2_CHIP_REV_Bx 0x00001000
6864#define CHIP_REV_Cx 0x00002000 6865#define BNX2_CHIP_REV_Cx 0x00002000
6865 6866
6866#define CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0) 6867#define BNX2_CHIP_METAL(bp) (((bp)->chip_id) & 0x00000ff0)
6867#define CHIP_BONDING(bp) (((bp)->chip_id) & 0x0000000f) 6868#define BNX2_CHIP_BOND(bp) (((bp)->chip_id) & 0x0000000f)
6868 6869
6869#define CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0) 6870#define BNX2_CHIP_ID(bp) (((bp)->chip_id) & 0xfffffff0)
6870#define CHIP_ID_5706_A0 0x57060000 6871#define BNX2_CHIP_ID_5706_A0 0x57060000
6871#define CHIP_ID_5706_A1 0x57060010 6872#define BNX2_CHIP_ID_5706_A1 0x57060010
6872#define CHIP_ID_5706_A2 0x57060020 6873#define BNX2_CHIP_ID_5706_A2 0x57060020
6873#define CHIP_ID_5708_A0 0x57080000 6874#define BNX2_CHIP_ID_5708_A0 0x57080000
6874#define CHIP_ID_5708_B0 0x57081000 6875#define BNX2_CHIP_ID_5708_B0 0x57081000
6875#define CHIP_ID_5708_B1 0x57081010 6876#define BNX2_CHIP_ID_5708_B1 0x57081010
6876#define CHIP_ID_5709_A0 0x57090000 6877#define BNX2_CHIP_ID_5709_A0 0x57090000
6877#define CHIP_ID_5709_A1 0x57090010 6878#define BNX2_CHIP_ID_5709_A1 0x57090010
6878
6879#define CHIP_BOND_ID(bp) (((bp)->chip_id) & 0xf)
6880 6879
6881/* A serdes chip will have the first bit of the bond id set. */ 6880/* A serdes chip will have the first bit of the bond id set. */
6882#define CHIP_BOND_ID_SERDES_BIT 0x01 6881#define BNX2_CHIP_BOND_SERDES_BIT 0x01
6883 6882
6884 u32 phy_addr; 6883 u32 phy_addr;
6885 u32 phy_id; 6884 u32 phy_id;
@@ -6985,19 +6984,20 @@ struct bnx2 {
6985#ifdef BCM_CNIC 6984#ifdef BCM_CNIC
6986 struct mutex cnic_lock; 6985 struct mutex cnic_lock;
6987 struct cnic_eth_dev cnic_eth_dev; 6986 struct cnic_eth_dev cnic_eth_dev;
6987 struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
6988#endif 6988#endif
6989 6989
6990 const struct firmware *mips_firmware; 6990 const struct firmware *mips_firmware;
6991 const struct firmware *rv2p_firmware; 6991 const struct firmware *rv2p_firmware;
6992}; 6992};
6993 6993
6994#define REG_RD(bp, offset) \ 6994#define BNX2_RD(bp, offset) \
6995 readl(bp->regview + offset) 6995 readl(bp->regview + offset)
6996 6996
6997#define REG_WR(bp, offset, val) \ 6997#define BNX2_WR(bp, offset, val) \
6998 writel(val, bp->regview + offset) 6998 writel(val, bp->regview + offset)
6999 6999
7000#define REG_WR16(bp, offset, val) \ 7000#define BNX2_WR16(bp, offset, val) \
7001 writew(val, bp->regview + offset) 7001 writew(val, bp->regview + offset)
7002 7002
7003struct cpu_reg { 7003struct cpu_reg {
@@ -7052,7 +7052,7 @@ struct bnx2_rv2p_fw_file {
7052 7052
7053#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0 7053#define RV2P_P1_FIXUP_PAGE_SIZE_IDX 0
7054#define RV2P_BD_PAGE_SIZE_MSK 0xffff 7054#define RV2P_BD_PAGE_SIZE_MSK 0xffff
7055#define RV2P_BD_PAGE_SIZE ((BCM_PAGE_SIZE / 16) - 1) 7055#define RV2P_BD_PAGE_SIZE ((BNX2_PAGE_SIZE / 16) - 1)
7056 7056
7057#define RV2P_PROC1 0 7057#define RV2P_PROC1 0
7058#define RV2P_PROC2 1 7058#define RV2P_PROC2 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 72897c47b8c8..e8d4db10c8f3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -34,25 +34,16 @@
34 34
35#include "bnx2x_hsi.h" 35#include "bnx2x_hsi.h"
36 36
37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
38#define BCM_CNIC 1
39#include "../cnic_if.h" 37#include "../cnic_if.h"
40#endif
41 38
42#ifdef BCM_CNIC 39
43#define BNX2X_MIN_MSIX_VEC_CNT 3 40#define BNX2X_MIN_MSIX_VEC_CNT(bp) ((bp)->min_msix_vec_cnt)
44#define BNX2X_MSIX_VEC_FP_START 2
45#else
46#define BNX2X_MIN_MSIX_VEC_CNT 2
47#define BNX2X_MSIX_VEC_FP_START 1
48#endif
49 41
50#include <linux/mdio.h> 42#include <linux/mdio.h>
51 43
52#include "bnx2x_reg.h" 44#include "bnx2x_reg.h"
53#include "bnx2x_fw_defs.h" 45#include "bnx2x_fw_defs.h"
54#include "bnx2x_mfw_req.h" 46#include "bnx2x_mfw_req.h"
55#include "bnx2x_hsi.h"
56#include "bnx2x_link.h" 47#include "bnx2x_link.h"
57#include "bnx2x_sp.h" 48#include "bnx2x_sp.h"
58#include "bnx2x_dcb.h" 49#include "bnx2x_dcb.h"
@@ -256,15 +247,10 @@ enum {
256 /* FCoE L2 */ 247 /* FCoE L2 */
257#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) 248#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
258 249
259/** Additional rings budgeting */ 250#define CNIC_SUPPORT(bp) ((bp)->cnic_support)
260#ifdef BCM_CNIC 251#define CNIC_ENABLED(bp) ((bp)->cnic_enabled)
261#define CNIC_PRESENT 1 252#define CNIC_LOADED(bp) ((bp)->cnic_loaded)
262#define FCOE_PRESENT 1 253#define FCOE_INIT(bp) ((bp)->fcoe_init)
263#else
264#define CNIC_PRESENT 0
265#define FCOE_PRESENT 0
266#endif /* BCM_CNIC */
267#define NON_ETH_CONTEXT_USE (FCOE_PRESENT)
268 254
269#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 255#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
270 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 256 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -297,9 +283,7 @@ enum {
297 OOO_TXQ_IDX_OFFSET, 283 OOO_TXQ_IDX_OFFSET,
298}; 284};
299#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) 285#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
300#ifdef BCM_CNIC
301#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) 286#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
302#endif
303 287
304/* fast path */ 288/* fast path */
305/* 289/*
@@ -505,7 +489,7 @@ struct bnx2x_fastpath {
505 u32 ustorm_rx_prods_offset; 489 u32 ustorm_rx_prods_offset;
506 490
507 u32 rx_buf_size; 491 u32 rx_buf_size;
508 492 u32 rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
509 dma_addr_t status_blk_mapping; 493 dma_addr_t status_blk_mapping;
510 494
511 enum bnx2x_tpa_mode_t mode; 495 enum bnx2x_tpa_mode_t mode;
@@ -585,15 +569,9 @@ struct bnx2x_fastpath {
585 ->var) 569 ->var)
586 570
587 571
588#define IS_ETH_FP(fp) (fp->index < \ 572#define IS_ETH_FP(fp) ((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
589 BNX2X_NUM_ETH_QUEUES(fp->bp)) 573#define IS_FCOE_FP(fp) ((fp)->index == FCOE_IDX((fp)->bp))
590#ifdef BCM_CNIC 574#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
591#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
592#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
593#else
594#define IS_FCOE_FP(fp) false
595#define IS_FCOE_IDX(idx) false
596#endif
597 575
598 576
599/* MC hsi */ 577/* MC hsi */
@@ -886,6 +864,18 @@ struct bnx2x_common {
886 (CHIP_REV(bp) == CHIP_REV_Bx)) 864 (CHIP_REV(bp) == CHIP_REV_Bx))
887#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \ 865#define CHIP_IS_E3A0(bp) (CHIP_IS_E3(bp) && \
888 (CHIP_REV(bp) == CHIP_REV_Ax)) 866 (CHIP_REV(bp) == CHIP_REV_Ax))
867/* This define is used in two main places:
868 * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
869 * to nic-only mode or to offload mode. Offload mode is configured if either the
870 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
871 * registered for this port (which means that the user wants storage services).
872 * 2. During cnic-related load, to know if offload mode is already configured in
873 * the HW or needs to be configrued.
874 * Since the transition from nic-mode to offload-mode in HW causes traffic
875 * coruption, nic-mode is configured only in ports on which storage services
876 * where never requested.
877 */
878#define CONFIGURE_NIC_MODE(bp) (!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
889 879
890 int flash_size; 880 int flash_size;
891#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 881#define BNX2X_NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -925,6 +915,7 @@ struct bnx2x_common {
925#define BNX2X_IGU_STAS_MSG_VF_CNT 64 915#define BNX2X_IGU_STAS_MSG_VF_CNT 64
926#define BNX2X_IGU_STAS_MSG_PF_CNT 4 916#define BNX2X_IGU_STAS_MSG_PF_CNT 4
927 917
918#define MAX_IGU_ATTN_ACK_TO 100
928/* end of common */ 919/* end of common */
929 920
930/* port */ 921/* port */
@@ -946,7 +937,6 @@ struct bnx2x_port {
946 937
947 /* used to synchronize phy accesses */ 938 /* used to synchronize phy accesses */
948 struct mutex phy_mutex; 939 struct mutex phy_mutex;
949 int need_hw_lock;
950 940
951 u32 port_stx; 941 u32 port_stx;
952 942
@@ -1003,18 +993,15 @@ union cdu_context {
1003#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ 993#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
1004#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) 994#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
1005 995
1006#ifdef BCM_CNIC
1007#define CNIC_ISCSI_CID_MAX 256 996#define CNIC_ISCSI_CID_MAX 256
1008#define CNIC_FCOE_CID_MAX 2048 997#define CNIC_FCOE_CID_MAX 2048
1009#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX) 998#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
1010#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) 999#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
1011#endif
1012 1000
1013#define QM_ILT_PAGE_SZ_HW 0 1001#define QM_ILT_PAGE_SZ_HW 0
1014#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */ 1002#define QM_ILT_PAGE_SZ (4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
1015#define QM_CID_ROUND 1024 1003#define QM_CID_ROUND 1024
1016 1004
1017#ifdef BCM_CNIC
1018/* TM (timers) host DB constants */ 1005/* TM (timers) host DB constants */
1019#define TM_ILT_PAGE_SZ_HW 0 1006#define TM_ILT_PAGE_SZ_HW 0
1020#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */ 1007#define TM_ILT_PAGE_SZ (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
@@ -1032,8 +1019,6 @@ union cdu_context {
1032#define SRC_T2_SZ SRC_ILT_SZ 1019#define SRC_T2_SZ SRC_ILT_SZ
1033#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ) 1020#define SRC_ILT_LINES DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
1034 1021
1035#endif
1036
1037#define MAX_DMAE_C 8 1022#define MAX_DMAE_C 8
1038 1023
1039/* DMA memory not used in fastpath */ 1024/* DMA memory not used in fastpath */
@@ -1201,6 +1186,7 @@ struct bnx2x_prev_path_list {
1201 u8 slot; 1186 u8 slot;
1202 u8 path; 1187 u8 path;
1203 struct list_head list; 1188 struct list_head list;
1189 u8 undi;
1204}; 1190};
1205 1191
1206struct bnx2x_sp_objs { 1192struct bnx2x_sp_objs {
@@ -1227,7 +1213,6 @@ struct bnx2x {
1227 struct bnx2x_sp_objs *sp_objs; 1213 struct bnx2x_sp_objs *sp_objs;
1228 struct bnx2x_fp_stats *fp_stats; 1214 struct bnx2x_fp_stats *fp_stats;
1229 struct bnx2x_fp_txdata *bnx2x_txq; 1215 struct bnx2x_fp_txdata *bnx2x_txq;
1230 int bnx2x_txq_size;
1231 void __iomem *regview; 1216 void __iomem *regview;
1232 void __iomem *doorbells; 1217 void __iomem *doorbells;
1233 u16 db_size; 1218 u16 db_size;
@@ -1350,6 +1335,16 @@ struct bnx2x {
1350#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1335#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
1351#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) 1336#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
1352 1337
1338 u8 cnic_support;
1339 bool cnic_enabled;
1340 bool cnic_loaded;
1341 struct cnic_eth_dev *(*cnic_probe)(struct net_device *);
1342
1343 /* Flag that indicates that we can start looking for FCoE L2 queue
1344 * completions in the default status block.
1345 */
1346 bool fcoe_init;
1347
1353 int pm_cap; 1348 int pm_cap;
1354 int mrrs; 1349 int mrrs;
1355 1350
@@ -1420,6 +1415,8 @@ struct bnx2x {
1420#define BNX2X_MAX_COS 3 1415#define BNX2X_MAX_COS 3
1421#define BNX2X_MAX_TX_COS 2 1416#define BNX2X_MAX_TX_COS 2
1422 int num_queues; 1417 int num_queues;
1418 uint num_ethernet_queues;
1419 uint num_cnic_queues;
1423 int num_napi_queues; 1420 int num_napi_queues;
1424 int disable_tpa; 1421 int disable_tpa;
1425 1422
@@ -1433,6 +1430,7 @@ struct bnx2x {
1433 u8 igu_dsb_id; 1430 u8 igu_dsb_id;
1434 u8 igu_base_sb; 1431 u8 igu_base_sb;
1435 u8 igu_sb_cnt; 1432 u8 igu_sb_cnt;
1433 u8 min_msix_vec_cnt;
1436 1434
1437 dma_addr_t def_status_blk_mapping; 1435 dma_addr_t def_status_blk_mapping;
1438 1436
@@ -1478,26 +1476,23 @@ struct bnx2x {
1478 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes 1476 * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
1479 * to CNIC. 1477 * to CNIC.
1480 */ 1478 */
1481#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_PRESENT) 1479#define BNX2X_MAX_RSS_COUNT(bp) ((bp)->igu_sb_cnt - CNIC_SUPPORT(bp))
1482 1480
1483/* 1481/*
1484 * Maximum CID count that might be required by the bnx2x: 1482 * Maximum CID count that might be required by the bnx2x:
1485 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI 1483 * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
1486 */ 1484 */
1487#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ 1485#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
1488 + NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1486 + 2 * CNIC_SUPPORT(bp))
1489#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ 1487#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
1490 + NON_ETH_CONTEXT_USE + CNIC_PRESENT) 1488 + 2 * CNIC_SUPPORT(bp))
1491#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ 1489#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
1492 ILT_PAGE_CIDS)) 1490 ILT_PAGE_CIDS))
1493 1491
1494 int qm_cid_count; 1492 int qm_cid_count;
1495 1493
1496 int dropless_fc; 1494 bool dropless_fc;
1497 1495
1498#ifdef BCM_CNIC
1499 u32 cnic_flags;
1500#define BNX2X_CNIC_FLAG_MAC_SET 1
1501 void *t2; 1496 void *t2;
1502 dma_addr_t t2_mapping; 1497 dma_addr_t t2_mapping;
1503 struct cnic_ops __rcu *cnic_ops; 1498 struct cnic_ops __rcu *cnic_ops;
@@ -1518,7 +1513,6 @@ struct bnx2x {
1518 1513
1519 /* Start index of the "special" (CNIC related) L2 cleints */ 1514 /* Start index of the "special" (CNIC related) L2 cleints */
1520 u8 cnic_base_cl_id; 1515 u8 cnic_base_cl_id;
1521#endif
1522 1516
1523 int dmae_ready; 1517 int dmae_ready;
1524 /* used to synchronize dmae accesses */ 1518 /* used to synchronize dmae accesses */
@@ -1647,9 +1641,9 @@ struct bnx2x {
1647/* Tx queues may be less or equal to Rx queues */ 1641/* Tx queues may be less or equal to Rx queues */
1648extern int num_queues; 1642extern int num_queues;
1649#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1643#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1650#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) 1644#define BNX2X_NUM_ETH_QUEUES(bp) ((bp)->num_ethernet_queues)
1651#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ 1645#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
1652 NON_ETH_CONTEXT_USE) 1646 (bp)->num_cnic_queues)
1653#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) 1647#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
1654 1648
1655#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1649#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1689,6 +1683,13 @@ struct bnx2x_func_init_params {
1689 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ 1683 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1690}; 1684};
1691 1685
1686#define for_each_cnic_queue(bp, var) \
1687 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1688 (var)++) \
1689 if (skip_queue(bp, var)) \
1690 continue; \
1691 else
1692
1692#define for_each_eth_queue(bp, var) \ 1693#define for_each_eth_queue(bp, var) \
1693 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++) 1694 for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
1694 1695
@@ -1702,6 +1703,22 @@ struct bnx2x_func_init_params {
1702 else 1703 else
1703 1704
1704/* Skip forwarding FP */ 1705/* Skip forwarding FP */
1706#define for_each_valid_rx_queue(bp, var) \
1707 for ((var) = 0; \
1708 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
1709 BNX2X_NUM_ETH_QUEUES(bp)); \
1710 (var)++) \
1711 if (skip_rx_queue(bp, var)) \
1712 continue; \
1713 else
1714
1715#define for_each_rx_queue_cnic(bp, var) \
1716 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1717 (var)++) \
1718 if (skip_rx_queue(bp, var)) \
1719 continue; \
1720 else
1721
1705#define for_each_rx_queue(bp, var) \ 1722#define for_each_rx_queue(bp, var) \
1706 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1723 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1707 if (skip_rx_queue(bp, var)) \ 1724 if (skip_rx_queue(bp, var)) \
@@ -1709,6 +1726,22 @@ struct bnx2x_func_init_params {
1709 else 1726 else
1710 1727
1711/* Skip OOO FP */ 1728/* Skip OOO FP */
1729#define for_each_valid_tx_queue(bp, var) \
1730 for ((var) = 0; \
1731 (var) < (CNIC_LOADED(bp) ? BNX2X_NUM_QUEUES(bp) : \
1732 BNX2X_NUM_ETH_QUEUES(bp)); \
1733 (var)++) \
1734 if (skip_tx_queue(bp, var)) \
1735 continue; \
1736 else
1737
1738#define for_each_tx_queue_cnic(bp, var) \
1739 for ((var) = BNX2X_NUM_ETH_QUEUES(bp); (var) < BNX2X_NUM_QUEUES(bp); \
1740 (var)++) \
1741 if (skip_tx_queue(bp, var)) \
1742 continue; \
1743 else
1744
1712#define for_each_tx_queue(bp, var) \ 1745#define for_each_tx_queue(bp, var) \
1713 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ 1746 for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
1714 if (skip_tx_queue(bp, var)) \ 1747 if (skip_tx_queue(bp, var)) \
@@ -2179,7 +2212,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2179#define BNX2X_MF_SD_PROTOCOL(bp) \ 2212#define BNX2X_MF_SD_PROTOCOL(bp) \
2180 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK) 2213 ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
2181 2214
2182#ifdef BCM_CNIC
2183#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \ 2215#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
2184 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI) 2216 (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
2185 2217
@@ -2196,9 +2228,12 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2196#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ 2228#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
2197 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2229 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2198 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2230 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2199#else
2200#define IS_MF_FCOE_AFEX(bp) false
2201#endif
2202 2231
2232enum {
2233 SWITCH_UPDATE,
2234 AFEX_UPDATE,
2235};
2236
2237#define NUM_MACS 8
2203 2238
2204#endif /* bnx2x.h */ 2239#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4833b6a9031c..a2998bea5d4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -552,6 +552,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
552 return 0; 552 return 0;
553} 553}
554 554
555static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
556{
557 if (fp->rx_frag_size)
558 put_page(virt_to_head_page(data));
559 else
560 kfree(data);
561}
562
563static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
564{
565 if (fp->rx_frag_size)
566 return netdev_alloc_frag(fp->rx_frag_size);
567
568 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
569}
570
571
555static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, 572static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
556 struct bnx2x_agg_info *tpa_info, 573 struct bnx2x_agg_info *tpa_info,
557 u16 pages, 574 u16 pages,
@@ -574,15 +591,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
574 goto drop; 591 goto drop;
575 592
576 /* Try to allocate the new data */ 593 /* Try to allocate the new data */
577 new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); 594 new_data = bnx2x_frag_alloc(fp);
578
579 /* Unmap skb in the pool anyway, as we are going to change 595 /* Unmap skb in the pool anyway, as we are going to change
580 pool entry status to BNX2X_TPA_STOP even if new skb allocation 596 pool entry status to BNX2X_TPA_STOP even if new skb allocation
581 fails. */ 597 fails. */
582 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 598 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
583 fp->rx_buf_size, DMA_FROM_DEVICE); 599 fp->rx_buf_size, DMA_FROM_DEVICE);
584 if (likely(new_data)) 600 if (likely(new_data))
585 skb = build_skb(data, 0); 601 skb = build_skb(data, fp->rx_frag_size);
586 602
587 if (likely(skb)) { 603 if (likely(skb)) {
588#ifdef BNX2X_STOP_ON_ERROR 604#ifdef BNX2X_STOP_ON_ERROR
@@ -619,7 +635,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
619 635
620 return; 636 return;
621 } 637 }
622 kfree(new_data); 638 bnx2x_frag_free(fp, new_data);
623drop: 639drop:
624 /* drop the packet and keep the buffer in the bin */ 640 /* drop the packet and keep the buffer in the bin */
625 DP(NETIF_MSG_RX_STATUS, 641 DP(NETIF_MSG_RX_STATUS,
@@ -635,7 +651,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
635 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; 651 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
636 dma_addr_t mapping; 652 dma_addr_t mapping;
637 653
638 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC); 654 data = bnx2x_frag_alloc(fp);
639 if (unlikely(data == NULL)) 655 if (unlikely(data == NULL))
640 return -ENOMEM; 656 return -ENOMEM;
641 657
@@ -643,7 +659,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
643 fp->rx_buf_size, 659 fp->rx_buf_size,
644 DMA_FROM_DEVICE); 660 DMA_FROM_DEVICE);
645 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
646 kfree(data); 662 bnx2x_frag_free(fp, data);
647 BNX2X_ERR("Can't map rx data\n"); 663 BNX2X_ERR("Can't map rx data\n");
648 return -ENOMEM; 664 return -ENOMEM;
649 } 665 }
@@ -845,9 +861,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
845 dma_unmap_addr(rx_buf, mapping), 861 dma_unmap_addr(rx_buf, mapping),
846 fp->rx_buf_size, 862 fp->rx_buf_size,
847 DMA_FROM_DEVICE); 863 DMA_FROM_DEVICE);
848 skb = build_skb(data, 0); 864 skb = build_skb(data, fp->rx_frag_size);
849 if (unlikely(!skb)) { 865 if (unlikely(!skb)) {
850 kfree(data); 866 bnx2x_frag_free(fp, data);
851 bnx2x_fp_qstats(bp, fp)-> 867 bnx2x_fp_qstats(bp, fp)->
852 rx_skb_alloc_failed++; 868 rx_skb_alloc_failed++;
853 goto next_rx; 869 goto next_rx;
@@ -948,14 +964,12 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp)
948{ 964{
949 mutex_lock(&bp->port.phy_mutex); 965 mutex_lock(&bp->port.phy_mutex);
950 966
951 if (bp->port.need_hw_lock) 967 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
952 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
953} 968}
954 969
955void bnx2x_release_phy_lock(struct bnx2x *bp) 970void bnx2x_release_phy_lock(struct bnx2x *bp)
956{ 971{
957 if (bp->port.need_hw_lock) 972 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
959 973
960 mutex_unlock(&bp->port.phy_mutex); 974 mutex_unlock(&bp->port.phy_mutex);
961} 975}
@@ -1147,11 +1161,30 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1147 dma_unmap_single(&bp->pdev->dev, 1161 dma_unmap_single(&bp->pdev->dev,
1148 dma_unmap_addr(first_buf, mapping), 1162 dma_unmap_addr(first_buf, mapping),
1149 fp->rx_buf_size, DMA_FROM_DEVICE); 1163 fp->rx_buf_size, DMA_FROM_DEVICE);
1150 kfree(data); 1164 bnx2x_frag_free(fp, data);
1151 first_buf->data = NULL; 1165 first_buf->data = NULL;
1152 } 1166 }
1153} 1167}
1154 1168
1169void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1170{
1171 int j;
1172
1173 for_each_rx_queue_cnic(bp, j) {
1174 struct bnx2x_fastpath *fp = &bp->fp[j];
1175
1176 fp->rx_bd_cons = 0;
1177
1178 /* Activate BD ring */
1179 /* Warning!
1180 * this will generate an interrupt (to the TSTORM)
1181 * must only be done after chip is initialized
1182 */
1183 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1184 fp->rx_sge_prod);
1185 }
1186}
1187
1155void bnx2x_init_rx_rings(struct bnx2x *bp) 1188void bnx2x_init_rx_rings(struct bnx2x *bp)
1156{ 1189{
1157 int func = BP_FUNC(bp); 1190 int func = BP_FUNC(bp);
@@ -1159,7 +1192,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1159 int i, j; 1192 int i, j;
1160 1193
1161 /* Allocate TPA resources */ 1194 /* Allocate TPA resources */
1162 for_each_rx_queue(bp, j) { 1195 for_each_eth_queue(bp, j) {
1163 struct bnx2x_fastpath *fp = &bp->fp[j]; 1196 struct bnx2x_fastpath *fp = &bp->fp[j];
1164 1197
1165 DP(NETIF_MSG_IFUP, 1198 DP(NETIF_MSG_IFUP,
@@ -1173,8 +1206,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1173 struct sw_rx_bd *first_buf = 1206 struct sw_rx_bd *first_buf =
1174 &tpa_info->first_buf; 1207 &tpa_info->first_buf;
1175 1208
1176 first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, 1209 first_buf->data = bnx2x_frag_alloc(fp);
1177 GFP_ATOMIC);
1178 if (!first_buf->data) { 1210 if (!first_buf->data) {
1179 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", 1211 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1180 j); 1212 j);
@@ -1217,7 +1249,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1217 } 1249 }
1218 } 1250 }
1219 1251
1220 for_each_rx_queue(bp, j) { 1252 for_each_eth_queue(bp, j) {
1221 struct bnx2x_fastpath *fp = &bp->fp[j]; 1253 struct bnx2x_fastpath *fp = &bp->fp[j];
1222 1254
1223 fp->rx_bd_cons = 0; 1255 fp->rx_bd_cons = 0;
@@ -1244,29 +1276,45 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
1244 } 1276 }
1245} 1277}
1246 1278
1247static void bnx2x_free_tx_skbs(struct bnx2x *bp) 1279static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1248{ 1280{
1249 int i;
1250 u8 cos; 1281 u8 cos;
1282 struct bnx2x *bp = fp->bp;
1251 1283
1252 for_each_tx_queue(bp, i) { 1284 for_each_cos_in_tx_queue(fp, cos) {
1253 struct bnx2x_fastpath *fp = &bp->fp[i]; 1285 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1254 for_each_cos_in_tx_queue(fp, cos) { 1286 unsigned pkts_compl = 0, bytes_compl = 0;
1255 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1256 unsigned pkts_compl = 0, bytes_compl = 0;
1257 1287
1258 u16 sw_prod = txdata->tx_pkt_prod; 1288 u16 sw_prod = txdata->tx_pkt_prod;
1259 u16 sw_cons = txdata->tx_pkt_cons; 1289 u16 sw_cons = txdata->tx_pkt_cons;
1260 1290
1261 while (sw_cons != sw_prod) { 1291 while (sw_cons != sw_prod) {
1262 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons), 1292 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1263 &pkts_compl, &bytes_compl); 1293 &pkts_compl, &bytes_compl);
1264 sw_cons++; 1294 sw_cons++;
1265 }
1266 netdev_tx_reset_queue(
1267 netdev_get_tx_queue(bp->dev,
1268 txdata->txq_index));
1269 } 1295 }
1296
1297 netdev_tx_reset_queue(
1298 netdev_get_tx_queue(bp->dev,
1299 txdata->txq_index));
1300 }
1301}
1302
1303static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1304{
1305 int i;
1306
1307 for_each_tx_queue_cnic(bp, i) {
1308 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1309 }
1310}
1311
1312static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1313{
1314 int i;
1315
1316 for_each_eth_queue(bp, i) {
1317 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1270 } 1318 }
1271} 1319}
1272 1320
@@ -1290,7 +1338,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1290 fp->rx_buf_size, DMA_FROM_DEVICE); 1338 fp->rx_buf_size, DMA_FROM_DEVICE);
1291 1339
1292 rx_buf->data = NULL; 1340 rx_buf->data = NULL;
1293 kfree(data); 1341 bnx2x_frag_free(fp, data);
1342 }
1343}
1344
1345static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1346{
1347 int j;
1348
1349 for_each_rx_queue_cnic(bp, j) {
1350 bnx2x_free_rx_bds(&bp->fp[j]);
1294 } 1351 }
1295} 1352}
1296 1353
@@ -1298,7 +1355,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1298{ 1355{
1299 int j; 1356 int j;
1300 1357
1301 for_each_rx_queue(bp, j) { 1358 for_each_eth_queue(bp, j) {
1302 struct bnx2x_fastpath *fp = &bp->fp[j]; 1359 struct bnx2x_fastpath *fp = &bp->fp[j];
1303 1360
1304 bnx2x_free_rx_bds(fp); 1361 bnx2x_free_rx_bds(fp);
@@ -1308,6 +1365,12 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1308 } 1365 }
1309} 1366}
1310 1367
1368void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1369{
1370 bnx2x_free_tx_skbs_cnic(bp);
1371 bnx2x_free_rx_skbs_cnic(bp);
1372}
1373
1311void bnx2x_free_skbs(struct bnx2x *bp) 1374void bnx2x_free_skbs(struct bnx2x *bp)
1312{ 1375{
1313 bnx2x_free_tx_skbs(bp); 1376 bnx2x_free_tx_skbs(bp);
@@ -1347,11 +1410,12 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1347 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 1410 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1348 bp->msix_table[offset].vector); 1411 bp->msix_table[offset].vector);
1349 offset++; 1412 offset++;
1350#ifdef BCM_CNIC 1413
1351 if (nvecs == offset) 1414 if (CNIC_SUPPORT(bp)) {
1352 return; 1415 if (nvecs == offset)
1353 offset++; 1416 return;
1354#endif 1417 offset++;
1418 }
1355 1419
1356 for_each_eth_queue(bp, i) { 1420 for_each_eth_queue(bp, i) {
1357 if (nvecs == offset) 1421 if (nvecs == offset)
@@ -1368,7 +1432,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
1368 if (bp->flags & USING_MSIX_FLAG && 1432 if (bp->flags & USING_MSIX_FLAG &&
1369 !(bp->flags & USING_SINGLE_MSIX_FLAG)) 1433 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1370 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1434 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1371 CNIC_PRESENT + 1); 1435 CNIC_SUPPORT(bp) + 1);
1372 else 1436 else
1373 free_irq(bp->dev->irq, bp->dev); 1437 free_irq(bp->dev->irq, bp->dev);
1374} 1438}
@@ -1382,12 +1446,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1382 bp->msix_table[0].entry); 1446 bp->msix_table[0].entry);
1383 msix_vec++; 1447 msix_vec++;
1384 1448
1385#ifdef BCM_CNIC 1449 /* Cnic requires an msix vector for itself */
1386 bp->msix_table[msix_vec].entry = msix_vec; 1450 if (CNIC_SUPPORT(bp)) {
1387 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n", 1451 bp->msix_table[msix_vec].entry = msix_vec;
1388 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1452 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1389 msix_vec++; 1453 msix_vec, bp->msix_table[msix_vec].entry);
1390#endif 1454 msix_vec++;
1455 }
1456
1391 /* We need separate vectors for ETH queues only (not FCoE) */ 1457 /* We need separate vectors for ETH queues only (not FCoE) */
1392 for_each_eth_queue(bp, i) { 1458 for_each_eth_queue(bp, i) {
1393 bp->msix_table[msix_vec].entry = msix_vec; 1459 bp->msix_table[msix_vec].entry = msix_vec;
@@ -1396,7 +1462,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1396 msix_vec++; 1462 msix_vec++;
1397 } 1463 }
1398 1464
1399 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1; 1465 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
1400 1466
1401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1467 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1402 1468
@@ -1404,7 +1470,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1404 * reconfigure number of tx/rx queues according to available 1470 * reconfigure number of tx/rx queues according to available
1405 * MSI-X vectors 1471 * MSI-X vectors
1406 */ 1472 */
1407 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) { 1473 if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
1408 /* how less vectors we will have? */ 1474 /* how less vectors we will have? */
1409 int diff = req_cnt - rc; 1475 int diff = req_cnt - rc;
1410 1476
@@ -1419,7 +1485,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1419 /* 1485 /*
1420 * decrease number of queues by number of unallocated entries 1486 * decrease number of queues by number of unallocated entries
1421 */ 1487 */
1422 bp->num_queues -= diff; 1488 bp->num_ethernet_queues -= diff;
1489 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1423 1490
1424 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1491 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1425 bp->num_queues); 1492 bp->num_queues);
@@ -1435,6 +1502,9 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1435 BNX2X_DEV_INFO("Using single MSI-X vector\n"); 1502 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1436 bp->flags |= USING_SINGLE_MSIX_FLAG; 1503 bp->flags |= USING_SINGLE_MSIX_FLAG;
1437 1504
1505 BNX2X_DEV_INFO("set number of queues to 1\n");
1506 bp->num_ethernet_queues = 1;
1507 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1438 } else if (rc < 0) { 1508 } else if (rc < 0) {
1439 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1509 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1440 goto no_msix; 1510 goto no_msix;
@@ -1464,9 +1534,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1464 return -EBUSY; 1534 return -EBUSY;
1465 } 1535 }
1466 1536
1467#ifdef BCM_CNIC 1537 if (CNIC_SUPPORT(bp))
1468 offset++; 1538 offset++;
1469#endif 1539
1470 for_each_eth_queue(bp, i) { 1540 for_each_eth_queue(bp, i) {
1471 struct bnx2x_fastpath *fp = &bp->fp[i]; 1541 struct bnx2x_fastpath *fp = &bp->fp[i];
1472 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1542 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
@@ -1485,7 +1555,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1485 } 1555 }
1486 1556
1487 i = BNX2X_NUM_ETH_QUEUES(bp); 1557 i = BNX2X_NUM_ETH_QUEUES(bp);
1488 offset = 1 + CNIC_PRESENT; 1558 offset = 1 + CNIC_SUPPORT(bp);
1489 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 1559 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1490 bp->msix_table[0].vector, 1560 bp->msix_table[0].vector,
1491 0, bp->msix_table[offset].vector, 1561 0, bp->msix_table[offset].vector,
@@ -1556,19 +1626,35 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
1556 return 0; 1626 return 0;
1557} 1627}
1558 1628
1629static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1630{
1631 int i;
1632
1633 for_each_rx_queue_cnic(bp, i)
1634 napi_enable(&bnx2x_fp(bp, i, napi));
1635}
1636
1559static void bnx2x_napi_enable(struct bnx2x *bp) 1637static void bnx2x_napi_enable(struct bnx2x *bp)
1560{ 1638{
1561 int i; 1639 int i;
1562 1640
1563 for_each_rx_queue(bp, i) 1641 for_each_eth_queue(bp, i)
1564 napi_enable(&bnx2x_fp(bp, i, napi)); 1642 napi_enable(&bnx2x_fp(bp, i, napi));
1565} 1643}
1566 1644
1645static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1646{
1647 int i;
1648
1649 for_each_rx_queue_cnic(bp, i)
1650 napi_disable(&bnx2x_fp(bp, i, napi));
1651}
1652
1567static void bnx2x_napi_disable(struct bnx2x *bp) 1653static void bnx2x_napi_disable(struct bnx2x *bp)
1568{ 1654{
1569 int i; 1655 int i;
1570 1656
1571 for_each_rx_queue(bp, i) 1657 for_each_eth_queue(bp, i)
1572 napi_disable(&bnx2x_fp(bp, i, napi)); 1658 napi_disable(&bnx2x_fp(bp, i, napi));
1573} 1659}
1574 1660
@@ -1576,6 +1662,8 @@ void bnx2x_netif_start(struct bnx2x *bp)
1576{ 1662{
1577 if (netif_running(bp->dev)) { 1663 if (netif_running(bp->dev)) {
1578 bnx2x_napi_enable(bp); 1664 bnx2x_napi_enable(bp);
1665 if (CNIC_LOADED(bp))
1666 bnx2x_napi_enable_cnic(bp);
1579 bnx2x_int_enable(bp); 1667 bnx2x_int_enable(bp);
1580 if (bp->state == BNX2X_STATE_OPEN) 1668 if (bp->state == BNX2X_STATE_OPEN)
1581 netif_tx_wake_all_queues(bp->dev); 1669 netif_tx_wake_all_queues(bp->dev);
@@ -1586,14 +1674,15 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1586{ 1674{
1587 bnx2x_int_disable_sync(bp, disable_hw); 1675 bnx2x_int_disable_sync(bp, disable_hw);
1588 bnx2x_napi_disable(bp); 1676 bnx2x_napi_disable(bp);
1677 if (CNIC_LOADED(bp))
1678 bnx2x_napi_disable_cnic(bp);
1589} 1679}
1590 1680
1591u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) 1681u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1592{ 1682{
1593 struct bnx2x *bp = netdev_priv(dev); 1683 struct bnx2x *bp = netdev_priv(dev);
1594 1684
1595#ifdef BCM_CNIC 1685 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1596 if (!NO_FCOE(bp)) {
1597 struct ethhdr *hdr = (struct ethhdr *)skb->data; 1686 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1598 u16 ether_type = ntohs(hdr->h_proto); 1687 u16 ether_type = ntohs(hdr->h_proto);
1599 1688
@@ -1609,7 +1698,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1609 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP)) 1698 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1610 return bnx2x_fcoe_tx(bp, txq_index); 1699 return bnx2x_fcoe_tx(bp, txq_index);
1611 } 1700 }
1612#endif 1701
1613 /* select a non-FCoE queue */ 1702 /* select a non-FCoE queue */
1614 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1703 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1615} 1704}
@@ -1618,15 +1707,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1618void bnx2x_set_num_queues(struct bnx2x *bp) 1707void bnx2x_set_num_queues(struct bnx2x *bp)
1619{ 1708{
1620 /* RSS queues */ 1709 /* RSS queues */
1621 bp->num_queues = bnx2x_calc_num_queues(bp); 1710 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1622 1711
1623#ifdef BCM_CNIC
1624 /* override in STORAGE SD modes */ 1712 /* override in STORAGE SD modes */
1625 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) 1713 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1626 bp->num_queues = 1; 1714 bp->num_ethernet_queues = 1;
1627#endif 1715
1628 /* Add special queues */ 1716 /* Add special queues */
1629 bp->num_queues += NON_ETH_CONTEXT_USE; 1717 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1718 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1630 1719
1631 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 1720 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1632} 1721}
@@ -1653,20 +1742,18 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1653 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1742 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1654 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1743 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1655 */ 1744 */
1656static int bnx2x_set_real_num_queues(struct bnx2x *bp) 1745static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1657{ 1746{
1658 int rc, tx, rx; 1747 int rc, tx, rx;
1659 1748
1660 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; 1749 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1661 rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE; 1750 rx = BNX2X_NUM_ETH_QUEUES(bp);
1662 1751
1663/* account for fcoe queue */ 1752/* account for fcoe queue */
1664#ifdef BCM_CNIC 1753 if (include_cnic && !NO_FCOE(bp)) {
1665 if (!NO_FCOE(bp)) { 1754 rx++;
1666 rx += FCOE_PRESENT; 1755 tx++;
1667 tx += FCOE_PRESENT;
1668 } 1756 }
1669#endif
1670 1757
1671 rc = netif_set_real_num_tx_queues(bp->dev, tx); 1758 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1672 if (rc) { 1759 if (rc) {
@@ -1710,6 +1797,10 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1710 mtu + 1797 mtu +
1711 BNX2X_FW_RX_ALIGN_END; 1798 BNX2X_FW_RX_ALIGN_END;
1712 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */ 1799 /* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
1800 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
1801 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
1802 else
1803 fp->rx_frag_size = 0;
1713 } 1804 }
1714} 1805}
1715 1806
@@ -1859,14 +1950,26 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
1859 (bp)->state = BNX2X_STATE_ERROR; \ 1950 (bp)->state = BNX2X_STATE_ERROR; \
1860 goto label; \ 1951 goto label; \
1861 } while (0) 1952 } while (0)
1862#else 1953
1954#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1955 do { \
1956 bp->cnic_loaded = false; \
1957 goto label; \
1958 } while (0)
1959#else /*BNX2X_STOP_ON_ERROR*/
1863#define LOAD_ERROR_EXIT(bp, label) \ 1960#define LOAD_ERROR_EXIT(bp, label) \
1864 do { \ 1961 do { \
1865 (bp)->state = BNX2X_STATE_ERROR; \ 1962 (bp)->state = BNX2X_STATE_ERROR; \
1866 (bp)->panic = 1; \ 1963 (bp)->panic = 1; \
1867 return -EBUSY; \ 1964 return -EBUSY; \
1868 } while (0) 1965 } while (0)
1869#endif 1966#define LOAD_ERROR_EXIT_CNIC(bp, label) \
1967 do { \
1968 bp->cnic_loaded = false; \
1969 (bp)->panic = 1; \
1970 return -EBUSY; \
1971 } while (0)
1972#endif /*BNX2X_STOP_ON_ERROR*/
1870 1973
1871bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) 1974bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1872{ 1975{
@@ -1959,10 +2062,8 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1959 fp->max_cos = 1; 2062 fp->max_cos = 1;
1960 2063
1961 /* Init txdata pointers */ 2064 /* Init txdata pointers */
1962#ifdef BCM_CNIC
1963 if (IS_FCOE_FP(fp)) 2065 if (IS_FCOE_FP(fp))
1964 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; 2066 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
1965#endif
1966 if (IS_ETH_FP(fp)) 2067 if (IS_ETH_FP(fp))
1967 for_each_cos_in_tx_queue(fp, cos) 2068 for_each_cos_in_tx_queue(fp, cos)
1968 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * 2069 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
@@ -1980,11 +2081,95 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1980 else if (bp->flags & GRO_ENABLE_FLAG) 2081 else if (bp->flags & GRO_ENABLE_FLAG)
1981 fp->mode = TPA_MODE_GRO; 2082 fp->mode = TPA_MODE_GRO;
1982 2083
1983#ifdef BCM_CNIC
1984 /* We don't want TPA on an FCoE L2 ring */ 2084 /* We don't want TPA on an FCoE L2 ring */
1985 if (IS_FCOE_FP(fp)) 2085 if (IS_FCOE_FP(fp))
1986 fp->disable_tpa = 1; 2086 fp->disable_tpa = 1;
1987#endif 2087}
2088
2089int bnx2x_load_cnic(struct bnx2x *bp)
2090{
2091 int i, rc, port = BP_PORT(bp);
2092
2093 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2094
2095 mutex_init(&bp->cnic_mutex);
2096
2097 rc = bnx2x_alloc_mem_cnic(bp);
2098 if (rc) {
2099 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2100 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2101 }
2102
2103 rc = bnx2x_alloc_fp_mem_cnic(bp);
2104 if (rc) {
2105 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2106 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2107 }
2108
2109 /* Update the number of queues with the cnic queues */
2110 rc = bnx2x_set_real_num_queues(bp, 1);
2111 if (rc) {
2112 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2113 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2114 }
2115
2116 /* Add all CNIC NAPI objects */
2117 bnx2x_add_all_napi_cnic(bp);
2118 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2119 bnx2x_napi_enable_cnic(bp);
2120
2121 rc = bnx2x_init_hw_func_cnic(bp);
2122 if (rc)
2123 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2124
2125 bnx2x_nic_init_cnic(bp);
2126
2127 /* Enable Timer scan */
2128 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2129
2130 for_each_cnic_queue(bp, i) {
2131 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2132 if (rc) {
2133 BNX2X_ERR("Queue setup failed\n");
2134 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2135 }
2136 }
2137
2138 /* Initialize Rx filter. */
2139 netif_addr_lock_bh(bp->dev);
2140 bnx2x_set_rx_mode(bp->dev);
2141 netif_addr_unlock_bh(bp->dev);
2142
2143 /* re-read iscsi info */
2144 bnx2x_get_iscsi_info(bp);
2145 bnx2x_setup_cnic_irq_info(bp);
2146 bnx2x_setup_cnic_info(bp);
2147 bp->cnic_loaded = true;
2148 if (bp->state == BNX2X_STATE_OPEN)
2149 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2150
2151
2152 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2153
2154 return 0;
2155
2156#ifndef BNX2X_STOP_ON_ERROR
2157load_error_cnic2:
2158 /* Disable Timer scan */
2159 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2160
2161load_error_cnic1:
2162 bnx2x_napi_disable_cnic(bp);
2163 /* Update the number of queues without the cnic queues */
2164 rc = bnx2x_set_real_num_queues(bp, 0);
2165 if (rc)
2166 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2167load_error_cnic0:
2168 BNX2X_ERR("CNIC-related load failed\n");
2169 bnx2x_free_fp_mem_cnic(bp);
2170 bnx2x_free_mem_cnic(bp);
2171 return rc;
2172#endif /* ! BNX2X_STOP_ON_ERROR */
1988} 2173}
1989 2174
1990 2175
@@ -1995,6 +2180,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1995 u32 load_code; 2180 u32 load_code;
1996 int i, rc; 2181 int i, rc;
1997 2182
2183 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2184 DP(NETIF_MSG_IFUP,
2185 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2186
1998#ifdef BNX2X_STOP_ON_ERROR 2187#ifdef BNX2X_STOP_ON_ERROR
1999 if (unlikely(bp->panic)) { 2188 if (unlikely(bp->panic)) {
2000 BNX2X_ERR("Can't load NIC when there is panic\n"); 2189 BNX2X_ERR("Can't load NIC when there is panic\n");
@@ -2022,9 +2211,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2022 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); 2211 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2023 for_each_queue(bp, i) 2212 for_each_queue(bp, i)
2024 bnx2x_bz_fp(bp, i); 2213 bnx2x_bz_fp(bp, i);
2025 memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size * 2214 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2026 sizeof(struct bnx2x_fp_txdata)); 2215 bp->num_cnic_queues) *
2216 sizeof(struct bnx2x_fp_txdata));
2027 2217
2218 bp->fcoe_init = false;
2028 2219
2029 /* Set the receive queues buffer size */ 2220 /* Set the receive queues buffer size */
2030 bnx2x_set_rx_buf_size(bp); 2221 bnx2x_set_rx_buf_size(bp);
@@ -2034,9 +2225,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2034 2225
2035 /* As long as bnx2x_alloc_mem() may possibly update 2226 /* As long as bnx2x_alloc_mem() may possibly update
2036 * bp->num_queues, bnx2x_set_real_num_queues() should always 2227 * bp->num_queues, bnx2x_set_real_num_queues() should always
2037 * come after it. 2228 * come after it. At this stage cnic queues are not counted.
2038 */ 2229 */
2039 rc = bnx2x_set_real_num_queues(bp); 2230 rc = bnx2x_set_real_num_queues(bp, 0);
2040 if (rc) { 2231 if (rc) {
2041 BNX2X_ERR("Unable to set real_num_queues\n"); 2232 BNX2X_ERR("Unable to set real_num_queues\n");
2042 LOAD_ERROR_EXIT(bp, load_error0); 2233 LOAD_ERROR_EXIT(bp, load_error0);
@@ -2050,6 +2241,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2050 2241
2051 /* Add all NAPI objects */ 2242 /* Add all NAPI objects */
2052 bnx2x_add_all_napi(bp); 2243 bnx2x_add_all_napi(bp);
2244 DP(NETIF_MSG_IFUP, "napi added\n");
2053 bnx2x_napi_enable(bp); 2245 bnx2x_napi_enable(bp);
2054 2246
2055 /* set pf load just before approaching the MCP */ 2247 /* set pf load just before approaching the MCP */
@@ -2073,7 +2265,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2073 DRV_PULSE_SEQ_MASK); 2265 DRV_PULSE_SEQ_MASK);
2074 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq); 2266 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2075 2267
2076 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); 2268 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
2269 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
2077 if (!load_code) { 2270 if (!load_code) {
2078 BNX2X_ERR("MCP response failure, aborting\n"); 2271 BNX2X_ERR("MCP response failure, aborting\n");
2079 rc = -EBUSY; 2272 rc = -EBUSY;
@@ -2191,23 +2384,18 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2191 LOAD_ERROR_EXIT(bp, load_error3); 2384 LOAD_ERROR_EXIT(bp, load_error3);
2192 } 2385 }
2193 2386
2194#ifdef BCM_CNIC 2387 for_each_nondefault_eth_queue(bp, i) {
2195 /* Enable Timer scan */
2196 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2197#endif
2198
2199 for_each_nondefault_queue(bp, i) {
2200 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0); 2388 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2201 if (rc) { 2389 if (rc) {
2202 BNX2X_ERR("Queue setup failed\n"); 2390 BNX2X_ERR("Queue setup failed\n");
2203 LOAD_ERROR_EXIT(bp, load_error4); 2391 LOAD_ERROR_EXIT(bp, load_error3);
2204 } 2392 }
2205 } 2393 }
2206 2394
2207 rc = bnx2x_init_rss_pf(bp); 2395 rc = bnx2x_init_rss_pf(bp);
2208 if (rc) { 2396 if (rc) {
2209 BNX2X_ERR("PF RSS init failed\n"); 2397 BNX2X_ERR("PF RSS init failed\n");
2210 LOAD_ERROR_EXIT(bp, load_error4); 2398 LOAD_ERROR_EXIT(bp, load_error3);
2211 } 2399 }
2212 2400
2213 /* Now when Clients are configured we are ready to work */ 2401 /* Now when Clients are configured we are ready to work */
@@ -2217,7 +2405,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2217 rc = bnx2x_set_eth_mac(bp, true); 2405 rc = bnx2x_set_eth_mac(bp, true);
2218 if (rc) { 2406 if (rc) {
2219 BNX2X_ERR("Setting Ethernet MAC failed\n"); 2407 BNX2X_ERR("Setting Ethernet MAC failed\n");
2220 LOAD_ERROR_EXIT(bp, load_error4); 2408 LOAD_ERROR_EXIT(bp, load_error3);
2221 } 2409 }
2222 2410
2223 if (bp->pending_max) { 2411 if (bp->pending_max) {
@@ -2227,6 +2415,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2227 2415
2228 if (bp->port.pmf) 2416 if (bp->port.pmf)
2229 bnx2x_initial_phy_init(bp, load_mode); 2417 bnx2x_initial_phy_init(bp, load_mode);
2418 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2230 2419
2231 /* Start fast path */ 2420 /* Start fast path */
2232 2421
@@ -2257,21 +2446,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2257 } 2446 }
2258 2447
2259 if (bp->port.pmf) 2448 if (bp->port.pmf)
2260 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); 2449 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2261 else 2450 else
2262 bnx2x__link_status_update(bp); 2451 bnx2x__link_status_update(bp);
2263 2452
2264 /* start the timer */ 2453 /* start the timer */
2265 mod_timer(&bp->timer, jiffies + bp->current_interval); 2454 mod_timer(&bp->timer, jiffies + bp->current_interval);
2266 2455
2267#ifdef BCM_CNIC 2456 if (CNIC_ENABLED(bp))
2268 /* re-read iscsi info */ 2457 bnx2x_load_cnic(bp);
2269 bnx2x_get_iscsi_info(bp);
2270 bnx2x_setup_cnic_irq_info(bp);
2271 bnx2x_setup_cnic_info(bp);
2272 if (bp->state == BNX2X_STATE_OPEN)
2273 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2274#endif
2275 2458
2276 /* mark driver is loaded in shmem2 */ 2459 /* mark driver is loaded in shmem2 */
2277 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2460 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
@@ -2293,14 +2476,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2293 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) 2476 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2294 bnx2x_dcbx_init(bp, false); 2477 bnx2x_dcbx_init(bp, false);
2295 2478
2479 DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2480
2296 return 0; 2481 return 0;
2297 2482
2298#ifndef BNX2X_STOP_ON_ERROR 2483#ifndef BNX2X_STOP_ON_ERROR
2299load_error4:
2300#ifdef BCM_CNIC
2301 /* Disable Timer scan */
2302 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2303#endif
2304load_error3: 2484load_error3:
2305 bnx2x_int_disable_sync(bp, 1); 2485 bnx2x_int_disable_sync(bp, 1);
2306 2486
@@ -2338,6 +2518,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2338 int i; 2518 int i;
2339 bool global = false; 2519 bool global = false;
2340 2520
2521 DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2522
2341 /* mark driver is unloaded in shmem2 */ 2523 /* mark driver is unloaded in shmem2 */
2342 if (SHMEM2_HAS(bp, drv_capabilities_flag)) { 2524 if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
2343 u32 val; 2525 u32 val;
@@ -2373,14 +2555,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2373 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 2555 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2374 smp_mb(); 2556 smp_mb();
2375 2557
2558 if (CNIC_LOADED(bp))
2559 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2560
2376 /* Stop Tx */ 2561 /* Stop Tx */
2377 bnx2x_tx_disable(bp); 2562 bnx2x_tx_disable(bp);
2378 netdev_reset_tc(bp->dev); 2563 netdev_reset_tc(bp->dev);
2379 2564
2380#ifdef BCM_CNIC
2381 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2382#endif
2383
2384 bp->rx_mode = BNX2X_RX_MODE_NONE; 2565 bp->rx_mode = BNX2X_RX_MODE_NONE;
2385 2566
2386 del_timer_sync(&bp->timer); 2567 del_timer_sync(&bp->timer);
@@ -2414,7 +2595,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2414 bnx2x_netif_stop(bp, 1); 2595 bnx2x_netif_stop(bp, 1);
2415 /* Delete all NAPI objects */ 2596 /* Delete all NAPI objects */
2416 bnx2x_del_all_napi(bp); 2597 bnx2x_del_all_napi(bp);
2417 2598 if (CNIC_LOADED(bp))
2599 bnx2x_del_all_napi_cnic(bp);
2418 /* Release IRQs */ 2600 /* Release IRQs */
2419 bnx2x_free_irq(bp); 2601 bnx2x_free_irq(bp);
2420 2602
@@ -2435,12 +2617,19 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2435 2617
2436 /* Free SKBs, SGEs, TPA pool and driver internals */ 2618 /* Free SKBs, SGEs, TPA pool and driver internals */
2437 bnx2x_free_skbs(bp); 2619 bnx2x_free_skbs(bp);
2620 if (CNIC_LOADED(bp))
2621 bnx2x_free_skbs_cnic(bp);
2438 for_each_rx_queue(bp, i) 2622 for_each_rx_queue(bp, i)
2439 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 2623 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2440 2624
2625 if (CNIC_LOADED(bp)) {
2626 bnx2x_free_fp_mem_cnic(bp);
2627 bnx2x_free_mem_cnic(bp);
2628 }
2441 bnx2x_free_mem(bp); 2629 bnx2x_free_mem(bp);
2442 2630
2443 bp->state = BNX2X_STATE_CLOSED; 2631 bp->state = BNX2X_STATE_CLOSED;
2632 bp->cnic_loaded = false;
2444 2633
2445 /* Check if there are pending parity attentions. If there are - set 2634 /* Check if there are pending parity attentions. If there are - set
2446 * RECOVERY_IN_PROGRESS. 2635 * RECOVERY_IN_PROGRESS.
@@ -2460,6 +2649,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2460 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp))) 2649 if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
2461 bnx2x_disable_close_the_gate(bp); 2650 bnx2x_disable_close_the_gate(bp);
2462 2651
2652 DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
2653
2463 return 0; 2654 return 0;
2464} 2655}
2465 2656
@@ -2550,7 +2741,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2550 2741
2551 /* Fall out from the NAPI loop if needed */ 2742 /* Fall out from the NAPI loop if needed */
2552 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 2743 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2553#ifdef BCM_CNIC 2744
2554 /* No need to update SB for FCoE L2 ring as long as 2745 /* No need to update SB for FCoE L2 ring as long as
2555 * it's connected to the default SB and the SB 2746 * it's connected to the default SB and the SB
2556 * has been updated when NAPI was scheduled. 2747 * has been updated when NAPI was scheduled.
@@ -2559,8 +2750,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
2559 napi_complete(napi); 2750 napi_complete(napi);
2560 break; 2751 break;
2561 } 2752 }
2562#endif
2563
2564 bnx2x_update_fpsb_idx(fp); 2753 bnx2x_update_fpsb_idx(fp);
2565 /* bnx2x_has_rx_work() reads the status block, 2754 /* bnx2x_has_rx_work() reads the status block,
2566 * thus we need to ensure that status block indices 2755 * thus we need to ensure that status block indices
@@ -2940,7 +3129,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2940 txq_index = skb_get_queue_mapping(skb); 3129 txq_index = skb_get_queue_mapping(skb);
2941 txq = netdev_get_tx_queue(dev, txq_index); 3130 txq = netdev_get_tx_queue(dev, txq_index);
2942 3131
2943 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); 3132 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
2944 3133
2945 txdata = &bp->bnx2x_txq[txq_index]; 3134 txdata = &bp->bnx2x_txq[txq_index];
2946 3135
@@ -2958,11 +3147,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2958 BDS_PER_TX_PKT + 3147 BDS_PER_TX_PKT +
2959 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) { 3148 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
2960 /* Handle special storage cases separately */ 3149 /* Handle special storage cases separately */
2961 if (txdata->tx_ring_size != 0) { 3150 if (txdata->tx_ring_size == 0) {
2962 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 3151 struct bnx2x_eth_q_stats *q_stats =
3152 bnx2x_fp_qstats(bp, txdata->parent_fp);
3153 q_stats->driver_filtered_tx_pkt++;
3154 dev_kfree_skb(skb);
3155 return NETDEV_TX_OK;
3156 }
2963 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; 3157 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
2964 netif_tx_stop_queue(txq); 3158 netif_tx_stop_queue(txq);
2965 } 3159 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2966 3160
2967 return NETDEV_TX_BUSY; 3161 return NETDEV_TX_BUSY;
2968 } 3162 }
@@ -3339,13 +3533,11 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3339 return -EINVAL; 3533 return -EINVAL;
3340 } 3534 }
3341 3535
3342#ifdef BCM_CNIC
3343 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) && 3536 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3344 !is_zero_ether_addr(addr->sa_data)) { 3537 !is_zero_ether_addr(addr->sa_data)) {
3345 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3538 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3346 return -EINVAL; 3539 return -EINVAL;
3347 } 3540 }
3348#endif
3349 3541
3350 if (netif_running(dev)) { 3542 if (netif_running(dev)) {
3351 rc = bnx2x_set_eth_mac(bp, false); 3543 rc = bnx2x_set_eth_mac(bp, false);
@@ -3369,13 +3561,11 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3369 u8 cos; 3561 u8 cos;
3370 3562
3371 /* Common */ 3563 /* Common */
3372#ifdef BCM_CNIC 3564
3373 if (IS_FCOE_IDX(fp_index)) { 3565 if (IS_FCOE_IDX(fp_index)) {
3374 memset(sb, 0, sizeof(union host_hc_status_block)); 3566 memset(sb, 0, sizeof(union host_hc_status_block));
3375 fp->status_blk_mapping = 0; 3567 fp->status_blk_mapping = 0;
3376
3377 } else { 3568 } else {
3378#endif
3379 /* status blocks */ 3569 /* status blocks */
3380 if (!CHIP_IS_E1x(bp)) 3570 if (!CHIP_IS_E1x(bp))
3381 BNX2X_PCI_FREE(sb->e2_sb, 3571 BNX2X_PCI_FREE(sb->e2_sb,
@@ -3387,9 +3577,8 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3387 bnx2x_fp(bp, fp_index, 3577 bnx2x_fp(bp, fp_index,
3388 status_blk_mapping), 3578 status_blk_mapping),
3389 sizeof(struct host_hc_status_block_e1x)); 3579 sizeof(struct host_hc_status_block_e1x));
3390#ifdef BCM_CNIC
3391 } 3580 }
3392#endif 3581
3393 /* Rx */ 3582 /* Rx */
3394 if (!skip_rx_queue(bp, fp_index)) { 3583 if (!skip_rx_queue(bp, fp_index)) {
3395 bnx2x_free_rx_bds(fp); 3584 bnx2x_free_rx_bds(fp);
@@ -3431,10 +3620,17 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3431 /* end of fastpath */ 3620 /* end of fastpath */
3432} 3621}
3433 3622
3623void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
3624{
3625 int i;
3626 for_each_cnic_queue(bp, i)
3627 bnx2x_free_fp_mem_at(bp, i);
3628}
3629
3434void bnx2x_free_fp_mem(struct bnx2x *bp) 3630void bnx2x_free_fp_mem(struct bnx2x *bp)
3435{ 3631{
3436 int i; 3632 int i;
3437 for_each_queue(bp, i) 3633 for_each_eth_queue(bp, i)
3438 bnx2x_free_fp_mem_at(bp, i); 3634 bnx2x_free_fp_mem_at(bp, i);
3439} 3635}
3440 3636
@@ -3519,14 +3715,11 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3519 u8 cos; 3715 u8 cos;
3520 int rx_ring_size = 0; 3716 int rx_ring_size = 0;
3521 3717
3522#ifdef BCM_CNIC
3523 if (!bp->rx_ring_size && 3718 if (!bp->rx_ring_size &&
3524 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 3719 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3525 rx_ring_size = MIN_RX_SIZE_NONTPA; 3720 rx_ring_size = MIN_RX_SIZE_NONTPA;
3526 bp->rx_ring_size = rx_ring_size; 3721 bp->rx_ring_size = rx_ring_size;
3527 } else 3722 } else if (!bp->rx_ring_size) {
3528#endif
3529 if (!bp->rx_ring_size) {
3530 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp); 3723 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
3531 3724
3532 if (CHIP_IS_E3(bp)) { 3725 if (CHIP_IS_E3(bp)) {
@@ -3550,9 +3743,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3550 3743
3551 /* Common */ 3744 /* Common */
3552 sb = &bnx2x_fp(bp, index, status_blk); 3745 sb = &bnx2x_fp(bp, index, status_blk);
3553#ifdef BCM_CNIC 3746
3554 if (!IS_FCOE_IDX(index)) { 3747 if (!IS_FCOE_IDX(index)) {
3555#endif
3556 /* status blocks */ 3748 /* status blocks */
3557 if (!CHIP_IS_E1x(bp)) 3749 if (!CHIP_IS_E1x(bp))
3558 BNX2X_PCI_ALLOC(sb->e2_sb, 3750 BNX2X_PCI_ALLOC(sb->e2_sb,
@@ -3562,9 +3754,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3562 BNX2X_PCI_ALLOC(sb->e1x_sb, 3754 BNX2X_PCI_ALLOC(sb->e1x_sb,
3563 &bnx2x_fp(bp, index, status_blk_mapping), 3755 &bnx2x_fp(bp, index, status_blk_mapping),
3564 sizeof(struct host_hc_status_block_e1x)); 3756 sizeof(struct host_hc_status_block_e1x));
3565#ifdef BCM_CNIC
3566 } 3757 }
3567#endif
3568 3758
3569 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to 3759 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3570 * set shortcuts for it. 3760 * set shortcuts for it.
@@ -3641,31 +3831,31 @@ alloc_mem_err:
3641 return 0; 3831 return 0;
3642} 3832}
3643 3833
3834int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
3835{
3836 if (!NO_FCOE(bp))
3837 /* FCoE */
3838 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3839 /* we will fail load process instead of mark
3840 * NO_FCOE_FLAG
3841 */
3842 return -ENOMEM;
3843
3844 return 0;
3845}
3846
3644int bnx2x_alloc_fp_mem(struct bnx2x *bp) 3847int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3645{ 3848{
3646 int i; 3849 int i;
3647 3850
3648 /** 3851 /* 1. Allocate FP for leading - fatal if error
3649 * 1. Allocate FP for leading - fatal if error 3852 * 2. Allocate RSS - fix number of queues if error
3650 * 2. {CNIC} Allocate FCoE FP - fatal if error
3651 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3652 * 4. Allocate RSS - fix number of queues if error
3653 */ 3853 */
3654 3854
3655 /* leading */ 3855 /* leading */
3656 if (bnx2x_alloc_fp_mem_at(bp, 0)) 3856 if (bnx2x_alloc_fp_mem_at(bp, 0))
3657 return -ENOMEM; 3857 return -ENOMEM;
3658 3858
3659#ifdef BCM_CNIC
3660 if (!NO_FCOE(bp))
3661 /* FCoE */
3662 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
3663 /* we will fail load process instead of mark
3664 * NO_FCOE_FLAG
3665 */
3666 return -ENOMEM;
3667#endif
3668
3669 /* RSS */ 3859 /* RSS */
3670 for_each_nondefault_eth_queue(bp, i) 3860 for_each_nondefault_eth_queue(bp, i)
3671 if (bnx2x_alloc_fp_mem_at(bp, i)) 3861 if (bnx2x_alloc_fp_mem_at(bp, i))
@@ -3676,17 +3866,17 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3676 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i; 3866 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3677 3867
3678 WARN_ON(delta < 0); 3868 WARN_ON(delta < 0);
3679#ifdef BCM_CNIC 3869 if (CNIC_SUPPORT(bp))
3680 /** 3870 /* move non eth FPs next to last eth FP
3681 * move non eth FPs next to last eth FP 3871 * must be done in that order
3682 * must be done in that order 3872 * FCOE_IDX < FWD_IDX < OOO_IDX
3683 * FCOE_IDX < FWD_IDX < OOO_IDX 3873 */
3684 */
3685 3874
3686 /* move FCoE fp even NO_FCOE_FLAG is on */ 3875 /* move FCoE fp even NO_FCOE_FLAG is on */
3687 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); 3876 bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
3688#endif 3877 bp->num_ethernet_queues -= delta;
3689 bp->num_queues -= delta; 3878 bp->num_queues = bp->num_ethernet_queues +
3879 bp->num_cnic_queues;
3690 BNX2X_ERR("Adjusted num of queues from %d to %d\n", 3880 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3691 bp->num_queues + delta, bp->num_queues); 3881 bp->num_queues + delta, bp->num_queues);
3692 } 3882 }
@@ -3705,13 +3895,13 @@ void bnx2x_free_mem_bp(struct bnx2x *bp)
3705 kfree(bp->ilt); 3895 kfree(bp->ilt);
3706} 3896}
3707 3897
3708int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) 3898int bnx2x_alloc_mem_bp(struct bnx2x *bp)
3709{ 3899{
3710 struct bnx2x_fastpath *fp; 3900 struct bnx2x_fastpath *fp;
3711 struct msix_entry *tbl; 3901 struct msix_entry *tbl;
3712 struct bnx2x_ilt *ilt; 3902 struct bnx2x_ilt *ilt;
3713 int msix_table_size = 0; 3903 int msix_table_size = 0;
3714 int fp_array_size; 3904 int fp_array_size, txq_array_size;
3715 int i; 3905 int i;
3716 3906
3717 /* 3907 /*
@@ -3721,7 +3911,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3721 msix_table_size = bp->igu_sb_cnt + 1; 3911 msix_table_size = bp->igu_sb_cnt + 1;
3722 3912
3723 /* fp array: RSS plus CNIC related L2 queues */ 3913 /* fp array: RSS plus CNIC related L2 queues */
3724 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; 3914 fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
3725 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); 3915 BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
3726 3916
3727 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); 3917 fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
@@ -3750,12 +3940,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3750 goto alloc_err; 3940 goto alloc_err;
3751 3941
3752 /* Allocate memory for the transmission queues array */ 3942 /* Allocate memory for the transmission queues array */
3753 bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; 3943 txq_array_size =
3754#ifdef BCM_CNIC 3944 BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
3755 bp->bnx2x_txq_size++; 3945 BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
3756#endif 3946
3757 bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, 3947 bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
3758 sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); 3948 GFP_KERNEL);
3759 if (!bp->bnx2x_txq) 3949 if (!bp->bnx2x_txq)
3760 goto alloc_err; 3950 goto alloc_err;
3761 3951
@@ -3838,7 +4028,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3838 return LINK_CONFIG_IDX(sel_phy_idx); 4028 return LINK_CONFIG_IDX(sel_phy_idx);
3839} 4029}
3840 4030
3841#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 4031#ifdef NETDEV_FCOE_WWNN
3842int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 4032int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3843{ 4033{
3844 struct bnx2x *bp = netdev_priv(dev); 4034 struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 9c5ea6c5b4c7..0991534f61da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -144,7 +144,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
144 * @bp: driver handle 144 * @bp: driver handle
145 * @load_mode: current mode 145 * @load_mode: current mode
146 */ 146 */
147u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); 147int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
148 148
149/** 149/**
150 * bnx2x_link_set - configure hw according to link parameters structure. 150 * bnx2x_link_set - configure hw according to link parameters structure.
@@ -238,7 +238,6 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
238 * @dev_instance: private instance 238 * @dev_instance: private instance
239 */ 239 */
240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance); 240irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
241#ifdef BCM_CNIC
242 241
243/** 242/**
244 * bnx2x_cnic_notify - send command to cnic driver 243 * bnx2x_cnic_notify - send command to cnic driver
@@ -262,8 +261,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
262 */ 261 */
263void bnx2x_setup_cnic_info(struct bnx2x *bp); 262void bnx2x_setup_cnic_info(struct bnx2x *bp);
264 263
265#endif
266
267/** 264/**
268 * bnx2x_int_enable - enable HW interrupts. 265 * bnx2x_int_enable - enable HW interrupts.
269 * 266 *
@@ -283,7 +280,7 @@ void bnx2x_int_enable(struct bnx2x *bp);
283void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw); 280void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
284 281
285/** 282/**
286 * bnx2x_nic_init - init driver internals. 283 * bnx2x_nic_init_cnic - init driver internals for cnic.
287 * 284 *
288 * @bp: driver handle 285 * @bp: driver handle
289 * @load_code: COMMON, PORT or FUNCTION 286 * @load_code: COMMON, PORT or FUNCTION
@@ -293,9 +290,26 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
293 * - status blocks 290 * - status blocks
294 * - etc. 291 * - etc.
295 */ 292 */
296void bnx2x_nic_init(struct bnx2x *bp, u32 load_code); 293void bnx2x_nic_init_cnic(struct bnx2x *bp);
297 294
298/** 295/**
296 * bnx2x_nic_init - init driver internals.
297 *
298 * @bp: driver handle
299 *
300 * Initializes:
301 * - rings
302 * - status blocks
303 * - etc.
304 */
305void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
306/**
307 * bnx2x_alloc_mem_cnic - allocate driver's memory for cnic.
308 *
309 * @bp: driver handle
310 */
311int bnx2x_alloc_mem_cnic(struct bnx2x *bp);
312/**
299 * bnx2x_alloc_mem - allocate driver's memory. 313 * bnx2x_alloc_mem - allocate driver's memory.
300 * 314 *
301 * @bp: driver handle 315 * @bp: driver handle
@@ -303,6 +317,12 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
303int bnx2x_alloc_mem(struct bnx2x *bp); 317int bnx2x_alloc_mem(struct bnx2x *bp);
304 318
305/** 319/**
320 * bnx2x_free_mem_cnic - release driver's memory for cnic.
321 *
322 * @bp: driver handle
323 */
324void bnx2x_free_mem_cnic(struct bnx2x *bp);
325/**
306 * bnx2x_free_mem - release driver's memory. 326 * bnx2x_free_mem - release driver's memory.
307 * 327 *
308 * @bp: driver handle 328 * @bp: driver handle
@@ -407,6 +427,7 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
407void bnx2x_set_reset_in_progress(struct bnx2x *bp); 427void bnx2x_set_reset_in_progress(struct bnx2x *bp);
408void bnx2x_set_reset_global(struct bnx2x *bp); 428void bnx2x_set_reset_global(struct bnx2x *bp);
409void bnx2x_disable_close_the_gate(struct bnx2x *bp); 429void bnx2x_disable_close_the_gate(struct bnx2x *bp);
430int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
410 431
411/** 432/**
412 * bnx2x_sp_event - handle ramrods completion. 433 * bnx2x_sp_event - handle ramrods completion.
@@ -424,6 +445,14 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
424void bnx2x_ilt_set_info(struct bnx2x *bp); 445void bnx2x_ilt_set_info(struct bnx2x *bp);
425 446
426/** 447/**
448 * bnx2x_ilt_set_cnic_info - prepare ILT configurations for SRC
449 * and TM.
450 *
451 * @bp: driver handle
452 */
453void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);
454
455/**
427 * bnx2x_dcbx_init - initialize dcbx protocol. 456 * bnx2x_dcbx_init - initialize dcbx protocol.
428 * 457 *
429 * @bp: driver handle 458 * @bp: driver handle
@@ -491,12 +520,17 @@ int bnx2x_resume(struct pci_dev *pdev);
491/* Release IRQ vectors */ 520/* Release IRQ vectors */
492void bnx2x_free_irq(struct bnx2x *bp); 521void bnx2x_free_irq(struct bnx2x *bp);
493 522
523void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
494void bnx2x_free_fp_mem(struct bnx2x *bp); 524void bnx2x_free_fp_mem(struct bnx2x *bp);
525int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
495int bnx2x_alloc_fp_mem(struct bnx2x *bp); 526int bnx2x_alloc_fp_mem(struct bnx2x *bp);
496void bnx2x_init_rx_rings(struct bnx2x *bp); 527void bnx2x_init_rx_rings(struct bnx2x *bp);
528void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
529void bnx2x_free_skbs_cnic(struct bnx2x *bp);
497void bnx2x_free_skbs(struct bnx2x *bp); 530void bnx2x_free_skbs(struct bnx2x *bp);
498void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw); 531void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
499void bnx2x_netif_start(struct bnx2x *bp); 532void bnx2x_netif_start(struct bnx2x *bp);
533int bnx2x_load_cnic(struct bnx2x *bp);
500 534
501/** 535/**
502 * bnx2x_enable_msix - set msix configuration. 536 * bnx2x_enable_msix - set msix configuration.
@@ -529,7 +563,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget);
529 * 563 *
530 * @bp: driver handle 564 * @bp: driver handle
531 */ 565 */
532int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp); 566int bnx2x_alloc_mem_bp(struct bnx2x *bp);
533 567
534/** 568/**
535 * bnx2x_free_mem_bp - release memories outsize main driver structure 569 * bnx2x_free_mem_bp - release memories outsize main driver structure
@@ -547,7 +581,7 @@ void bnx2x_free_mem_bp(struct bnx2x *bp);
547 */ 581 */
548int bnx2x_change_mtu(struct net_device *dev, int new_mtu); 582int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
549 583
550#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 584#ifdef NETDEV_FCOE_WWNN
551/** 585/**
552 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port 586 * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
553 * 587 *
@@ -793,23 +827,39 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
793 sge->addr_lo = 0; 827 sge->addr_lo = 0;
794} 828}
795 829
796static inline void bnx2x_add_all_napi(struct bnx2x *bp) 830static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
797{ 831{
798 int i; 832 int i;
799 833
800 bp->num_napi_queues = bp->num_queues; 834 /* Add NAPI objects */
835 for_each_rx_queue_cnic(bp, i)
836 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
837 bnx2x_poll, BNX2X_NAPI_WEIGHT);
838}
839
840static inline void bnx2x_add_all_napi(struct bnx2x *bp)
841{
842 int i;
801 843
802 /* Add NAPI objects */ 844 /* Add NAPI objects */
803 for_each_rx_queue(bp, i) 845 for_each_eth_queue(bp, i)
804 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 846 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
805 bnx2x_poll, BNX2X_NAPI_WEIGHT); 847 bnx2x_poll, BNX2X_NAPI_WEIGHT);
806} 848}
807 849
850static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
851{
852 int i;
853
854 for_each_rx_queue_cnic(bp, i)
855 netif_napi_del(&bnx2x_fp(bp, i, napi));
856}
857
808static inline void bnx2x_del_all_napi(struct bnx2x *bp) 858static inline void bnx2x_del_all_napi(struct bnx2x *bp)
809{ 859{
810 int i; 860 int i;
811 861
812 for_each_rx_queue(bp, i) 862 for_each_eth_queue(bp, i)
813 netif_napi_del(&bnx2x_fp(bp, i, napi)); 863 netif_napi_del(&bnx2x_fp(bp, i, napi));
814} 864}
815 865
@@ -979,11 +1029,9 @@ static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
979{ 1029{
980 struct bnx2x *bp = fp->bp; 1030 struct bnx2x *bp = fp->bp;
981 if (!CHIP_IS_E1x(bp)) { 1031 if (!CHIP_IS_E1x(bp)) {
982#ifdef BCM_CNIC
983 /* there are special statistics counters for FCoE 136..140 */ 1032 /* there are special statistics counters for FCoE 136..140 */
984 if (IS_FCOE_FP(fp)) 1033 if (IS_FCOE_FP(fp))
985 return bp->cnic_base_cl_id + (bp->pf_num >> 1); 1034 return bp->cnic_base_cl_id + (bp->pf_num >> 1);
986#endif
987 return fp->cl_id; 1035 return fp->cl_id;
988 } 1036 }
989 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x; 1037 return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
@@ -1102,7 +1150,6 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
1102 txdata->cid, txdata->txq_index); 1150 txdata->cid, txdata->txq_index);
1103} 1151}
1104 1152
1105#ifdef BCM_CNIC
1106static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) 1153static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
1107{ 1154{
1108 return bp->cnic_base_cl_id + cl_idx + 1155 return bp->cnic_base_cl_id + cl_idx +
@@ -1162,7 +1209,6 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
1162 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id, 1209 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
1163 fp->igu_sb_id); 1210 fp->igu_sb_id);
1164} 1211}
1165#endif
1166 1212
1167static inline int bnx2x_clean_tx_queue(struct bnx2x *bp, 1213static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
1168 struct bnx2x_fp_txdata *txdata) 1214 struct bnx2x_fp_txdata *txdata)
@@ -1280,7 +1326,7 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1280 */ 1326 */
1281 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1327 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1282} 1328}
1283#ifdef BCM_CNIC 1329
1284/** 1330/**
1285 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1331 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
1286 * 1332 *
@@ -1288,7 +1334,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1288 * 1334 *
1289 */ 1335 */
1290void bnx2x_get_iscsi_info(struct bnx2x *bp); 1336void bnx2x_get_iscsi_info(struct bnx2x *bp);
1291#endif
1292 1337
1293/** 1338/**
1294 * bnx2x_link_sync_notify - send notification to other functions. 1339 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1340,13 +1385,11 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
1340 1385
1341static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr) 1386static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1342{ 1387{
1343 if (is_valid_ether_addr(addr)) 1388 if (is_valid_ether_addr(addr) ||
1389 (is_zero_ether_addr(addr) &&
1390 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))))
1344 return true; 1391 return true;
1345#ifdef BCM_CNIC 1392
1346 if (is_zero_ether_addr(addr) &&
1347 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1348 return true;
1349#endif
1350 return false; 1393 return false;
1351} 1394}
1352 1395
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 2245c3895409..10bc093d2ca4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -413,8 +413,11 @@ static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
413 413
414static void bnx2x_pfc_set_pfc(struct bnx2x *bp) 414static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
415{ 415{
416 int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
417 GET_FLAGS(SHMEM2_RD(bp, drv_flags),
418 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
416 if (bp->dcbx_port_params.pfc.enabled && 419 if (bp->dcbx_port_params.pfc.enabled &&
417 !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) 420 (!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
418 /* 421 /*
419 * 1. Fills up common PFC structures if required 422 * 1. Fills up common PFC structures if required
420 * 2. Configure NIG, MAC and BRB via the elink 423 * 2. Configure NIG, MAC and BRB via the elink
@@ -552,10 +555,13 @@ static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
552 555
553static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp) 556static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
554{ 557{
558 int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
559 GET_FLAGS(SHMEM2_RD(bp, drv_flags),
560 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
555 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars); 561 bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
556 562
557 if (!bp->dcbx_port_params.ets.enabled || 563 if (!bp->dcbx_port_params.ets.enabled ||
558 (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)) 564 ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured))
559 return; 565 return;
560 566
561 if (CHIP_IS_E3B0(bp)) 567 if (CHIP_IS_E3B0(bp))
@@ -1802,11 +1808,14 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
1802 u8 cos = 0, pri = 0; 1808 u8 cos = 0, pri = 0;
1803 struct priority_cos *tt2cos; 1809 struct priority_cos *tt2cos;
1804 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; 1810 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1811 int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
1812 GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1813 1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
1805 1814
1806 memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg)); 1815 memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
1807 1816
1808 /* to disable DCB - the structure must be zeroed */ 1817 /* to disable DCB - the structure must be zeroed */
1809 if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) 1818 if ((bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) && !mfw_configured)
1810 return; 1819 return;
1811 1820
1812 /*shortcut*/ 1821 /*shortcut*/
@@ -1895,6 +1904,11 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
1895 struct bnx2x *bp = netdev_priv(netdev); 1904 struct bnx2x *bp = netdev_priv(netdev);
1896 DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off"); 1905 DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
1897 1906
1907 if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
1908 (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
1909 DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
1910 return 1;
1911 }
1898 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled); 1912 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
1899 return 0; 1913 return 0;
1900} 1914}
@@ -1908,10 +1922,10 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1908 /* first the HW mac address */ 1922 /* first the HW mac address */
1909 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); 1923 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1910 1924
1911#ifdef BCM_CNIC 1925 if (CNIC_LOADED(bp))
1912 /* second SAN address */ 1926 /* second SAN address */
1913 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len); 1927 memcpy(perm_addr+netdev->addr_len, bp->fip_mac,
1914#endif 1928 netdev->addr_len);
1915} 1929}
1916 1930
1917static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio, 1931static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
@@ -2038,10 +2052,13 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
2038 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES) 2052 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
2039 return; 2053 return;
2040 2054
2041 bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
2042 2055
2043 if (setting) 2056 if (setting) {
2057 bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
2044 bp->dcbx_config_params.admin_pfc_tx_enable = 1; 2058 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
2059 } else {
2060 bp->dcbx_config_params.admin_pfc_bitmap &= ~(1 << prio);
2061 }
2045} 2062}
2046 2063
2047static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, 2064static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
@@ -2073,8 +2090,12 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
2073 "Handling parity error recovery. Try again later\n"); 2090 "Handling parity error recovery. Try again later\n");
2074 return 1; 2091 return 1;
2075 } 2092 }
2076 if (netif_running(bp->dev)) 2093 if (netif_running(bp->dev)) {
2094 bnx2x_update_drv_flags(bp,
2095 1 << DRV_FLAGS_DCB_MFW_CONFIGURED,
2096 1);
2077 bnx2x_dcbx_init(bp, true); 2097 bnx2x_dcbx_init(bp, true);
2098 }
2078 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); 2099 DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
2079 if (rc) 2100 if (rc)
2080 return 1; 2101 return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 6e5bdd1a31d9..277f17e3c8f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -62,7 +62,9 @@ static const struct {
62 8, "[%s]: tpa_aggregations" }, 62 8, "[%s]: tpa_aggregations" },
63 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi), 63 { Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
64 8, "[%s]: tpa_aggregated_frames"}, 64 8, "[%s]: tpa_aggregated_frames"},
65 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"} 65 { Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
66 { Q_STATS_OFFSET32(driver_filtered_tx_pkt),
67 4, "[%s]: driver_filtered_tx_pkt" }
66}; 68};
67 69
68#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr) 70#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
@@ -177,6 +179,8 @@ static const struct {
177 4, STATS_FLAGS_FUNC, "recoverable_errors" }, 179 4, STATS_FLAGS_FUNC, "recoverable_errors" },
178 { STATS_OFFSET32(unrecoverable_error), 180 { STATS_OFFSET32(unrecoverable_error),
179 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, 181 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
182 { STATS_OFFSET32(driver_filtered_tx_pkt),
183 4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
180 { STATS_OFFSET32(eee_tx_lpi), 184 { STATS_OFFSET32(eee_tx_lpi),
181 4, STATS_FLAGS_PORT, "Tx LPI entry count"} 185 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
182}; 186};
@@ -227,18 +231,14 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
227 cmd->advertising &= ~(ADVERTISED_10000baseT_Full); 231 cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
228 } 232 }
229 233
230 if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) { 234 if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
231 if (!(bp->flags & MF_FUNC_DIS)) { 235 !(bp->flags & MF_FUNC_DIS)) {
232 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
233 cmd->duplex = bp->link_vars.duplex; 236 cmd->duplex = bp->link_vars.duplex;
234 } else {
235 ethtool_cmd_speed_set(
236 cmd, bp->link_params.req_line_speed[cfg_idx]);
237 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
238 }
239 237
240 if (IS_MF(bp) && !BP_NOMCP(bp)) 238 if (IS_MF(bp) && !BP_NOMCP(bp))
241 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp)); 239 ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
240 else
241 ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
242 } else { 242 } else {
243 cmd->duplex = DUPLEX_UNKNOWN; 243 cmd->duplex = DUPLEX_UNKNOWN;
244 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); 244 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
@@ -2660,20 +2660,25 @@ static int bnx2x_set_phys_id(struct net_device *dev,
2660 return 1; /* cycle on/off once per second */ 2660 return 1; /* cycle on/off once per second */
2661 2661
2662 case ETHTOOL_ID_ON: 2662 case ETHTOOL_ID_ON:
2663 bnx2x_acquire_phy_lock(bp);
2663 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2664 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2664 LED_MODE_ON, SPEED_1000); 2665 LED_MODE_ON, SPEED_1000);
2666 bnx2x_release_phy_lock(bp);
2665 break; 2667 break;
2666 2668
2667 case ETHTOOL_ID_OFF: 2669 case ETHTOOL_ID_OFF:
2670 bnx2x_acquire_phy_lock(bp);
2668 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2671 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2669 LED_MODE_FRONT_PANEL_OFF, 0); 2672 LED_MODE_FRONT_PANEL_OFF, 0);
2670 2673 bnx2x_release_phy_lock(bp);
2671 break; 2674 break;
2672 2675
2673 case ETHTOOL_ID_INACTIVE: 2676 case ETHTOOL_ID_INACTIVE:
2677 bnx2x_acquire_phy_lock(bp);
2674 bnx2x_set_led(&bp->link_params, &bp->link_vars, 2678 bnx2x_set_led(&bp->link_params, &bp->link_vars,
2675 LED_MODE_OPER, 2679 LED_MODE_OPER,
2676 bp->link_vars.line_speed); 2680 bp->link_vars.line_speed);
2681 bnx2x_release_phy_lock(bp);
2677 } 2682 }
2678 2683
2679 return 0; 2684 return 0;
@@ -2901,7 +2906,9 @@ static void bnx2x_get_channels(struct net_device *dev,
2901static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) 2906static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
2902{ 2907{
2903 bnx2x_disable_msi(bp); 2908 bnx2x_disable_msi(bp);
2904 BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; 2909 bp->num_ethernet_queues = num_rss;
2910 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
2911 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
2905 bnx2x_set_int_mode(bp); 2912 bnx2x_set_int_mode(bp);
2906} 2913}
2907 2914
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 620fe939ecfd..60a83ad10370 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -23,6 +23,11 @@
23 (IRO[159].base + ((funcId) * IRO[159].m1)) 23 (IRO[159].base + ((funcId) * IRO[159].m1))
24#define CSTORM_FUNC_EN_OFFSET(funcId) \ 24#define CSTORM_FUNC_EN_OFFSET(funcId) \
25 (IRO[149].base + ((funcId) * IRO[149].m1)) 25 (IRO[149].base + ((funcId) * IRO[149].m1))
26#define CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hcIndex, sbId) \
27 (IRO[139].base + ((hcIndex) * IRO[139].m1) + ((sbId) * IRO[139].m2))
28#define CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hcIndex, sbId) \
29 (IRO[138].base + (((hcIndex)>>2) * IRO[138].m1) + (((hcIndex)&3) \
30 * IRO[138].m2) + ((sbId) * IRO[138].m3))
26#define CSTORM_IGU_MODE_OFFSET (IRO[157].base) 31#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
27#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \ 32#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
28 (IRO[316].base + ((pfId) * IRO[316].m1)) 33 (IRO[316].base + ((pfId) * IRO[316].m1))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 18704929e642..3369a50ac6b4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -500,7 +500,15 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
500 u32 e3_cmn_pin_cfg1; /* 0x170 */ 500 u32 e3_cmn_pin_cfg1; /* 0x170 */
501 #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF 501 #define PORT_HW_CFG_E3_OVER_CURRENT_MASK 0x000000FF
502 #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0 502 #define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT 0
503 u32 reserved0[7]; /* 0x174 */ 503
504 /* pause on host ring */
505 u32 generic_features; /* 0x174 */
506 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_MASK 0x00000001
507 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_SHIFT 0
508 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
509 #define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
510
511 u32 reserved0[6]; /* 0x178 */
504 512
505 u32 aeu_int_mask; /* 0x190 */ 513 u32 aeu_int_mask; /* 0x190 */
506 514
@@ -695,6 +703,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
695 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00 703 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE 0x00000e00
696 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00 704 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722 0x00000f00
697 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000 705 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616 0x00001000
706 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834 0x00001100
698 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00 707 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE 0x0000fd00
699 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00 708 #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN 0x0000ff00
700 709
@@ -751,6 +760,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
751 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00 760 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE 0x00000e00
752 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00 761 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722 0x00000f00
753 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000 762 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616 0x00001000
763 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834 0x00001100
754 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00 764 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC 0x0000fc00
755 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 765 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
756 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 766 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
@@ -1246,6 +1256,7 @@ struct drv_func_mb {
1246 #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000 1256 #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
1247 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002 1257 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1248 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1258 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1259 #define REQ_BC_VER_4_MT_SUPPORTED 0x00070201
1249 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1260 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1250 #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209 1261 #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
1251 1262
@@ -1515,12 +1526,13 @@ enum mf_cfg_afex_vlan_mode {
1515/* This structure is not applicable and should not be accessed on 57711 */ 1526/* This structure is not applicable and should not be accessed on 57711 */
1516struct func_ext_cfg { 1527struct func_ext_cfg {
1517 u32 func_cfg; 1528 u32 func_cfg;
1518 #define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF 1529 #define MACP_FUNC_CFG_FLAGS_MASK 0x0000007F
1519 #define MACP_FUNC_CFG_FLAGS_SHIFT 0 1530 #define MACP_FUNC_CFG_FLAGS_SHIFT 0
1520 #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001 1531 #define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1521 #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002 1532 #define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1522 #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004 1533 #define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1523 #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008 1534 #define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1535 #define MACP_FUNC_CFG_PAUSE_ON_HOST_RING 0x00000080
1524 1536
1525 u32 iscsi_mac_addr_upper; 1537 u32 iscsi_mac_addr_upper;
1526 u32 iscsi_mac_addr_lower; 1538 u32 iscsi_mac_addr_lower;
@@ -2085,8 +2097,13 @@ struct shmem2_region {
2085 2097
2086 /* generic flags controlled by the driver */ 2098 /* generic flags controlled by the driver */
2087 u32 drv_flags; 2099 u32 drv_flags;
2088 #define DRV_FLAGS_DCB_CONFIGURED 0x1 2100 #define DRV_FLAGS_DCB_CONFIGURED 0x0
2101 #define DRV_FLAGS_DCB_CONFIGURATION_ABORTED 0x1
2102 #define DRV_FLAGS_DCB_MFW_CONFIGURED 0x2
2089 2103
2104 #define DRV_FLAGS_PORT_MASK ((1 << DRV_FLAGS_DCB_CONFIGURED) | \
2105 (1 << DRV_FLAGS_DCB_CONFIGURATION_ABORTED) | \
2106 (1 << DRV_FLAGS_DCB_MFW_CONFIGURED))
2090 /* pointer to extended dev_info shared data copied from nvm image */ 2107 /* pointer to extended dev_info shared data copied from nvm image */
2091 u32 extended_dev_info_shared_addr; 2108 u32 extended_dev_info_shared_addr;
2092 u32 ncsi_oem_data_addr; 2109 u32 ncsi_oem_data_addr;
@@ -2159,6 +2176,16 @@ struct shmem2_region {
2159 #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000 2176 #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
2160 2177
2161 u32 sizeof_port_stats; 2178 u32 sizeof_port_stats;
2179
2180 /* Link Flap Avoidance */
2181 u32 lfa_host_addr[PORT_MAX];
2182 u32 reserved1;
2183
2184 u32 reserved2; /* Offset 0x148 */
2185 u32 reserved3; /* Offset 0x14C */
2186 u32 reserved4; /* Offset 0x150 */
2187 u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
2188 #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0)
2162}; 2189};
2163 2190
2164 2191
@@ -4845,9 +4872,17 @@ struct vif_list_event_data {
4845 __le32 reserved2; 4872 __le32 reserved2;
4846}; 4873};
4847 4874
4848/* 4875/* function update event data */
4849 * union for all event ring message types 4876struct function_update_event_data {
4850 */ 4877 u8 echo;
4878 u8 reserved;
4879 __le16 reserved0;
4880 __le32 reserved1;
4881 __le32 reserved2;
4882};
4883
4884
4885/* union for all event ring message types */
4851union event_data { 4886union event_data {
4852 struct vf_pf_event_data vf_pf_event; 4887 struct vf_pf_event_data vf_pf_event;
4853 struct eth_event_data eth_event; 4888 struct eth_event_data eth_event;
@@ -4855,6 +4890,7 @@ union event_data {
4855 struct vf_flr_event_data vf_flr_event; 4890 struct vf_flr_event_data vf_flr_event;
4856 struct malicious_vf_event_data malicious_vf_event; 4891 struct malicious_vf_event_data malicious_vf_event;
4857 struct vif_list_event_data vif_list_event; 4892 struct vif_list_event_data vif_list_event;
4893 struct function_update_event_data function_update_event;
4858}; 4894};
4859 4895
4860 4896
@@ -4984,8 +5020,10 @@ struct function_update_data {
4984 u8 allowed_priorities; 5020 u8 allowed_priorities;
4985 u8 network_cos_mode; 5021 u8 network_cos_mode;
4986 u8 lb_mode_en; 5022 u8 lb_mode_en;
4987 u8 reserved0; 5023 u8 tx_switch_suspend_change_flg;
4988 __le32 reserved1; 5024 u8 tx_switch_suspend;
5025 u8 echo;
5026 __le16 reserved1;
4989}; 5027};
4990 5028
4991 5029
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index fe66d902dc62..d755acfe7a40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -648,15 +648,25 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
648 return rc; 648 return rc;
649} 649}
650 650
651static int bnx2x_ilt_mem_op_cnic(struct bnx2x *bp, u8 memop)
652{
653 int rc = 0;
654
655 if (CONFIGURE_NIC_MODE(bp))
656 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
657 if (!rc)
658 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
659
660 return rc;
661}
662
651static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop) 663static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
652{ 664{
653 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop); 665 int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
654 if (!rc) 666 if (!rc)
655 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop); 667 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
656 if (!rc) 668 if (!rc && CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
657 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop); 669 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
658 if (!rc)
659 rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
660 670
661 return rc; 671 return rc;
662} 672}
@@ -781,12 +791,19 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
781 bnx2x_ilt_client_init_op(bp, ilt_cli, initop); 791 bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
782} 792}
783 793
794static void bnx2x_ilt_init_op_cnic(struct bnx2x *bp, u8 initop)
795{
796 if (CONFIGURE_NIC_MODE(bp))
797 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
798 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
799}
800
784static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop) 801static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
785{ 802{
786 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop); 803 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
787 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop); 804 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
788 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop); 805 if (CNIC_SUPPORT(bp) && !CONFIGURE_NIC_MODE(bp))
789 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop); 806 bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
790} 807}
791 808
792static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num, 809static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
@@ -890,7 +907,6 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
890/**************************************************************************** 907/****************************************************************************
891* SRC initializations 908* SRC initializations
892****************************************************************************/ 909****************************************************************************/
893#ifdef BCM_CNIC
894/* called during init func stage */ 910/* called during init func stage */
895static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, 911static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
896 dma_addr_t t2_mapping, int src_cid_count) 912 dma_addr_t t2_mapping, int src_cid_count)
@@ -915,5 +931,4 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
915 U64_HI((u64)t2_mapping + 931 U64_HI((u64)t2_mapping +
916 (src_cid_count-1) * sizeof(struct src_ent))); 932 (src_cid_count-1) * sizeof(struct src_ent)));
917} 933}
918#endif
919#endif /* BNX2X_INIT_OPS_H */ 934#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index f6cfdc6cf20f..09096b43a6e9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -121,6 +121,7 @@
121#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 121#define GP_STATUS_10G_XFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
122#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 122#define GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
123#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 123#define GP_STATUS_10G_SFI MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
124#define GP_STATUS_20G_KR2 MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2
124#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD 125#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD
125#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD 126#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD
126#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD 127#define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
@@ -253,6 +254,12 @@ static int bnx2x_check_lfa(struct link_params *params)
253 if (!(link_status & LINK_STATUS_LINK_UP)) 254 if (!(link_status & LINK_STATUS_LINK_UP))
254 return LFA_LINK_DOWN; 255 return LFA_LINK_DOWN;
255 256
257 /* if loaded after BOOT from SAN, don't flap the link in any case and
258 * rely on link set by preboot driver
259 */
260 if (params->feature_config_flags & FEATURE_CONFIG_BOOT_FROM_SAN)
261 return 0;
262
256 /* Verify that loopback mode is not set */ 263 /* Verify that loopback mode is not set */
257 if (params->loopback_mode) 264 if (params->loopback_mode)
258 return LFA_LOOPBACK_ENABLED; 265 return LFA_LOOPBACK_ENABLED;
@@ -1440,30 +1447,47 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1440/******************************************************************/ 1447/******************************************************************/
1441/* MAC/PBF section */ 1448/* MAC/PBF section */
1442/******************************************************************/ 1449/******************************************************************/
1443static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) 1450static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id,
1451 u32 emac_base)
1444{ 1452{
1445 u32 mode, emac_base; 1453 u32 new_mode, cur_mode;
1454 u32 clc_cnt;
1446 /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz 1455 /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1447 * (a value of 49==0x31) and make sure that the AUTO poll is off 1456 * (a value of 49==0x31) and make sure that the AUTO poll is off
1448 */ 1457 */
1458 cur_mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1449 1459
1450 if (CHIP_IS_E2(bp))
1451 emac_base = GRCBASE_EMAC0;
1452 else
1453 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1454 mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1455 mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
1456 EMAC_MDIO_MODE_CLOCK_CNT);
1457 if (USES_WARPCORE(bp)) 1460 if (USES_WARPCORE(bp))
1458 mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); 1461 clc_cnt = 74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
1459 else 1462 else
1460 mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT); 1463 clc_cnt = 49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT;
1461 1464
1462 mode |= (EMAC_MDIO_MODE_CLAUSE_45); 1465 if (((cur_mode & EMAC_MDIO_MODE_CLOCK_CNT) == clc_cnt) &&
1463 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode); 1466 (cur_mode & (EMAC_MDIO_MODE_CLAUSE_45)))
1467 return;
1464 1468
1469 new_mode = cur_mode &
1470 ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
1471 new_mode |= clc_cnt;
1472 new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
1473
1474 DP(NETIF_MSG_LINK, "Changing emac_mode from 0x%x to 0x%x\n",
1475 cur_mode, new_mode);
1476 REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
1465 udelay(40); 1477 udelay(40);
1466} 1478}
1479
1480static void bnx2x_set_mdio_emac_per_phy(struct bnx2x *bp,
1481 struct link_params *params)
1482{
1483 u8 phy_index;
1484 /* Set mdio clock per phy */
1485 for (phy_index = INT_PHY; phy_index < params->num_phys;
1486 phy_index++)
1487 bnx2x_set_mdio_clk(bp, params->chip_id,
1488 params->phy[phy_index].mdio_ctrl);
1489}
1490
1467static u8 bnx2x_is_4_port_mode(struct bnx2x *bp) 1491static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
1468{ 1492{
1469 u32 port4mode_ovwr_val; 1493 u32 port4mode_ovwr_val;
@@ -1508,7 +1532,8 @@ static void bnx2x_emac_init(struct link_params *params,
1508 } 1532 }
1509 timeout--; 1533 timeout--;
1510 } while (val & EMAC_MODE_RESET); 1534 } while (val & EMAC_MODE_RESET);
1511 bnx2x_set_mdio_clk(bp, params->chip_id, port); 1535
1536 bnx2x_set_mdio_emac_per_phy(bp, params);
1512 /* Set mac address */ 1537 /* Set mac address */
1513 val = ((params->mac_addr[0] << 8) | 1538 val = ((params->mac_addr[0] << 8) |
1514 params->mac_addr[1]); 1539 params->mac_addr[1]);
@@ -1664,7 +1689,10 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1664 * ports of the path 1689 * ports of the path
1665 */ 1690 */
1666 1691
1667 if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) && 1692 if (((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) ||
1693 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) ||
1694 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE)) &&
1695 is_port4mode &&
1668 (REG_RD(bp, MISC_REG_RESET_REG_2) & 1696 (REG_RD(bp, MISC_REG_RESET_REG_2) &
1669 MISC_REGISTERS_RESET_REG_2_XMAC)) { 1697 MISC_REGISTERS_RESET_REG_2_XMAC)) {
1670 DP(NETIF_MSG_LINK, 1698 DP(NETIF_MSG_LINK,
@@ -1760,6 +1788,18 @@ static int bnx2x_xmac_enable(struct link_params *params,
1760 */ 1788 */
1761 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); 1789 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
1762 1790
1791 /* When XMAC is in XLGMII mode, disable sending idles for fault
1792 * detection.
1793 */
1794 if (!(params->phy[INT_PHY].flags & FLAGS_TX_ERROR_CHECK)) {
1795 REG_WR(bp, xmac_base + XMAC_REG_RX_LSS_CTRL,
1796 (XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE |
1797 XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE));
1798 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
1799 REG_WR(bp, xmac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
1800 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
1801 XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
1802 }
1763 /* Set Max packet size */ 1803 /* Set Max packet size */
1764 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710); 1804 REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
1765 1805
@@ -1780,6 +1820,12 @@ static int bnx2x_xmac_enable(struct link_params *params,
1780 /* Enable TX and RX */ 1820 /* Enable TX and RX */
1781 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN; 1821 val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
1782 1822
1823 /* Set MAC in XLGMII mode for dual-mode */
1824 if ((vars->line_speed == SPEED_20000) &&
1825 (params->phy[INT_PHY].supported &
1826 SUPPORTED_20000baseKR2_Full))
1827 val |= XMAC_CTRL_REG_XLGMII_ALIGN_ENB;
1828
1783 /* Check loopback mode */ 1829 /* Check loopback mode */
1784 if (lb) 1830 if (lb)
1785 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK; 1831 val |= XMAC_CTRL_REG_LINE_LOCAL_LPBK;
@@ -2096,6 +2142,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
2096 port_mb[params->port].link_status), link_status); 2142 port_mb[params->port].link_status), link_status);
2097} 2143}
2098 2144
2145static void bnx2x_update_link_attr(struct link_params *params, u32 link_attr)
2146{
2147 struct bnx2x *bp = params->bp;
2148
2149 if (SHMEM2_HAS(bp, link_attr_sync))
2150 REG_WR(bp, params->shmem2_base +
2151 offsetof(struct shmem2_region,
2152 link_attr_sync[params->port]), link_attr);
2153}
2154
2099static void bnx2x_update_pfc_nig(struct link_params *params, 2155static void bnx2x_update_pfc_nig(struct link_params *params,
2100 struct link_vars *vars, 2156 struct link_vars *vars,
2101 struct bnx2x_nig_brb_pfc_port_params *nig_params) 2157 struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2126,7 +2182,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2126 if (CHIP_IS_E3(bp)) 2182 if (CHIP_IS_E3(bp))
2127 ppp_enable = 0; 2183 ppp_enable = 0;
2128 else 2184 else
2129 ppp_enable = 1; 2185 ppp_enable = 1;
2130 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2186 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
2131 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN); 2187 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
2132 xcm_out_en = 0; 2188 xcm_out_en = 0;
@@ -2247,7 +2303,6 @@ int bnx2x_update_pfc(struct link_params *params,
2247 return bnx2x_status; 2303 return bnx2x_status;
2248} 2304}
2249 2305
2250
2251static int bnx2x_bmac1_enable(struct link_params *params, 2306static int bnx2x_bmac1_enable(struct link_params *params,
2252 struct link_vars *vars, 2307 struct link_vars *vars,
2253 u8 is_lb) 2308 u8 is_lb)
@@ -2651,6 +2706,13 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
2651 u32 val; 2706 u32 val;
2652 u16 i; 2707 u16 i;
2653 int rc = 0; 2708 int rc = 0;
2709 u32 chip_id;
2710 if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
2711 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
2712 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
2713 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
2714 }
2715
2654 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2716 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2655 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2717 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2656 EMAC_MDIO_STATUS_10MB); 2718 EMAC_MDIO_STATUS_10MB);
@@ -2719,6 +2781,13 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
2719 u32 tmp; 2781 u32 tmp;
2720 u8 i; 2782 u8 i;
2721 int rc = 0; 2783 int rc = 0;
2784 u32 chip_id;
2785 if (phy->flags & FLAGS_MDC_MDIO_WA_G) {
2786 chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
2787 ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
2788 bnx2x_set_mdio_clk(bp, chip_id, phy->mdio_ctrl);
2789 }
2790
2722 if (phy->flags & FLAGS_MDC_MDIO_WA_B0) 2791 if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
2723 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, 2792 bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
2724 EMAC_MDIO_STATUS_10MB); 2793 EMAC_MDIO_STATUS_10MB);
@@ -3147,6 +3216,15 @@ static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3147 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val); 3216 bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
3148} 3217}
3149 3218
3219static void bnx2x_cl45_read_and_write(struct bnx2x *bp,
3220 struct bnx2x_phy *phy,
3221 u8 devad, u16 reg, u16 and_val)
3222{
3223 u16 val;
3224 bnx2x_cl45_read(bp, phy, devad, reg, &val);
3225 bnx2x_cl45_write(bp, phy, devad, reg, val & and_val);
3226}
3227
3150int bnx2x_phy_read(struct link_params *params, u8 phy_addr, 3228int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3151 u8 devad, u16 reg, u16 *ret_val) 3229 u8 devad, u16 reg, u16 *ret_val)
3152{ 3230{
@@ -3551,6 +3629,44 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3551 * init configuration, and set/clear SGMII flag. Internal 3629 * init configuration, and set/clear SGMII flag. Internal
3552 * phy init is done purely in phy_init stage. 3630 * phy init is done purely in phy_init stage.
3553 */ 3631 */
3632static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3633 struct link_params *params,
3634 struct link_vars *vars)
3635{
3636 struct bnx2x *bp = params->bp;
3637 u16 i;
3638 static struct bnx2x_reg_set reg_set[] = {
3639 /* Step 1 - Program the TX/RX alignment markers */
3640 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0xa157},
3641 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xcbe2},
3642 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0x7537},
3643 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0xa157},
3644 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xcbe2},
3645 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0x7537},
3646 /* Step 2 - Configure the NP registers */
3647 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000a},
3648 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6400},
3649 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0620},
3650 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0157},
3651 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x6464},
3652 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x3150},
3653 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x3150},
3654 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
3655 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
3656 };
3657 DP(NETIF_MSG_LINK, "Enabling 20G-KR2\n");
3658
3659 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3660 MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
3661
3662 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
3663 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3664 reg_set[i].val);
3665
3666 /* Start KR2 work-around timer which handles BCM8073 link-parner */
3667 vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
3668 bnx2x_update_link_attr(params, vars->link_attr_sync);
3669}
3554 3670
3555static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, 3671static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3556 struct link_params *params) 3672 struct link_params *params)
@@ -3564,6 +3680,21 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
3564 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); 3680 MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
3565} 3681}
3566 3682
3683static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
3684 struct link_params *params)
3685{
3686 /* Restart autoneg on the leading lane only */
3687 struct bnx2x *bp = params->bp;
3688 u16 lane = bnx2x_get_warpcore_lane(phy, params);
3689 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3690 MDIO_AER_BLOCK_AER_REG, lane);
3691 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
3692 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
3693
3694 /* Restore AER */
3695 bnx2x_set_aer_mmd(params, phy);
3696}
3697
3567static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, 3698static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3568 struct link_params *params, 3699 struct link_params *params,
3569 struct link_vars *vars) { 3700 struct link_vars *vars) {
@@ -3576,7 +3707,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3576 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, 3707 {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
3577 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, 3708 {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
3578 /* Disable Autoneg: re-enable it after adv is done. */ 3709 /* Disable Autoneg: re-enable it after adv is done. */
3579 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0} 3710 {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0},
3711 {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
3712 {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
3580 }; 3713 };
3581 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); 3714 DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
3582 /* Set to default registers that may be overriden by 10G force */ 3715 /* Set to default registers that may be overriden by 10G force */
@@ -3585,11 +3718,11 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3585 reg_set[i].val); 3718 reg_set[i].val);
3586 3719
3587 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3720 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3588 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl); 3721 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
3589 cl72_ctrl &= 0xf8ff; 3722 cl72_ctrl &= 0x08ff;
3590 cl72_ctrl |= 0x3800; 3723 cl72_ctrl |= 0x3800;
3591 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3724 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3592 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl); 3725 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
3593 3726
3594 /* Check adding advertisement for 1G KX */ 3727 /* Check adding advertisement for 1G KX */
3595 if (((vars->line_speed == SPEED_AUTO_NEG) && 3728 if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3624,6 +3757,16 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3624 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | 3757 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3625 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | 3758 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3626 (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))); 3759 (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3760 /* Configure the next lane if dual mode */
3761 if (phy->flags & FLAGS_WC_DUAL_MODE)
3762 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3763 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
3764 ((0x02 <<
3765 MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
3766 (0x06 <<
3767 MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
3768 (0x09 <<
3769 MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
3627 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3770 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3628 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL, 3771 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
3629 0x03f0); 3772 0x03f0);
@@ -3670,10 +3813,26 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3670 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3813 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3671 MDIO_WC_REG_DIGITAL3_UP1, 0x1f); 3814 MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
3672 3815
3673 /* Enable Autoneg */ 3816 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
3674 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, 3817 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
3675 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); 3818 (phy->req_line_speed == SPEED_20000)) {
3676 3819
3820 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3821 MDIO_AER_BLOCK_AER_REG, lane);
3822
3823 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3824 MDIO_WC_REG_RX1_PCI_CTRL + (0x10*lane),
3825 (1<<11));
3826
3827 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3828 MDIO_WC_REG_XGXS_X2_CONTROL3, 0x7);
3829 bnx2x_set_aer_mmd(params, phy);
3830
3831 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
3832 }
3833
3834 /* Enable Autoneg: only on the main lane */
3835 bnx2x_warpcore_restart_AN_KR(phy, params);
3677} 3836}
3678 3837
3679static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, 3838static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
@@ -3692,9 +3851,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3692 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, 3851 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
3693 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, 3852 {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
3694 /* Leave cl72 training enable, needed for KR */ 3853 /* Leave cl72 training enable, needed for KR */
3695 {MDIO_PMA_DEVAD, 3854 {MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
3696 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
3697 0x2}
3698 }; 3855 };
3699 3856
3700 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) 3857 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
@@ -3764,27 +3921,21 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3764 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); 3921 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
3765 3922
3766 /* Disable 100FX Enable and Auto-Detect */ 3923 /* Disable 100FX Enable and Auto-Detect */
3767 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3924 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
3768 MDIO_WC_REG_FX100_CTRL1, &val); 3925 MDIO_WC_REG_FX100_CTRL1, 0xFFFA);
3769 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3770 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
3771 3926
3772 /* Disable 100FX Idle detect */ 3927 /* Disable 100FX Idle detect */
3773 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 3928 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3774 MDIO_WC_REG_FX100_CTRL3, 0x0080); 3929 MDIO_WC_REG_FX100_CTRL3, 0x0080);
3775 3930
3776 /* Set Block address to Remote PHY & Clear forced_speed[5] */ 3931 /* Set Block address to Remote PHY & Clear forced_speed[5] */
3777 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3932 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
3778 MDIO_WC_REG_DIGITAL4_MISC3, &val); 3933 MDIO_WC_REG_DIGITAL4_MISC3, 0xFF7F);
3779 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3780 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
3781 3934
3782 /* Turn off auto-detect & fiber mode */ 3935 /* Turn off auto-detect & fiber mode */
3783 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3936 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
3784 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); 3937 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
3785 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3938 0xFFEE);
3786 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
3787 (val & 0xFFEE));
3788 3939
3789 /* Set filter_force_link, disable_false_link and parallel_detect */ 3940 /* Set filter_force_link, disable_false_link and parallel_detect */
3790 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3941 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3846,22 +3997,65 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
3846 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100); 3997 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
3847 3998
3848 /* Release tx_fifo_reset */ 3999 /* Release tx_fifo_reset */
4000 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4001 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
4002 0xFFFE);
4003 /* Release rxSeqStart */
4004 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4005 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x7FFF);
4006}
4007
4008static void bnx2x_warpcore_set_20G_force_KR2(struct bnx2x_phy *phy,
4009 struct link_params *params)
4010{
4011 u16 val;
4012 struct bnx2x *bp = params->bp;
4013 /* Set global registers, so set AER lane to 0 */
4014 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4015 MDIO_AER_BLOCK_AER_REG, 0);
4016
4017 /* Disable sequencer */
4018 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4019 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, ~(1<<13));
4020
4021 bnx2x_set_aer_mmd(params, phy);
4022
4023 bnx2x_cl45_read_and_write(bp, phy, MDIO_PMA_DEVAD,
4024 MDIO_WC_REG_PMD_KR_CONTROL, ~(1<<1));
4025 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
4026 MDIO_AN_REG_CTRL, 0);
4027 /* Turn off CL73 */
3849 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4028 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3850 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); 4029 MDIO_WC_REG_CL73_USERB0_CTRL, &val);
4030 val &= ~(1<<5);
4031 val |= (1<<6);
3851 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4032 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3852 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE); 4033 MDIO_WC_REG_CL73_USERB0_CTRL, val);
4034
4035 /* Set 20G KR2 force speed */
4036 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4037 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x1f);
4038
4039 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4040 MDIO_WC_REG_DIGITAL4_MISC3, (1<<7));
3853 4041
3854 /* Release rxSeqStart */
3855 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4042 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3856 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); 4043 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &val);
4044 val &= ~(3<<14);
4045 val |= (1<<15);
3857 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4046 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3858 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF)); 4047 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, val);
3859} 4048 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4049 MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0x835A);
3860 4050
3861static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp, 4051 /* Enable sequencer (over lane 0) */
3862 struct bnx2x_phy *phy) 4052 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
3863{ 4053 MDIO_AER_BLOCK_AER_REG, 0);
3864 DP(NETIF_MSG_LINK, "KR2 still not supported !!!\n"); 4054
4055 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4056 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, (1<<13));
4057
4058 bnx2x_set_aer_mmd(params, phy);
3865} 4059}
3866 4060
3867static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp, 4061static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
@@ -3931,20 +4125,16 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
3931 u16 val16, digctrl_kx1, digctrl_kx2; 4125 u16 val16, digctrl_kx1, digctrl_kx2;
3932 4126
3933 /* Clear XFI clock comp in non-10G single lane mode. */ 4127 /* Clear XFI clock comp in non-10G single lane mode. */
3934 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4128 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
3935 MDIO_WC_REG_RX66_CONTROL, &val16); 4129 MDIO_WC_REG_RX66_CONTROL, ~(3<<13));
3936 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3937 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
3938 4130
3939 bnx2x_warpcore_set_lpi_passthrough(phy, params); 4131 bnx2x_warpcore_set_lpi_passthrough(phy, params);
3940 4132
3941 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { 4133 if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
3942 /* SGMII Autoneg */ 4134 /* SGMII Autoneg */
3943 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4135 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
3944 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4136 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
3945 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4137 0x1000);
3946 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
3947 val16 | 0x1000);
3948 DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n"); 4138 DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
3949 } else { 4139 } else {
3950 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4140 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4086,7 +4276,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4086 if ((cfg_pin < PIN_CFG_GPIO0_P0) || 4276 if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
4087 (cfg_pin > PIN_CFG_GPIO3_P1)) { 4277 (cfg_pin > PIN_CFG_GPIO3_P1)) {
4088 DP(NETIF_MSG_LINK, 4278 DP(NETIF_MSG_LINK,
4089 "ERROR: Invalid cfg pin %x for module detect indication\n", 4279 "No cfg pin %x for module detect indication\n",
4090 cfg_pin); 4280 cfg_pin);
4091 return -EINVAL; 4281 return -EINVAL;
4092 } 4282 }
@@ -4097,7 +4287,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4097 *gpio_num = MISC_REGISTERS_GPIO_3; 4287 *gpio_num = MISC_REGISTERS_GPIO_3;
4098 *gpio_port = port; 4288 *gpio_port = port;
4099 } 4289 }
4100 DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port); 4290
4101 return 0; 4291 return 0;
4102} 4292}
4103 4293
@@ -4120,7 +4310,7 @@ static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
4120 return 0; 4310 return 0;
4121} 4311}
4122static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy, 4312static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
4123 struct link_params *params) 4313 struct link_params *params)
4124{ 4314{
4125 u16 gp2_status_reg0, lane; 4315 u16 gp2_status_reg0, lane;
4126 struct bnx2x *bp = params->bp; 4316 struct bnx2x *bp = params->bp;
@@ -4134,8 +4324,8 @@ static int bnx2x_warpcore_get_sigdet(struct bnx2x_phy *phy,
4134} 4324}
4135 4325
4136static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, 4326static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4137 struct link_params *params, 4327 struct link_params *params,
4138 struct link_vars *vars) 4328 struct link_vars *vars)
4139{ 4329{
4140 struct bnx2x *bp = params->bp; 4330 struct bnx2x *bp = params->bp;
4141 u32 serdes_net_if; 4331 u32 serdes_net_if;
@@ -4163,7 +4353,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4163 case PORT_HW_CFG_NET_SERDES_IF_KR: 4353 case PORT_HW_CFG_NET_SERDES_IF_KR:
4164 /* Do we get link yet? */ 4354 /* Do we get link yet? */
4165 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1, 4355 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 0x81d1,
4166 &gp_status1); 4356 &gp_status1);
4167 lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */ 4357 lnkup = (gp_status1 >> (8+lane)) & 0x1;/* 1G */
4168 /*10G KR*/ 4358 /*10G KR*/
4169 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1; 4359 lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
@@ -4215,6 +4405,27 @@ static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
4215 } 4405 }
4216} 4406}
4217 4407
4408static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
4409 struct bnx2x_phy *phy,
4410 u8 tx_en)
4411{
4412 struct bnx2x *bp = params->bp;
4413 u32 cfg_pin;
4414 u8 port = params->port;
4415
4416 cfg_pin = REG_RD(bp, params->shmem_base +
4417 offsetof(struct shmem_region,
4418 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
4419 PORT_HW_CFG_E3_TX_LASER_MASK;
4420 /* Set the !tx_en since this pin is DISABLE_TX_LASER */
4421 DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
4422
4423 /* For 20G, the expected pin to be used is 3 pins after the current */
4424 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
4425 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
4426 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
4427}
4428
4218static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4429static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4219 struct link_params *params, 4430 struct link_params *params,
4220 struct link_vars *vars) 4431 struct link_vars *vars)
@@ -4275,9 +4486,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4275 break; 4486 break;
4276 4487
4277 case PORT_HW_CFG_NET_SERDES_IF_SFI: 4488 case PORT_HW_CFG_NET_SERDES_IF_SFI:
4278 /* Issue Module detection */ 4489 /* Issue Module detection if module is plugged, or
4490 * enabled transmitter to avoid current leakage in case
4491 * no module is connected
4492 */
4279 if (bnx2x_is_sfp_module_plugged(phy, params)) 4493 if (bnx2x_is_sfp_module_plugged(phy, params))
4280 bnx2x_sfp_module_detection(phy, params); 4494 bnx2x_sfp_module_detection(phy, params);
4495 else
4496 bnx2x_sfp_e3_set_transmitter(params, phy, 1);
4281 4497
4282 bnx2x_warpcore_config_sfi(phy, params); 4498 bnx2x_warpcore_config_sfi(phy, params);
4283 break; 4499 break;
@@ -4293,16 +4509,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4293 4509
4294 bnx2x_sfp_module_detection(phy, params); 4510 bnx2x_sfp_module_detection(phy, params);
4295 break; 4511 break;
4296
4297 case PORT_HW_CFG_NET_SERDES_IF_KR2: 4512 case PORT_HW_CFG_NET_SERDES_IF_KR2:
4298 if (vars->line_speed != SPEED_20000) { 4513 if (!params->loopback_mode) {
4299 DP(NETIF_MSG_LINK, "Speed not supported yet\n"); 4514 bnx2x_warpcore_enable_AN_KR(phy, params, vars);
4300 return; 4515 } else {
4516 DP(NETIF_MSG_LINK, "Setting KR 20G-Force\n");
4517 bnx2x_warpcore_set_20G_force_KR2(phy, params);
4301 } 4518 }
4302 DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
4303 bnx2x_warpcore_set_20G_KR2(bp, phy);
4304 break; 4519 break;
4305
4306 default: 4520 default:
4307 DP(NETIF_MSG_LINK, 4521 DP(NETIF_MSG_LINK,
4308 "Unsupported Serdes Net Interface 0x%x\n", 4522 "Unsupported Serdes Net Interface 0x%x\n",
@@ -4316,68 +4530,35 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4316 DP(NETIF_MSG_LINK, "Exit config init\n"); 4530 DP(NETIF_MSG_LINK, "Exit config init\n");
4317} 4531}
4318 4532
4319static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
4320 struct bnx2x_phy *phy,
4321 u8 tx_en)
4322{
4323 struct bnx2x *bp = params->bp;
4324 u32 cfg_pin;
4325 u8 port = params->port;
4326
4327 cfg_pin = REG_RD(bp, params->shmem_base +
4328 offsetof(struct shmem_region,
4329 dev_info.port_hw_config[port].e3_sfp_ctrl)) &
4330 PORT_HW_CFG_TX_LASER_MASK;
4331 /* Set the !tx_en since this pin is DISABLE_TX_LASER */
4332 DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
4333 /* For 20G, the expected pin to be used is 3 pins after the current */
4334
4335 bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
4336 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
4337 bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
4338}
4339
4340static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy, 4533static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4341 struct link_params *params) 4534 struct link_params *params)
4342{ 4535{
4343 struct bnx2x *bp = params->bp; 4536 struct bnx2x *bp = params->bp;
4344 u16 val16, lane; 4537 u16 val16, lane;
4345 bnx2x_sfp_e3_set_transmitter(params, phy, 0); 4538 bnx2x_sfp_e3_set_transmitter(params, phy, 0);
4346 bnx2x_set_mdio_clk(bp, params->chip_id, params->port); 4539 bnx2x_set_mdio_emac_per_phy(bp, params);
4347 bnx2x_set_aer_mmd(params, phy); 4540 bnx2x_set_aer_mmd(params, phy);
4348 /* Global register */ 4541 /* Global register */
4349 bnx2x_warpcore_reset_lane(bp, phy, 1); 4542 bnx2x_warpcore_reset_lane(bp, phy, 1);
4350 4543
4351 /* Clear loopback settings (if any) */ 4544 /* Clear loopback settings (if any) */
4352 /* 10G & 20G */ 4545 /* 10G & 20G */
4353 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4546 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4354 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); 4547 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0xBFFF);
4355 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4356 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
4357 0xBFFF);
4358 4548
4359 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4549 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4360 MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); 4550 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0xfffe);
4361 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4362 MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
4363 4551
4364 /* Update those 1-copy registers */ 4552 /* Update those 1-copy registers */
4365 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4553 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4366 MDIO_AER_BLOCK_AER_REG, 0); 4554 MDIO_AER_BLOCK_AER_REG, 0);
4367 /* Enable 1G MDIO (1-copy) */ 4555 /* Enable 1G MDIO (1-copy) */
4368 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4556 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4369 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4557 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4370 &val16); 4558 ~0x10);
4371 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4372 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4373 val16 & ~0x10);
4374
4375 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4376 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
4377 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4378 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
4379 val16 & 0xff00);
4380 4559
4560 bnx2x_cl45_read_and_write(bp, phy, MDIO_WC_DEVAD,
4561 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 0xff00);
4381 lane = bnx2x_get_warpcore_lane(phy, params); 4562 lane = bnx2x_get_warpcore_lane(phy, params);
4382 /* Disable CL36 PCS Tx */ 4563 /* Disable CL36 PCS Tx */
4383 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4564 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4413,8 +4594,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4413 DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n", 4594 DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
4414 params->loopback_mode, phy->req_line_speed); 4595 params->loopback_mode, phy->req_line_speed);
4415 4596
4416 if (phy->req_line_speed < SPEED_10000) { 4597 if (phy->req_line_speed < SPEED_10000 ||
4417 /* 10/100/1000 */ 4598 phy->supported & SUPPORTED_20000baseKR2_Full) {
4599 /* 10/100/1000/20G-KR2 */
4418 4600
4419 /* Update those 1-copy registers */ 4601 /* Update those 1-copy registers */
4420 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4602 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
@@ -4427,18 +4609,20 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
4427 lane = bnx2x_get_warpcore_lane(phy, params); 4609 lane = bnx2x_get_warpcore_lane(phy, params);
4428 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4610 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4429 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16); 4611 MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
4612 val16 |= (1<<lane);
4613 if (phy->flags & FLAGS_WC_DUAL_MODE)
4614 val16 |= (2<<lane);
4430 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 4615 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
4431 MDIO_WC_REG_XGXSBLK1_LANECTRL2, 4616 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
4432 val16 | (1<<lane)); 4617 val16);
4433 4618
4434 /* Switch back to 4-copy registers */ 4619 /* Switch back to 4-copy registers */
4435 bnx2x_set_aer_mmd(params, phy); 4620 bnx2x_set_aer_mmd(params, phy);
4436 } else { 4621 } else {
4437 /* 10G & 20G */ 4622 /* 10G / 20G-DXGXS */
4438 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4623 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4439 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 4624 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
4440 0x4000); 4625 0x4000);
4441
4442 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, 4626 bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
4443 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); 4627 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
4444 } 4628 }
@@ -4603,6 +4787,10 @@ void bnx2x_link_status_update(struct link_params *params,
4603 params->feature_config_flags &= 4787 params->feature_config_flags &=
4604 ~FEATURE_CONFIG_PFC_ENABLED; 4788 ~FEATURE_CONFIG_PFC_ENABLED;
4605 4789
4790 if (SHMEM2_HAS(bp, link_attr_sync))
4791 vars->link_attr_sync = SHMEM2_RD(bp,
4792 link_attr_sync[params->port]);
4793
4606 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", 4794 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
4607 vars->link_status, vars->phy_link_up, vars->aeu_int_mask); 4795 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
4608 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n", 4796 DP(NETIF_MSG_LINK, "line_speed %x duplex %x flow_ctrl 0x%x\n",
@@ -5332,6 +5520,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
5332 vars->link_status |= LINK_10GTFD; 5520 vars->link_status |= LINK_10GTFD;
5333 break; 5521 break;
5334 case GP_STATUS_20G_DXGXS: 5522 case GP_STATUS_20G_DXGXS:
5523 case GP_STATUS_20G_KR2:
5335 vars->line_speed = SPEED_20000; 5524 vars->line_speed = SPEED_20000;
5336 vars->link_status |= LINK_20GTFD; 5525 vars->link_status |= LINK_20GTFD;
5337 break; 5526 break;
@@ -5439,7 +5628,15 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5439 int rc = 0; 5628 int rc = 0;
5440 lane = bnx2x_get_warpcore_lane(phy, params); 5629 lane = bnx2x_get_warpcore_lane(phy, params);
5441 /* Read gp_status */ 5630 /* Read gp_status */
5442 if (phy->req_line_speed > SPEED_10000) { 5631 if ((params->loopback_mode) &&
5632 (phy->flags & FLAGS_WC_DUAL_MODE)) {
5633 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5634 MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
5635 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5636 MDIO_WC_REG_DIGITAL5_LINK_STATUS, &link_up);
5637 link_up &= 0x1;
5638 } else if ((phy->req_line_speed > SPEED_10000) &&
5639 (phy->supported & SUPPORTED_20000baseMLD2_Full)) {
5443 u16 temp_link_up; 5640 u16 temp_link_up;
5444 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5641 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5445 1, &temp_link_up); 5642 1, &temp_link_up);
@@ -5452,12 +5649,22 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5452 bnx2x_ext_phy_resolve_fc(phy, params, vars); 5649 bnx2x_ext_phy_resolve_fc(phy, params, vars);
5453 } else { 5650 } else {
5454 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 5651 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
5455 MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1); 5652 MDIO_WC_REG_GP2_STATUS_GP_2_1,
5653 &gp_status1);
5456 DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1); 5654 DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
5457 /* Check for either KR or generic link up. */ 5655 /* Check for either KR, 1G, or AN up. */
5458 gp_status1 = ((gp_status1 >> 8) & 0xf) | 5656 link_up = ((gp_status1 >> 8) |
5459 ((gp_status1 >> 12) & 0xf); 5657 (gp_status1 >> 12) |
5460 link_up = gp_status1 & (1 << lane); 5658 (gp_status1)) &
5659 (1 << lane);
5660 if (phy->supported & SUPPORTED_20000baseKR2_Full) {
5661 u16 an_link;
5662 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
5663 MDIO_AN_REG_STATUS, &an_link);
5664 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
5665 MDIO_AN_REG_STATUS, &an_link);
5666 link_up |= (an_link & (1<<2));
5667 }
5461 if (link_up && SINGLE_MEDIA_DIRECT(params)) { 5668 if (link_up && SINGLE_MEDIA_DIRECT(params)) {
5462 u16 pd, gp_status4; 5669 u16 pd, gp_status4;
5463 if (phy->req_line_speed == SPEED_AUTO_NEG) { 5670 if (phy->req_line_speed == SPEED_AUTO_NEG) {
@@ -5522,7 +5729,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
5522 if ((lane & 1) == 0) 5729 if ((lane & 1) == 0)
5523 gp_speed <<= 8; 5730 gp_speed <<= 8;
5524 gp_speed &= 0x3f00; 5731 gp_speed &= 0x3f00;
5525 5732 link_up = !!link_up;
5526 5733
5527 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed, 5734 rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
5528 duplex); 5735 duplex);
@@ -6683,7 +6890,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6683 } else if (prev_line_speed != vars->line_speed) { 6890 } else if (prev_line_speed != vars->line_speed) {
6684 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 6891 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
6685 0); 6892 0);
6686 usleep_range(1000, 2000); 6893 usleep_range(1000, 2000);
6687 } 6894 }
6688 } 6895 }
6689 6896
@@ -6753,7 +6960,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
6753{ 6960{
6754 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6961 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6755 MISC_REGISTERS_GPIO_OUTPUT_LOW, port); 6962 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
6756 usleep_range(1000, 2000); 6963 usleep_range(1000, 2000);
6757 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6964 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
6758 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); 6965 MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
6759} 6966}
@@ -6894,7 +7101,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
6894 MDIO_PMA_DEVAD, 7101 MDIO_PMA_DEVAD,
6895 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); 7102 MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
6896 7103
6897 usleep_range(1000, 2000); 7104 usleep_range(1000, 2000);
6898 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || 7105 } while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
6899 ((fw_msgout & 0xff) != 0x03 && (phy->type == 7106 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
6900 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); 7107 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7604,13 +7811,12 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7604 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7811 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7605 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7812 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7606 return 0; 7813 return 0;
7607 usleep_range(1000, 2000); 7814 usleep_range(1000, 2000);
7608 } 7815 }
7609 return -EINVAL; 7816 return -EINVAL;
7610} 7817}
7611 7818
7612static void bnx2x_warpcore_power_module(struct link_params *params, 7819static void bnx2x_warpcore_power_module(struct link_params *params,
7613 struct bnx2x_phy *phy,
7614 u8 power) 7820 u8 power)
7615{ 7821{
7616 u32 pin_cfg; 7822 u32 pin_cfg;
@@ -7652,10 +7858,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7652 addr32 = addr & (~0x3); 7858 addr32 = addr & (~0x3);
7653 do { 7859 do {
7654 if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) { 7860 if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
7655 bnx2x_warpcore_power_module(params, phy, 0); 7861 bnx2x_warpcore_power_module(params, 0);
7656 /* Note that 100us are not enough here */ 7862 /* Note that 100us are not enough here */
7657 usleep_range(1000, 2000); 7863 usleep_range(1000, 2000);
7658 bnx2x_warpcore_power_module(params, phy, 1); 7864 bnx2x_warpcore_power_module(params, 1);
7659 } 7865 }
7660 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt, 7866 rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
7661 data_array); 7867 data_array);
@@ -7715,7 +7921,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7715 /* Wait appropriate time for two-wire command to finish before 7921 /* Wait appropriate time for two-wire command to finish before
7716 * polling the status register 7922 * polling the status register
7717 */ 7923 */
7718 usleep_range(1000, 2000); 7924 usleep_range(1000, 2000);
7719 7925
7720 /* Wait up to 500us for command complete status */ 7926 /* Wait up to 500us for command complete status */
7721 for (i = 0; i < 100; i++) { 7927 for (i = 0; i < 100; i++) {
@@ -7751,7 +7957,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7751 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 7957 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
7752 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 7958 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
7753 return 0; 7959 return 0;
7754 usleep_range(1000, 2000); 7960 usleep_range(1000, 2000);
7755 } 7961 }
7756 7962
7757 return -EINVAL; 7963 return -EINVAL;
@@ -7786,9 +7992,8 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7786{ 7992{
7787 struct bnx2x *bp = params->bp; 7993 struct bnx2x *bp = params->bp;
7788 u32 sync_offset = 0, phy_idx, media_types; 7994 u32 sync_offset = 0, phy_idx, media_types;
7789 u8 val[2], check_limiting_mode = 0; 7995 u8 gport, val[2], check_limiting_mode = 0;
7790 *edc_mode = EDC_MODE_LIMITING; 7996 *edc_mode = EDC_MODE_LIMITING;
7791
7792 phy->media_type = ETH_PHY_UNSPECIFIED; 7997 phy->media_type = ETH_PHY_UNSPECIFIED;
7793 /* First check for copper cable */ 7998 /* First check for copper cable */
7794 if (bnx2x_read_sfp_module_eeprom(phy, 7999 if (bnx2x_read_sfp_module_eeprom(phy,
@@ -7843,8 +8048,15 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7843 SFP_EEPROM_COMP_CODE_LR_MASK | 8048 SFP_EEPROM_COMP_CODE_LR_MASK |
7844 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { 8049 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
7845 DP(NETIF_MSG_LINK, "1G Optic module detected\n"); 8050 DP(NETIF_MSG_LINK, "1G Optic module detected\n");
8051 gport = params->port;
7846 phy->media_type = ETH_PHY_SFP_1G_FIBER; 8052 phy->media_type = ETH_PHY_SFP_1G_FIBER;
7847 phy->req_line_speed = SPEED_1000; 8053 phy->req_line_speed = SPEED_1000;
8054 if (!CHIP_IS_E1x(bp))
8055 gport = BP_PATH(bp) + (params->port << 1);
8056 netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
8057 " Current SFP module in port %d is not"
8058 " compliant with 10G Ethernet\n",
8059 gport);
7848 } else { 8060 } else {
7849 int idx, cfg_idx = 0; 8061 int idx, cfg_idx = 0;
7850 DP(NETIF_MSG_LINK, "10G Optic module detected\n"); 8062 DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8241,7 +8453,7 @@ static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
8241 struct link_params *params) 8453 struct link_params *params)
8242{ 8454{
8243 struct bnx2x *bp = params->bp; 8455 struct bnx2x *bp = params->bp;
8244 bnx2x_warpcore_power_module(params, phy, 0); 8456 bnx2x_warpcore_power_module(params, 0);
8245 /* Put Warpcore in low power mode */ 8457 /* Put Warpcore in low power mode */
8246 REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e); 8458 REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
8247 8459
@@ -8264,7 +8476,7 @@ static void bnx2x_power_sfp_module(struct link_params *params,
8264 bnx2x_8727_power_module(params->bp, phy, power); 8476 bnx2x_8727_power_module(params->bp, phy, power);
8265 break; 8477 break;
8266 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 8478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8267 bnx2x_warpcore_power_module(params, phy, power); 8479 bnx2x_warpcore_power_module(params, power);
8268 break; 8480 break;
8269 default: 8481 default:
8270 break; 8482 break;
@@ -8337,7 +8549,8 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8337 u32 val = REG_RD(bp, params->shmem_base + 8549 u32 val = REG_RD(bp, params->shmem_base +
8338 offsetof(struct shmem_region, dev_info. 8550 offsetof(struct shmem_region, dev_info.
8339 port_feature_config[params->port].config)); 8551 port_feature_config[params->port].config));
8340 8552 /* Enabled transmitter by default */
8553 bnx2x_sfp_set_transmitter(params, phy, 1);
8341 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n", 8554 DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
8342 params->port); 8555 params->port);
8343 /* Power up module */ 8556 /* Power up module */
@@ -8370,14 +8583,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8370 */ 8583 */
8371 bnx2x_set_limiting_mode(params, phy, edc_mode); 8584 bnx2x_set_limiting_mode(params, phy, edc_mode);
8372 8585
8373 /* Enable transmit for this module if the module is approved, or 8586 /* Disable transmit for this module if the module is not approved, and
8374 * if unapproved modules should also enable the Tx laser 8587 * laser needs to be disabled.
8375 */ 8588 */
8376 if (rc == 0 || 8589 if ((rc) &&
8377 (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != 8590 ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8378 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) 8591 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER))
8379 bnx2x_sfp_set_transmitter(params, phy, 1);
8380 else
8381 bnx2x_sfp_set_transmitter(params, phy, 0); 8592 bnx2x_sfp_set_transmitter(params, phy, 0);
8382 8593
8383 return rc; 8594 return rc;
@@ -8389,11 +8600,13 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8389 struct bnx2x_phy *phy; 8600 struct bnx2x_phy *phy;
8390 u32 gpio_val; 8601 u32 gpio_val;
8391 u8 gpio_num, gpio_port; 8602 u8 gpio_num, gpio_port;
8392 if (CHIP_IS_E3(bp)) 8603 if (CHIP_IS_E3(bp)) {
8393 phy = &params->phy[INT_PHY]; 8604 phy = &params->phy[INT_PHY];
8394 else 8605 /* Always enable TX laser,will be disabled in case of fault */
8606 bnx2x_sfp_set_transmitter(params, phy, 1);
8607 } else {
8395 phy = &params->phy[EXT_PHY1]; 8608 phy = &params->phy[EXT_PHY1];
8396 8609 }
8397 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base, 8610 if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
8398 params->port, &gpio_num, &gpio_port) == 8611 params->port, &gpio_num, &gpio_port) ==
8399 -EINVAL) { 8612 -EINVAL) {
@@ -8409,7 +8622,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8409 8622
8410 /* Call the handling function in case module is detected */ 8623 /* Call the handling function in case module is detected */
8411 if (gpio_val == 0) { 8624 if (gpio_val == 0) {
8412 bnx2x_set_mdio_clk(bp, params->chip_id, params->port); 8625 bnx2x_set_mdio_emac_per_phy(bp, params);
8413 bnx2x_set_aer_mmd(params, phy); 8626 bnx2x_set_aer_mmd(params, phy);
8414 8627
8415 bnx2x_power_sfp_module(params, phy, 1); 8628 bnx2x_power_sfp_module(params, phy, 1);
@@ -8438,10 +8651,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8438 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); 8651 DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
8439 } 8652 }
8440 } else { 8653 } else {
8441 u32 val = REG_RD(bp, params->shmem_base +
8442 offsetof(struct shmem_region, dev_info.
8443 port_feature_config[params->port].
8444 config));
8445 bnx2x_set_gpio_int(bp, gpio_num, 8654 bnx2x_set_gpio_int(bp, gpio_num,
8446 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8655 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8447 gpio_port); 8656 gpio_port);
@@ -8449,10 +8658,6 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8449 * Disable transmit for this module 8658 * Disable transmit for this module
8450 */ 8659 */
8451 phy->media_type = ETH_PHY_NOT_PRESENT; 8660 phy->media_type = ETH_PHY_NOT_PRESENT;
8452 if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
8453 PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
8454 CHIP_IS_E3(bp))
8455 bnx2x_sfp_set_transmitter(params, phy, 0);
8456 } 8661 }
8457} 8662}
8458 8663
@@ -9192,6 +9397,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9192 bnx2x_cl45_read(bp, phy, 9397 bnx2x_cl45_read(bp, phy,
9193 MDIO_PMA_DEVAD, 9398 MDIO_PMA_DEVAD,
9194 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status); 9399 MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
9400 bnx2x_8727_power_module(params->bp, phy, 0);
9195 return 0; 9401 return 0;
9196 } 9402 }
9197 } /* Over current check */ 9403 } /* Over current check */
@@ -9296,20 +9502,28 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9296 struct bnx2x *bp, 9502 struct bnx2x *bp,
9297 u8 port) 9503 u8 port)
9298{ 9504{
9299 u16 val, fw_ver1, fw_ver2, cnt; 9505 u16 val, fw_ver2, cnt, i;
9506 static struct bnx2x_reg_set reg_set[] = {
9507 {MDIO_PMA_DEVAD, 0xA819, 0x0014},
9508 {MDIO_PMA_DEVAD, 0xA81A, 0xc200},
9509 {MDIO_PMA_DEVAD, 0xA81B, 0x0000},
9510 {MDIO_PMA_DEVAD, 0xA81C, 0x0300},
9511 {MDIO_PMA_DEVAD, 0xA817, 0x0009}
9512 };
9513 u16 fw_ver1;
9300 9514
9301 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9515 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
9516 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
9302 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9517 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9303 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9518 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
9304 phy->ver_addr); 9519 phy->ver_addr);
9305 } else { 9520 } else {
9306 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9521 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9307 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9522 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
9308 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); 9523 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
9309 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); 9524 i++)
9310 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); 9525 bnx2x_cl45_write(bp, phy, reg_set[i].devad,
9311 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); 9526 reg_set[i].reg, reg_set[i].val);
9312 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
9313 9527
9314 for (cnt = 0; cnt < 100; cnt++) { 9528 for (cnt = 0; cnt < 100; cnt++) {
9315 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); 9529 bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
@@ -9357,8 +9571,16 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9357static void bnx2x_848xx_set_led(struct bnx2x *bp, 9571static void bnx2x_848xx_set_led(struct bnx2x *bp,
9358 struct bnx2x_phy *phy) 9572 struct bnx2x_phy *phy)
9359{ 9573{
9360 u16 val, offset; 9574 u16 val, offset, i;
9361 9575 static struct bnx2x_reg_set reg_set[] = {
9576 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9577 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9578 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9579 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9580 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9581 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9582 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9583 };
9362 /* PHYC_CTL_LED_CTL */ 9584 /* PHYC_CTL_LED_CTL */
9363 bnx2x_cl45_read(bp, phy, 9585 bnx2x_cl45_read(bp, phy,
9364 MDIO_PMA_DEVAD, 9586 MDIO_PMA_DEVAD,
@@ -9370,49 +9592,20 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9370 MDIO_PMA_DEVAD, 9592 MDIO_PMA_DEVAD,
9371 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9593 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
9372 9594
9373 bnx2x_cl45_write(bp, phy, 9595 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
9374 MDIO_PMA_DEVAD, 9596 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
9375 MDIO_PMA_REG_8481_LED1_MASK, 9597 reg_set[i].val);
9376 0x80);
9377
9378 bnx2x_cl45_write(bp, phy,
9379 MDIO_PMA_DEVAD,
9380 MDIO_PMA_REG_8481_LED2_MASK,
9381 0x18);
9382
9383 /* Select activity source by Tx and Rx, as suggested by PHY AE */
9384 bnx2x_cl45_write(bp, phy,
9385 MDIO_PMA_DEVAD,
9386 MDIO_PMA_REG_8481_LED3_MASK,
9387 0x0006);
9388
9389 /* Select the closest activity blink rate to that in 10/100/1000 */
9390 bnx2x_cl45_write(bp, phy,
9391 MDIO_PMA_DEVAD,
9392 MDIO_PMA_REG_8481_LED3_BLINK,
9393 0);
9394
9395 /* Configure the blink rate to ~15.9 Hz */
9396 bnx2x_cl45_write(bp, phy,
9397 MDIO_PMA_DEVAD,
9398 MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9399 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
9400 9598
9401 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) 9599 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
9600 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
9402 offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1; 9601 offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
9403 else 9602 else
9404 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9603 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9405 9604
9406 bnx2x_cl45_read(bp, phy, 9605 /* stretch_en for LED3*/
9407 MDIO_PMA_DEVAD, offset, &val); 9606 bnx2x_cl45_read_or_write(bp, phy,
9408 val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ 9607 MDIO_PMA_DEVAD, offset,
9409 bnx2x_cl45_write(bp, phy, 9608 MDIO_PMA_REG_84823_LED3_STRETCH_EN);
9410 MDIO_PMA_DEVAD, offset, val);
9411
9412 /* 'Interrupt Mask' */
9413 bnx2x_cl45_write(bp, phy,
9414 MDIO_AN_DEVAD,
9415 0xFFFB, 0xFFFD);
9416} 9609}
9417 9610
9418static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9611static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9422,7 +9615,8 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9422 struct bnx2x *bp = params->bp; 9615 struct bnx2x *bp = params->bp;
9423 switch (action) { 9616 switch (action) {
9424 case PHY_INIT: 9617 case PHY_INIT:
9425 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9618 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
9619 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
9426 /* Save spirom version */ 9620 /* Save spirom version */
9427 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9621 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9428 } 9622 }
@@ -9443,7 +9637,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9443 struct link_vars *vars) 9637 struct link_vars *vars)
9444{ 9638{
9445 struct bnx2x *bp = params->bp; 9639 struct bnx2x *bp = params->bp;
9446 u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; 9640 u16 autoneg_val, an_1000_val, an_10_100_val;
9447 9641
9448 bnx2x_848xx_specific_func(phy, params, PHY_INIT); 9642 bnx2x_848xx_specific_func(phy, params, PHY_INIT);
9449 bnx2x_cl45_write(bp, phy, 9643 bnx2x_cl45_write(bp, phy,
@@ -9542,11 +9736,12 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9542 if (phy->req_duplex == DUPLEX_FULL) 9736 if (phy->req_duplex == DUPLEX_FULL)
9543 autoneg_val |= (1<<8); 9737 autoneg_val |= (1<<8);
9544 9738
9545 /* Always write this if this is not 84833. 9739 /* Always write this if this is not 84833/4.
9546 * For 84833, write it only when it's a forced speed. 9740 * For 84833/4, write it only when it's a forced speed.
9547 */ 9741 */
9548 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9742 if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
9549 ((autoneg_val & (1<<12)) == 0)) 9743 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
9744 ((autoneg_val & (1<<12)) == 0))
9550 bnx2x_cl45_write(bp, phy, 9745 bnx2x_cl45_write(bp, phy,
9551 MDIO_AN_DEVAD, 9746 MDIO_AN_DEVAD,
9552 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val); 9747 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
@@ -9558,14 +9753,11 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9558 DP(NETIF_MSG_LINK, "Advertising 10G\n"); 9753 DP(NETIF_MSG_LINK, "Advertising 10G\n");
9559 /* Restart autoneg for 10G*/ 9754 /* Restart autoneg for 10G*/
9560 9755
9561 bnx2x_cl45_read(bp, phy, 9756 bnx2x_cl45_read_or_write(
9562 MDIO_AN_DEVAD, 9757 bp, phy,
9563 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL, 9758 MDIO_AN_DEVAD,
9564 &an_10g_val); 9759 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
9565 bnx2x_cl45_write(bp, phy, 9760 0x1000);
9566 MDIO_AN_DEVAD,
9567 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
9568 an_10g_val | 0x1000);
9569 bnx2x_cl45_write(bp, phy, 9761 bnx2x_cl45_write(bp, phy,
9570 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 9762 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
9571 0x3200); 9763 0x3200);
@@ -9598,9 +9790,8 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
9598#define PHY84833_CMDHDLR_WAIT 300 9790#define PHY84833_CMDHDLR_WAIT 300
9599#define PHY84833_CMDHDLR_MAX_ARGS 5 9791#define PHY84833_CMDHDLR_MAX_ARGS 5
9600static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 9792static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9601 struct link_params *params, 9793 struct link_params *params, u16 fw_cmd,
9602 u16 fw_cmd, 9794 u16 cmd_args[], int argc)
9603 u16 cmd_args[], int argc)
9604{ 9795{
9605 int idx; 9796 int idx;
9606 u16 val; 9797 u16 val;
@@ -9614,7 +9805,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9614 MDIO_84833_CMD_HDLR_STATUS, &val); 9805 MDIO_84833_CMD_HDLR_STATUS, &val);
9615 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) 9806 if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
9616 break; 9807 break;
9617 usleep_range(1000, 2000); 9808 usleep_range(1000, 2000);
9618 } 9809 }
9619 if (idx >= PHY84833_CMDHDLR_WAIT) { 9810 if (idx >= PHY84833_CMDHDLR_WAIT) {
9620 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 9811 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9635,7 +9826,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9635 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || 9826 if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
9636 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) 9827 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
9637 break; 9828 break;
9638 usleep_range(1000, 2000); 9829 usleep_range(1000, 2000);
9639 } 9830 }
9640 if ((idx >= PHY84833_CMDHDLR_WAIT) || 9831 if ((idx >= PHY84833_CMDHDLR_WAIT) ||
9641 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 9832 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9654,7 +9845,6 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
9654 return 0; 9845 return 0;
9655} 9846}
9656 9847
9657
9658static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy, 9848static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
9659 struct link_params *params, 9849 struct link_params *params,
9660 struct link_vars *vars) 9850 struct link_vars *vars)
@@ -9802,11 +9992,11 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9802 struct bnx2x *bp = params->bp; 9992 struct bnx2x *bp = params->bp;
9803 u8 port, initialize = 1; 9993 u8 port, initialize = 1;
9804 u16 val; 9994 u16 val;
9805 u32 actual_phy_selection, cms_enable; 9995 u32 actual_phy_selection;
9806 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; 9996 u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
9807 int rc = 0; 9997 int rc = 0;
9808 9998
9809 usleep_range(1000, 2000); 9999 usleep_range(1000, 2000);
9810 10000
9811 if (!(CHIP_IS_E1x(bp))) 10001 if (!(CHIP_IS_E1x(bp)))
9812 port = BP_PATH(bp); 10002 port = BP_PATH(bp);
@@ -9828,7 +10018,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9828 10018
9829 /* Wait for GPHY to come out of reset */ 10019 /* Wait for GPHY to come out of reset */
9830 msleep(50); 10020 msleep(50);
9831 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10021 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
10022 (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
9832 /* BCM84823 requires that XGXS links up first @ 10G for normal 10023 /* BCM84823 requires that XGXS links up first @ 10G for normal
9833 * behavior. 10024 * behavior.
9834 */ 10025 */
@@ -9884,7 +10075,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9884 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", 10075 DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
9885 params->multi_phy_config, val); 10076 params->multi_phy_config, val);
9886 10077
9887 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10078 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
10079 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
9888 bnx2x_84833_pair_swap_cfg(phy, params, vars); 10080 bnx2x_84833_pair_swap_cfg(phy, params, vars);
9889 10081
9890 /* Keep AutogrEEEn disabled. */ 10082 /* Keep AutogrEEEn disabled. */
@@ -9904,7 +10096,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9904 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 10096 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9905 /* 84833 PHY has a better feature and doesn't need to support this. */ 10097 /* 84833 PHY has a better feature and doesn't need to support this. */
9906 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) { 10098 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
9907 cms_enable = REG_RD(bp, params->shmem_base + 10099 u32 cms_enable = REG_RD(bp, params->shmem_base +
9908 offsetof(struct shmem_region, 10100 offsetof(struct shmem_region,
9909 dev_info.port_hw_config[params->port].default_cfg)) & 10101 dev_info.port_hw_config[params->port].default_cfg)) &
9910 PORT_HW_CFG_ENABLE_CMS_MASK; 10102 PORT_HW_CFG_ENABLE_CMS_MASK;
@@ -9933,7 +10125,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9933 return rc; 10125 return rc;
9934 } 10126 }
9935 10127
9936 if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) && 10128 if ((phy->req_duplex == DUPLEX_FULL) &&
9937 (params->eee_mode & EEE_MODE_ADV_LPI) && 10129 (params->eee_mode & EEE_MODE_ADV_LPI) &&
9938 (bnx2x_eee_calc_timer(params) || 10130 (bnx2x_eee_calc_timer(params) ||
9939 !(params->eee_mode & EEE_MODE_ENABLE_LPI))) 10131 !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
@@ -9948,15 +10140,13 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9948 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10140 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
9949 } 10141 }
9950 10142
9951 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 10143 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
10144 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
9952 /* Bring PHY out of super isolate mode as the final step. */ 10145 /* Bring PHY out of super isolate mode as the final step. */
9953 bnx2x_cl45_read(bp, phy, 10146 bnx2x_cl45_read_and_write(bp, phy,
9954 MDIO_CTL_DEVAD, 10147 MDIO_CTL_DEVAD,
9955 MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val); 10148 MDIO_84833_TOP_CFG_XGPHY_STRAP1,
9956 val &= ~MDIO_84833_SUPER_ISOLATE; 10149 (u16)~MDIO_84833_SUPER_ISOLATE);
9957 bnx2x_cl45_write(bp, phy,
9958 MDIO_CTL_DEVAD,
9959 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
9960 } 10150 }
9961 return rc; 10151 return rc;
9962} 10152}
@@ -10090,7 +10280,6 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10090 return link_up; 10280 return link_up;
10091} 10281}
10092 10282
10093
10094static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10283static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10095{ 10284{
10096 int status = 0; 10285 int status = 0;
@@ -10962,7 +11151,7 @@ static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
10962/* STATIC PHY DECLARATION */ 11151/* STATIC PHY DECLARATION */
10963/******************************************************************/ 11152/******************************************************************/
10964 11153
10965static struct bnx2x_phy phy_null = { 11154static const struct bnx2x_phy phy_null = {
10966 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN, 11155 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
10967 .addr = 0, 11156 .addr = 0,
10968 .def_md_devad = 0, 11157 .def_md_devad = 0,
@@ -10988,7 +11177,7 @@ static struct bnx2x_phy phy_null = {
10988 .phy_specific_func = (phy_specific_func_t)NULL 11177 .phy_specific_func = (phy_specific_func_t)NULL
10989}; 11178};
10990 11179
10991static struct bnx2x_phy phy_serdes = { 11180static const struct bnx2x_phy phy_serdes = {
10992 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT, 11181 .type = PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
10993 .addr = 0xff, 11182 .addr = 0xff,
10994 .def_md_devad = 0, 11183 .def_md_devad = 0,
@@ -11023,7 +11212,7 @@ static struct bnx2x_phy phy_serdes = {
11023 .phy_specific_func = (phy_specific_func_t)NULL 11212 .phy_specific_func = (phy_specific_func_t)NULL
11024}; 11213};
11025 11214
11026static struct bnx2x_phy phy_xgxs = { 11215static const struct bnx2x_phy phy_xgxs = {
11027 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 11216 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
11028 .addr = 0xff, 11217 .addr = 0xff,
11029 .def_md_devad = 0, 11218 .def_md_devad = 0,
@@ -11058,12 +11247,11 @@ static struct bnx2x_phy phy_xgxs = {
11058 .set_link_led = (set_link_led_t)NULL, 11247 .set_link_led = (set_link_led_t)NULL,
11059 .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func 11248 .phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
11060}; 11249};
11061static struct bnx2x_phy phy_warpcore = { 11250static const struct bnx2x_phy phy_warpcore = {
11062 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 11251 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
11063 .addr = 0xff, 11252 .addr = 0xff,
11064 .def_md_devad = 0, 11253 .def_md_devad = 0,
11065 .flags = (FLAGS_HW_LOCK_REQUIRED | 11254 .flags = FLAGS_TX_ERROR_CHECK,
11066 FLAGS_TX_ERROR_CHECK),
11067 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11255 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11068 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11256 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11069 .mdio_ctrl = 0, 11257 .mdio_ctrl = 0,
@@ -11097,7 +11285,7 @@ static struct bnx2x_phy phy_warpcore = {
11097}; 11285};
11098 11286
11099 11287
11100static struct bnx2x_phy phy_7101 = { 11288static const struct bnx2x_phy phy_7101 = {
11101 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101, 11289 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
11102 .addr = 0xff, 11290 .addr = 0xff,
11103 .def_md_devad = 0, 11291 .def_md_devad = 0,
@@ -11126,11 +11314,11 @@ static struct bnx2x_phy phy_7101 = {
11126 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led, 11314 .set_link_led = (set_link_led_t)bnx2x_7101_set_link_led,
11127 .phy_specific_func = (phy_specific_func_t)NULL 11315 .phy_specific_func = (phy_specific_func_t)NULL
11128}; 11316};
11129static struct bnx2x_phy phy_8073 = { 11317static const struct bnx2x_phy phy_8073 = {
11130 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073, 11318 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
11131 .addr = 0xff, 11319 .addr = 0xff,
11132 .def_md_devad = 0, 11320 .def_md_devad = 0,
11133 .flags = FLAGS_HW_LOCK_REQUIRED, 11321 .flags = 0,
11134 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11322 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11135 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11323 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11136 .mdio_ctrl = 0, 11324 .mdio_ctrl = 0,
@@ -11157,7 +11345,7 @@ static struct bnx2x_phy phy_8073 = {
11157 .set_link_led = (set_link_led_t)NULL, 11345 .set_link_led = (set_link_led_t)NULL,
11158 .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func 11346 .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func
11159}; 11347};
11160static struct bnx2x_phy phy_8705 = { 11348static const struct bnx2x_phy phy_8705 = {
11161 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, 11349 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
11162 .addr = 0xff, 11350 .addr = 0xff,
11163 .def_md_devad = 0, 11351 .def_md_devad = 0,
@@ -11185,7 +11373,7 @@ static struct bnx2x_phy phy_8705 = {
11185 .set_link_led = (set_link_led_t)NULL, 11373 .set_link_led = (set_link_led_t)NULL,
11186 .phy_specific_func = (phy_specific_func_t)NULL 11374 .phy_specific_func = (phy_specific_func_t)NULL
11187}; 11375};
11188static struct bnx2x_phy phy_8706 = { 11376static const struct bnx2x_phy phy_8706 = {
11189 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706, 11377 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
11190 .addr = 0xff, 11378 .addr = 0xff,
11191 .def_md_devad = 0, 11379 .def_md_devad = 0,
@@ -11215,12 +11403,11 @@ static struct bnx2x_phy phy_8706 = {
11215 .phy_specific_func = (phy_specific_func_t)NULL 11403 .phy_specific_func = (phy_specific_func_t)NULL
11216}; 11404};
11217 11405
11218static struct bnx2x_phy phy_8726 = { 11406static const struct bnx2x_phy phy_8726 = {
11219 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726, 11407 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
11220 .addr = 0xff, 11408 .addr = 0xff,
11221 .def_md_devad = 0, 11409 .def_md_devad = 0,
11222 .flags = (FLAGS_HW_LOCK_REQUIRED | 11410 .flags = (FLAGS_INIT_XGXS_FIRST |
11223 FLAGS_INIT_XGXS_FIRST |
11224 FLAGS_TX_ERROR_CHECK), 11411 FLAGS_TX_ERROR_CHECK),
11225 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11412 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11226 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11413 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
@@ -11248,7 +11435,7 @@ static struct bnx2x_phy phy_8726 = {
11248 .phy_specific_func = (phy_specific_func_t)NULL 11435 .phy_specific_func = (phy_specific_func_t)NULL
11249}; 11436};
11250 11437
11251static struct bnx2x_phy phy_8727 = { 11438static const struct bnx2x_phy phy_8727 = {
11252 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 11439 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
11253 .addr = 0xff, 11440 .addr = 0xff,
11254 .def_md_devad = 0, 11441 .def_md_devad = 0,
@@ -11278,7 +11465,7 @@ static struct bnx2x_phy phy_8727 = {
11278 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led, 11465 .set_link_led = (set_link_led_t)bnx2x_8727_set_link_led,
11279 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func 11466 .phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
11280}; 11467};
11281static struct bnx2x_phy phy_8481 = { 11468static const struct bnx2x_phy phy_8481 = {
11282 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481, 11469 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
11283 .addr = 0xff, 11470 .addr = 0xff,
11284 .def_md_devad = 0, 11471 .def_md_devad = 0,
@@ -11314,7 +11501,7 @@ static struct bnx2x_phy phy_8481 = {
11314 .phy_specific_func = (phy_specific_func_t)NULL 11501 .phy_specific_func = (phy_specific_func_t)NULL
11315}; 11502};
11316 11503
11317static struct bnx2x_phy phy_84823 = { 11504static const struct bnx2x_phy phy_84823 = {
11318 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, 11505 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
11319 .addr = 0xff, 11506 .addr = 0xff,
11320 .def_md_devad = 0, 11507 .def_md_devad = 0,
@@ -11351,7 +11538,7 @@ static struct bnx2x_phy phy_84823 = {
11351 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 11538 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11352}; 11539};
11353 11540
11354static struct bnx2x_phy phy_84833 = { 11541static const struct bnx2x_phy phy_84833 = {
11355 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, 11542 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
11356 .addr = 0xff, 11543 .addr = 0xff,
11357 .def_md_devad = 0, 11544 .def_md_devad = 0,
@@ -11386,7 +11573,41 @@ static struct bnx2x_phy phy_84833 = {
11386 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 11573 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11387}; 11574};
11388 11575
11389static struct bnx2x_phy phy_54618se = { 11576static const struct bnx2x_phy phy_84834 = {
11577 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834,
11578 .addr = 0xff,
11579 .def_md_devad = 0,
11580 .flags = FLAGS_FAN_FAILURE_DET_REQ |
11581 FLAGS_REARM_LATCH_SIGNAL,
11582 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11583 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11584 .mdio_ctrl = 0,
11585 .supported = (SUPPORTED_100baseT_Half |
11586 SUPPORTED_100baseT_Full |
11587 SUPPORTED_1000baseT_Full |
11588 SUPPORTED_10000baseT_Full |
11589 SUPPORTED_TP |
11590 SUPPORTED_Autoneg |
11591 SUPPORTED_Pause |
11592 SUPPORTED_Asym_Pause),
11593 .media_type = ETH_PHY_BASE_T,
11594 .ver_addr = 0,
11595 .req_flow_ctrl = 0,
11596 .req_line_speed = 0,
11597 .speed_cap_mask = 0,
11598 .req_duplex = 0,
11599 .rsrv = 0,
11600 .config_init = (config_init_t)bnx2x_848x3_config_init,
11601 .read_status = (read_status_t)bnx2x_848xx_read_status,
11602 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11603 .config_loopback = (config_loopback_t)NULL,
11604 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
11605 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11606 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11607 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
11608};
11609
11610static const struct bnx2x_phy phy_54618se = {
11390 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE, 11611 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
11391 .addr = 0xff, 11612 .addr = 0xff,
11392 .def_md_devad = 0, 11613 .def_md_devad = 0,
@@ -11564,9 +11785,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11564 phy->media_type = ETH_PHY_KR; 11785 phy->media_type = ETH_PHY_KR;
11565 phy->flags |= FLAGS_WC_DUAL_MODE; 11786 phy->flags |= FLAGS_WC_DUAL_MODE;
11566 phy->supported &= (SUPPORTED_20000baseKR2_Full | 11787 phy->supported &= (SUPPORTED_20000baseKR2_Full |
11788 SUPPORTED_Autoneg |
11567 SUPPORTED_FIBRE | 11789 SUPPORTED_FIBRE |
11568 SUPPORTED_Pause | 11790 SUPPORTED_Pause |
11569 SUPPORTED_Asym_Pause); 11791 SUPPORTED_Asym_Pause);
11792 phy->flags &= ~FLAGS_TX_ERROR_CHECK;
11570 break; 11793 break;
11571 default: 11794 default:
11572 DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n", 11795 DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
@@ -11665,6 +11888,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11665 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 11888 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
11666 *phy = phy_84833; 11889 *phy = phy_84833;
11667 break; 11890 break;
11891 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
11892 *phy = phy_84834;
11893 break;
11668 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: 11894 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
11669 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: 11895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
11670 *phy = phy_54618se; 11896 *phy = phy_54618se;
@@ -11721,9 +11947,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11721 } 11947 }
11722 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); 11948 phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
11723 11949
11724 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 11950 if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
11951 (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
11725 (phy->ver_addr)) { 11952 (phy->ver_addr)) {
11726 /* Remove 100Mb link supported for BCM84833 when phy fw 11953 /* Remove 100Mb link supported for BCM84833/4 when phy fw
11727 * version lower than or equal to 1.39 11954 * version lower than or equal to 1.39
11728 */ 11955 */
11729 u32 raw_ver = REG_RD(bp, phy->ver_addr); 11956 u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11733,12 +11960,6 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11733 SUPPORTED_100baseT_Full); 11960 SUPPORTED_100baseT_Full);
11734 } 11961 }
11735 11962
11736 /* In case mdc/mdio_access of the external phy is different than the
11737 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
11738 * to prevent one port interfere with another port's CL45 operations.
11739 */
11740 if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
11741 phy->flags |= FLAGS_HW_LOCK_REQUIRED;
11742 DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n", 11963 DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
11743 phy_type, port, phy_index); 11964 phy_type, port, phy_index);
11744 DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n", 11965 DP(NETIF_MSG_LINK, " addr=0x%x, mdio_ctl=0x%x\n",
@@ -11863,7 +12084,6 @@ u32 bnx2x_phy_selection(struct link_params *params)
11863 return return_cfg; 12084 return return_cfg;
11864} 12085}
11865 12086
11866
11867int bnx2x_phy_probe(struct link_params *params) 12087int bnx2x_phy_probe(struct link_params *params)
11868{ 12088{
11869 u8 phy_index, actual_phy_idx; 12089 u8 phy_index, actual_phy_idx;
@@ -11907,6 +12127,10 @@ int bnx2x_phy_probe(struct link_params *params)
11907 FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET) 12127 FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
11908 phy->flags &= ~FLAGS_TX_ERROR_CHECK; 12128 phy->flags &= ~FLAGS_TX_ERROR_CHECK;
11909 12129
12130 if (!(params->feature_config_flags &
12131 FEATURE_CONFIG_MT_SUPPORT))
12132 phy->flags |= FLAGS_MDC_MDIO_WA_G;
12133
11910 sync_offset = params->shmem_base + 12134 sync_offset = params->shmem_base +
11911 offsetof(struct shmem_region, 12135 offsetof(struct shmem_region,
11912 dev_info.port_hw_config[params->port].media_type); 12136 dev_info.port_hw_config[params->port].media_type);
@@ -11934,8 +12158,8 @@ int bnx2x_phy_probe(struct link_params *params)
11934 return 0; 12158 return 0;
11935} 12159}
11936 12160
11937void bnx2x_init_bmac_loopback(struct link_params *params, 12161static void bnx2x_init_bmac_loopback(struct link_params *params,
11938 struct link_vars *vars) 12162 struct link_vars *vars)
11939{ 12163{
11940 struct bnx2x *bp = params->bp; 12164 struct bnx2x *bp = params->bp;
11941 vars->link_up = 1; 12165 vars->link_up = 1;
@@ -11954,8 +12178,8 @@ void bnx2x_init_bmac_loopback(struct link_params *params,
11954 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12178 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11955} 12179}
11956 12180
11957void bnx2x_init_emac_loopback(struct link_params *params, 12181static void bnx2x_init_emac_loopback(struct link_params *params,
11958 struct link_vars *vars) 12182 struct link_vars *vars)
11959{ 12183{
11960 struct bnx2x *bp = params->bp; 12184 struct bnx2x *bp = params->bp;
11961 vars->link_up = 1; 12185 vars->link_up = 1;
@@ -11973,8 +12197,8 @@ void bnx2x_init_emac_loopback(struct link_params *params,
11973 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12197 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
11974} 12198}
11975 12199
11976void bnx2x_init_xmac_loopback(struct link_params *params, 12200static void bnx2x_init_xmac_loopback(struct link_params *params,
11977 struct link_vars *vars) 12201 struct link_vars *vars)
11978{ 12202{
11979 struct bnx2x *bp = params->bp; 12203 struct bnx2x *bp = params->bp;
11980 vars->link_up = 1; 12204 vars->link_up = 1;
@@ -11999,8 +12223,8 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
11999 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12223 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12000} 12224}
12001 12225
12002void bnx2x_init_umac_loopback(struct link_params *params, 12226static void bnx2x_init_umac_loopback(struct link_params *params,
12003 struct link_vars *vars) 12227 struct link_vars *vars)
12004{ 12228{
12005 struct bnx2x *bp = params->bp; 12229 struct bnx2x *bp = params->bp;
12006 vars->link_up = 1; 12230 vars->link_up = 1;
@@ -12014,17 +12238,21 @@ void bnx2x_init_umac_loopback(struct link_params *params,
12014 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); 12238 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12015} 12239}
12016 12240
12017void bnx2x_init_xgxs_loopback(struct link_params *params, 12241static void bnx2x_init_xgxs_loopback(struct link_params *params,
12018 struct link_vars *vars) 12242 struct link_vars *vars)
12019{ 12243{
12020 struct bnx2x *bp = params->bp; 12244 struct bnx2x *bp = params->bp;
12021 vars->link_up = 1; 12245 struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
12022 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 12246 vars->link_up = 1;
12023 vars->duplex = DUPLEX_FULL; 12247 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12248 vars->duplex = DUPLEX_FULL;
12024 if (params->req_line_speed[0] == SPEED_1000) 12249 if (params->req_line_speed[0] == SPEED_1000)
12025 vars->line_speed = SPEED_1000; 12250 vars->line_speed = SPEED_1000;
12251 else if ((params->req_line_speed[0] == SPEED_20000) ||
12252 (int_phy->flags & FLAGS_WC_DUAL_MODE))
12253 vars->line_speed = SPEED_20000;
12026 else 12254 else
12027 vars->line_speed = SPEED_10000; 12255 vars->line_speed = SPEED_10000;
12028 12256
12029 if (!USES_WARPCORE(bp)) 12257 if (!USES_WARPCORE(bp))
12030 bnx2x_xgxs_deassert(params); 12258 bnx2x_xgxs_deassert(params);
@@ -12044,34 +12272,30 @@ void bnx2x_init_xgxs_loopback(struct link_params *params,
12044 bnx2x_bmac_enable(params, vars, 0, 1); 12272 bnx2x_bmac_enable(params, vars, 0, 1);
12045 } 12273 }
12046 12274
12047 if (params->loopback_mode == LOOPBACK_XGXS) { 12275 if (params->loopback_mode == LOOPBACK_XGXS) {
12048 /* set 10G XGXS loopback */ 12276 /* Set 10G XGXS loopback */
12049 params->phy[INT_PHY].config_loopback( 12277 int_phy->config_loopback(int_phy, params);
12050 &params->phy[INT_PHY], 12278 } else {
12051 params); 12279 /* Set external phy loopback */
12052 12280 u8 phy_index;
12053 } else { 12281 for (phy_index = EXT_PHY1;
12054 /* set external phy loopback */ 12282 phy_index < params->num_phys; phy_index++)
12055 u8 phy_index; 12283 if (params->phy[phy_index].config_loopback)
12056 for (phy_index = EXT_PHY1; 12284 params->phy[phy_index].config_loopback(
12057 phy_index < params->num_phys; phy_index++) { 12285 &params->phy[phy_index],
12058 if (params->phy[phy_index].config_loopback) 12286 params);
12059 params->phy[phy_index].config_loopback( 12287 }
12060 &params->phy[phy_index], 12288 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12061 params);
12062 }
12063 }
12064 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12065 12289
12066 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); 12290 bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
12067} 12291}
12068 12292
12069static void bnx2x_set_rx_filter(struct link_params *params, u8 en) 12293void bnx2x_set_rx_filter(struct link_params *params, u8 en)
12070{ 12294{
12071 struct bnx2x *bp = params->bp; 12295 struct bnx2x *bp = params->bp;
12072 u8 val = en * 0x1F; 12296 u8 val = en * 0x1F;
12073 12297
12074 /* Open the gate between the NIG to the BRB */ 12298 /* Open / close the gate between the NIG and the BRB */
12075 if (!CHIP_IS_E1x(bp)) 12299 if (!CHIP_IS_E1x(bp))
12076 val |= en * 0x20; 12300 val |= en * 0x20;
12077 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); 12301 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val);
@@ -12345,7 +12569,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
12345 * Hold it as vars low 12569 * Hold it as vars low
12346 */ 12570 */
12347 /* Clear link led */ 12571 /* Clear link led */
12348 bnx2x_set_mdio_clk(bp, params->chip_id, port); 12572 bnx2x_set_mdio_emac_per_phy(bp, params);
12349 bnx2x_set_led(params, vars, LED_MODE_OFF, 0); 12573 bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
12350 12574
12351 if (reset_ext_phy) { 12575 if (reset_ext_phy) {
@@ -12696,7 +12920,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12696 /* Initiate PHY reset*/ 12920 /* Initiate PHY reset*/
12697 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, 12921 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
12698 port); 12922 port);
12699 usleep_range(1000, 2000); 12923 usleep_range(1000, 2000);
12700 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12924 bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12701 port); 12925 port);
12702 12926
@@ -12784,7 +13008,8 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
12784} 13008}
12785 13009
12786static int bnx2x_84833_pre_init_phy(struct bnx2x *bp, 13010static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
12787 struct bnx2x_phy *phy) 13011 struct bnx2x_phy *phy,
13012 u8 port)
12788{ 13013{
12789 u16 val, cnt; 13014 u16 val, cnt;
12790 /* Wait for FW completing its initialization. */ 13015 /* Wait for FW completing its initialization. */
@@ -12794,7 +13019,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
12794 MDIO_PMA_REG_CTRL, &val); 13019 MDIO_PMA_REG_CTRL, &val);
12795 if (!(val & (1<<15))) 13020 if (!(val & (1<<15)))
12796 break; 13021 break;
12797 usleep_range(1000, 2000); 13022 usleep_range(1000, 2000);
12798 } 13023 }
12799 if (cnt >= 1500) { 13024 if (cnt >= 1500) {
12800 DP(NETIF_MSG_LINK, "84833 reset timeout\n"); 13025 DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12811,26 +13036,28 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
12811 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val); 13036 MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
12812 13037
12813 /* Save spirom version */ 13038 /* Save spirom version */
12814 bnx2x_save_848xx_spirom_version(phy, bp, PORT_0); 13039 bnx2x_save_848xx_spirom_version(phy, bp, port);
12815 return 0; 13040 return 0;
12816} 13041}
12817 13042
12818int bnx2x_pre_init_phy(struct bnx2x *bp, 13043int bnx2x_pre_init_phy(struct bnx2x *bp,
12819 u32 shmem_base, 13044 u32 shmem_base,
12820 u32 shmem2_base, 13045 u32 shmem2_base,
12821 u32 chip_id) 13046 u32 chip_id,
13047 u8 port)
12822{ 13048{
12823 int rc = 0; 13049 int rc = 0;
12824 struct bnx2x_phy phy; 13050 struct bnx2x_phy phy;
12825 bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
12826 if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base, 13051 if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
12827 PORT_0, &phy)) { 13052 port, &phy) != 0) {
12828 DP(NETIF_MSG_LINK, "populate_phy failed\n"); 13053 DP(NETIF_MSG_LINK, "populate_phy failed\n");
12829 return -EINVAL; 13054 return -EINVAL;
12830 } 13055 }
13056 bnx2x_set_mdio_clk(bp, chip_id, phy.mdio_ctrl);
12831 switch (phy.type) { 13057 switch (phy.type) {
12832 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 13058 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
12833 rc = bnx2x_84833_pre_init_phy(bp, &phy); 13059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
13060 rc = bnx2x_84833_pre_init_phy(bp, &phy, port);
12834 break; 13061 break;
12835 default: 13062 default:
12836 break; 13063 break;
@@ -12867,6 +13094,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12867 phy_index, chip_id); 13094 phy_index, chip_id);
12868 break; 13095 break;
12869 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 13096 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
13097 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
12870 /* GPIO3's are linked, and so both need to be toggled 13098 /* GPIO3's are linked, and so both need to be toggled
12871 * to obtain required 2us pulse. 13099 * to obtain required 2us pulse.
12872 */ 13100 */
@@ -12898,8 +13126,9 @@ int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
12898 u32 phy_ver, val; 13126 u32 phy_ver, val;
12899 u8 phy_index = 0; 13127 u8 phy_index = 0;
12900 u32 ext_phy_type, ext_phy_config; 13128 u32 ext_phy_type, ext_phy_config;
12901 bnx2x_set_mdio_clk(bp, chip_id, PORT_0); 13129
12902 bnx2x_set_mdio_clk(bp, chip_id, PORT_1); 13130 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC0);
13131 bnx2x_set_mdio_clk(bp, chip_id, GRCBASE_EMAC1);
12903 DP(NETIF_MSG_LINK, "Begin common phy init\n"); 13132 DP(NETIF_MSG_LINK, "Begin common phy init\n");
12904 if (CHIP_IS_E3(bp)) { 13133 if (CHIP_IS_E3(bp)) {
12905 /* Enable EPIO */ 13134 /* Enable EPIO */
@@ -12960,6 +13189,7 @@ static void bnx2x_check_over_curr(struct link_params *params,
12960 " error.\n", 13189 " error.\n",
12961 params->port); 13190 params->port);
12962 vars->phy_flags |= PHY_OVER_CURRENT_FLAG; 13191 vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
13192 bnx2x_warpcore_power_module(params, 0);
12963 } 13193 }
12964 } else 13194 } else
12965 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; 13195 vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
@@ -13139,6 +13369,108 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
13139 } 13369 }
13140 } 13370 }
13141} 13371}
13372static void bnx2x_disable_kr2(struct link_params *params,
13373 struct link_vars *vars,
13374 struct bnx2x_phy *phy)
13375{
13376 struct bnx2x *bp = params->bp;
13377 int i;
13378 static struct bnx2x_reg_set reg_set[] = {
13379 /* Step 1 - Program the TX/RX alignment markers */
13380 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
13381 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
13382 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
13383 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
13384 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
13385 {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
13386 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
13387 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
13388 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
13389 {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
13390 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
13391 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
13392 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
13393 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
13394 {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
13395 };
13396 DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
13397
13398 for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
13399 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
13400 reg_set[i].val);
13401 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
13402 bnx2x_update_link_attr(params, vars->link_attr_sync);
13403
13404 /* Restart AN on leading lane */
13405 bnx2x_warpcore_restart_AN_KR(phy, params);
13406}
13407
13408static void bnx2x_kr2_recovery(struct link_params *params,
13409 struct link_vars *vars,
13410 struct bnx2x_phy *phy)
13411{
13412 struct bnx2x *bp = params->bp;
13413 DP(NETIF_MSG_LINK, "KR2 recovery\n");
13414 bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
13415 bnx2x_warpcore_restart_AN_KR(phy, params);
13416}
13417
13418static void bnx2x_check_kr2_wa(struct link_params *params,
13419 struct link_vars *vars,
13420 struct bnx2x_phy *phy)
13421{
13422 struct bnx2x *bp = params->bp;
13423 u16 base_page, next_page, not_kr2_device, lane;
13424 int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
13425
13426 if (!sigdet) {
13427 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
13428 bnx2x_kr2_recovery(params, vars, phy);
13429 return;
13430 }
13431
13432 lane = bnx2x_get_warpcore_lane(phy, params);
13433 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
13434 MDIO_AER_BLOCK_AER_REG, lane);
13435 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
13436 MDIO_AN_REG_LP_AUTO_NEG, &base_page);
13437 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
13438 MDIO_AN_REG_LP_AUTO_NEG2, &next_page);
13439 bnx2x_set_aer_mmd(params, phy);
13440
13441 /* CL73 has not begun yet */
13442 if (base_page == 0) {
13443 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
13444 bnx2x_kr2_recovery(params, vars, phy);
13445 return;
13446 }
13447
13448 /* In case NP bit is not set in the BasePage, or it is set,
13449 * but only KX is advertised, declare this link partner as non-KR2
13450 * device.
13451 */
13452 not_kr2_device = (((base_page & 0x8000) == 0) ||
13453 (((base_page & 0x8000) &&
13454 ((next_page & 0xe0) == 0x2))));
13455
13456 /* In case KR2 is already disabled, check if we need to re-enable it */
13457 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13458 if (!not_kr2_device) {
13459 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
13460 next_page);
13461 bnx2x_kr2_recovery(params, vars, phy);
13462 }
13463 return;
13464 }
13465 /* KR2 is enabled, but not KR2 device */
13466 if (not_kr2_device) {
13467 /* Disable KR2 on both lanes */
13468 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
13469 bnx2x_disable_kr2(params, vars, phy);
13470 return;
13471 }
13472}
13473
13142void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 13474void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13143{ 13475{
13144 u16 phy_idx; 13476 u16 phy_idx;
@@ -13156,6 +13488,9 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13156 if (CHIP_IS_E3(bp)) { 13488 if (CHIP_IS_E3(bp)) {
13157 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 13489 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13158 bnx2x_set_aer_mmd(params, phy); 13490 bnx2x_set_aer_mmd(params, phy);
13491 if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
13492 (phy->speed_cap_mask & SPEED_20000))
13493 bnx2x_check_kr2_wa(params, vars, phy);
13159 bnx2x_check_over_curr(params, vars); 13494 bnx2x_check_over_curr(params, vars);
13160 if (vars->rx_tx_asic_rst) 13495 if (vars->rx_tx_asic_rst)
13161 bnx2x_warpcore_config_runtime(phy, params, vars); 13496 bnx2x_warpcore_config_runtime(phy, params, vars);
@@ -13176,27 +13511,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13176 bnx2x_update_mng(params, vars->link_status); 13511 bnx2x_update_mng(params, vars->link_status);
13177 } 13512 }
13178 } 13513 }
13179
13180 }
13181
13182}
13183
13184u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
13185{
13186 u8 phy_index;
13187 struct bnx2x_phy phy;
13188 for (phy_index = INT_PHY; phy_index < MAX_PHYS;
13189 phy_index++) {
13190 if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
13191 0, &phy) != 0) {
13192 DP(NETIF_MSG_LINK, "populate phy failed\n");
13193 return 0;
13194 }
13195
13196 if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
13197 return 1;
13198 } 13514 }
13199 return 0;
13200} 13515}
13201 13516
13202u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, 13517u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 9165b89a4b19..ee6e7ec85457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -139,8 +139,6 @@ struct bnx2x_phy {
139 u8 addr; 139 u8 addr;
140 u8 def_md_devad; 140 u8 def_md_devad;
141 u16 flags; 141 u16 flags;
142 /* Require HW lock */
143#define FLAGS_HW_LOCK_REQUIRED (1<<0)
144 /* No Over-Current detection */ 142 /* No Over-Current detection */
145#define FLAGS_NOC (1<<1) 143#define FLAGS_NOC (1<<1)
146 /* Fan failure detection required */ 144 /* Fan failure detection required */
@@ -156,6 +154,7 @@ struct bnx2x_phy {
156#define FLAGS_MDC_MDIO_WA_B0 (1<<10) 154#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
157#define FLAGS_TX_ERROR_CHECK (1<<12) 155#define FLAGS_TX_ERROR_CHECK (1<<12)
158#define FLAGS_EEE (1<<13) 156#define FLAGS_EEE (1<<13)
157#define FLAGS_MDC_MDIO_WA_G (1<<15)
159 158
160 /* preemphasis values for the rx side */ 159 /* preemphasis values for the rx side */
161 u16 rx_preemphasis[4]; 160 u16 rx_preemphasis[4];
@@ -267,6 +266,9 @@ struct link_params {
267#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) 266#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
268#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) 267#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
269#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11) 268#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
269#define FEATURE_CONFIG_MT_SUPPORT (1<<13)
270#define FEATURE_CONFIG_BOOT_FROM_SAN (1<<14)
271
270 /* Will be populated during common init */ 272 /* Will be populated during common init */
271 struct bnx2x_phy phy[MAX_PHYS]; 273 struct bnx2x_phy phy[MAX_PHYS];
272 274
@@ -347,6 +349,8 @@ struct link_vars {
347 u8 rx_tx_asic_rst; 349 u8 rx_tx_asic_rst;
348 u8 turn_to_run_wc_rt; 350 u8 turn_to_run_wc_rt;
349 u16 rsrv2; 351 u16 rsrv2;
352 /* The same definitions as the shmem2 parameter */
353 u32 link_attr_sync;
350}; 354};
351 355
352/***********************************************************/ 356/***********************************************************/
@@ -418,10 +422,6 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
418 422
419void bnx2x_hw_reset_phy(struct link_params *params); 423void bnx2x_hw_reset_phy(struct link_params *params);
420 424
421/* Checks if HW lock is required for this phy/board type */
422u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
423 u32 shmem2_base);
424
425/* Check swap bit and adjust PHY order */ 425/* Check swap bit and adjust PHY order */
426u32 bnx2x_phy_selection(struct link_params *params); 426u32 bnx2x_phy_selection(struct link_params *params);
427 427
@@ -432,7 +432,8 @@ int bnx2x_phy_probe(struct link_params *params);
432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, 432u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
433 u32 shmem2_base, u8 port); 433 u32 shmem2_base, u8 port);
434 434
435 435/* Open / close the gate between the NIG and the BRB */
436void bnx2x_set_rx_filter(struct link_params *params, u8 en);
436 437
437/* DCBX structs */ 438/* DCBX structs */
438 439
@@ -459,9 +460,6 @@ struct bnx2x_nig_brb_pfc_port_params {
459 u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS]; 460 u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
460 u32 llfc_high_priority_classes; 461 u32 llfc_high_priority_classes;
461 u32 llfc_low_priority_classes; 462 u32 llfc_low_priority_classes;
462 /* BRB */
463 u32 cos0_pauseable;
464 u32 cos1_pauseable;
465}; 463};
466 464
467 465
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 01611b33a93d..940ef859dc60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -79,7 +79,7 @@
79/* Time in jiffies before concluding the transmitter is hung */ 79/* Time in jiffies before concluding the transmitter is hung */
80#define TX_TIMEOUT (5*HZ) 80#define TX_TIMEOUT (5*HZ)
81 81
82static char version[] __devinitdata = 82static char version[] =
83 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver " 83 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
84 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 84 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
85 85
@@ -149,7 +149,7 @@ enum bnx2x_board_type {
149/* indexed by board_type, above */ 149/* indexed by board_type, above */
150static struct { 150static struct {
151 char *name; 151 char *name;
152} board_info[] __devinitdata = { 152} board_info[] = {
153 { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" }, 153 { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
154 { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" }, 154 { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
155 { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" }, 155 { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
@@ -791,10 +791,9 @@ void bnx2x_panic_dump(struct bnx2x *bp)
791 791
792 /* host sb data */ 792 /* host sb data */
793 793
794#ifdef BCM_CNIC
795 if (IS_FCOE_FP(fp)) 794 if (IS_FCOE_FP(fp))
796 continue; 795 continue;
797#endif 796
798 BNX2X_ERR(" run indexes ("); 797 BNX2X_ERR(" run indexes (");
799 for (j = 0; j < HC_SB_MAX_SM; j++) 798 for (j = 0; j < HC_SB_MAX_SM; j++)
800 pr_cont("0x%x%s", 799 pr_cont("0x%x%s",
@@ -859,7 +858,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
859#ifdef BNX2X_STOP_ON_ERROR 858#ifdef BNX2X_STOP_ON_ERROR
860 /* Rings */ 859 /* Rings */
861 /* Rx */ 860 /* Rx */
862 for_each_rx_queue(bp, i) { 861 for_each_valid_rx_queue(bp, i) {
863 struct bnx2x_fastpath *fp = &bp->fp[i]; 862 struct bnx2x_fastpath *fp = &bp->fp[i];
864 863
865 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 864 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -893,7 +892,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
893 } 892 }
894 893
895 /* Tx */ 894 /* Tx */
896 for_each_tx_queue(bp, i) { 895 for_each_valid_tx_queue(bp, i) {
897 struct bnx2x_fastpath *fp = &bp->fp[i]; 896 struct bnx2x_fastpath *fp = &bp->fp[i];
898 for_each_cos_in_tx_queue(fp, cos) { 897 for_each_cos_in_tx_queue(fp, cos) {
899 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; 898 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
@@ -1483,7 +1482,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
1483 BNX2X_ERR("BUG! proper val not read from IGU!\n"); 1482 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1484} 1483}
1485 1484
1486void bnx2x_int_disable(struct bnx2x *bp) 1485static void bnx2x_int_disable(struct bnx2x *bp)
1487{ 1486{
1488 if (bp->common.int_block == INT_BLOCK_HC) 1487 if (bp->common.int_block == INT_BLOCK_HC)
1489 bnx2x_hc_int_disable(bp); 1488 bnx2x_hc_int_disable(bp);
@@ -1504,9 +1503,8 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1504 if (msix) { 1503 if (msix) {
1505 synchronize_irq(bp->msix_table[0].vector); 1504 synchronize_irq(bp->msix_table[0].vector);
1506 offset = 1; 1505 offset = 1;
1507#ifdef BCM_CNIC 1506 if (CNIC_SUPPORT(bp))
1508 offset++; 1507 offset++;
1509#endif
1510 for_each_eth_queue(bp, i) 1508 for_each_eth_queue(bp, i)
1511 synchronize_irq(bp->msix_table[offset++].vector); 1509 synchronize_irq(bp->msix_table[offset++].vector);
1512 } else 1510 } else
@@ -1588,9 +1586,8 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1588 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1586 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1589} 1587}
1590 1588
1591#ifdef BCM_CNIC
1592static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err); 1589static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1593#endif 1590
1594 1591
1595void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) 1592void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1596{ 1593{
@@ -1720,7 +1717,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1720 for_each_eth_queue(bp, i) { 1717 for_each_eth_queue(bp, i) {
1721 struct bnx2x_fastpath *fp = &bp->fp[i]; 1718 struct bnx2x_fastpath *fp = &bp->fp[i];
1722 1719
1723 mask = 0x2 << (fp->index + CNIC_PRESENT); 1720 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1724 if (status & mask) { 1721 if (status & mask) {
1725 /* Handle Rx or Tx according to SB id */ 1722 /* Handle Rx or Tx according to SB id */
1726 prefetch(fp->rx_cons_sb); 1723 prefetch(fp->rx_cons_sb);
@@ -1732,22 +1729,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1732 } 1729 }
1733 } 1730 }
1734 1731
1735#ifdef BCM_CNIC 1732 if (CNIC_SUPPORT(bp)) {
1736 mask = 0x2; 1733 mask = 0x2;
1737 if (status & (mask | 0x1)) { 1734 if (status & (mask | 0x1)) {
1738 struct cnic_ops *c_ops = NULL; 1735 struct cnic_ops *c_ops = NULL;
1739 1736
1740 if (likely(bp->state == BNX2X_STATE_OPEN)) { 1737 if (likely(bp->state == BNX2X_STATE_OPEN)) {
1741 rcu_read_lock(); 1738 rcu_read_lock();
1742 c_ops = rcu_dereference(bp->cnic_ops); 1739 c_ops = rcu_dereference(bp->cnic_ops);
1743 if (c_ops) 1740 if (c_ops)
1744 c_ops->cnic_handler(bp->cnic_data, NULL); 1741 c_ops->cnic_handler(bp->cnic_data,
1745 rcu_read_unlock(); 1742 NULL);
1746 } 1743 rcu_read_unlock();
1744 }
1747 1745
1748 status &= ~mask; 1746 status &= ~mask;
1747 }
1749 } 1748 }
1750#endif
1751 1749
1752 if (unlikely(status & 0x1)) { 1750 if (unlikely(status & 0x1)) {
1753 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1751 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -2034,40 +2032,39 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2034 return 0; 2032 return 0;
2035} 2033}
2036 2034
2037static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode) 2035static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2038{ 2036{
2039 u32 spio_mask = (1 << spio_num);
2040 u32 spio_reg; 2037 u32 spio_reg;
2041 2038
2042 if ((spio_num < MISC_REGISTERS_SPIO_4) || 2039 /* Only 2 SPIOs are configurable */
2043 (spio_num > MISC_REGISTERS_SPIO_7)) { 2040 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2044 BNX2X_ERR("Invalid SPIO %d\n", spio_num); 2041 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2045 return -EINVAL; 2042 return -EINVAL;
2046 } 2043 }
2047 2044
2048 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO); 2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2049 /* read SPIO and mask except the float bits */ 2046 /* read SPIO and mask except the float bits */
2050 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT); 2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2051 2048
2052 switch (mode) { 2049 switch (mode) {
2053 case MISC_REGISTERS_SPIO_OUTPUT_LOW: 2050 case MISC_SPIO_OUTPUT_LOW:
2054 DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num); 2051 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2055 /* clear FLOAT and set CLR */ 2052 /* clear FLOAT and set CLR */
2056 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2053 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2057 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS); 2054 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2058 break; 2055 break;
2059 2056
2060 case MISC_REGISTERS_SPIO_OUTPUT_HIGH: 2057 case MISC_SPIO_OUTPUT_HIGH:
2061 DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num); 2058 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2062 /* clear FLOAT and set SET */ 2059 /* clear FLOAT and set SET */
2063 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2060 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2064 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS); 2061 spio_reg |= (spio << MISC_SPIO_SET_POS);
2065 break; 2062 break;
2066 2063
2067 case MISC_REGISTERS_SPIO_INPUT_HI_Z: 2064 case MISC_SPIO_INPUT_HI_Z:
2068 DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num); 2065 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2069 /* set FLOAT */ 2066 /* set FLOAT */
2070 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS); 2067 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2071 break; 2068 break;
2072 2069
2073 default: 2070 default:
@@ -2106,22 +2103,25 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp)
2106 } 2103 }
2107} 2104}
2108 2105
2109u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) 2106static void bnx2x_set_requested_fc(struct bnx2x *bp)
2110{ 2107{
2111 if (!BP_NOMCP(bp)) { 2108 /* Initialize link parameters structure variables
2112 u8 rc; 2109 * It is recommended to turn off RX FC for jumbo frames
2113 int cfx_idx = bnx2x_get_link_cfg_idx(bp); 2110 * for better performance
2114 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx]; 2111 */
2115 /* 2112 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2116 * Initialize link parameters structure variables 2113 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2117 * It is recommended to turn off RX FC for jumbo frames 2114 else
2118 * for better performance 2115 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2119 */ 2116}
2120 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2121 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2122 else
2123 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2124 2117
2118int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2119{
2120 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2121 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2122
2123 if (!BP_NOMCP(bp)) {
2124 bnx2x_set_requested_fc(bp);
2125 bnx2x_acquire_phy_lock(bp); 2125 bnx2x_acquire_phy_lock(bp);
2126 2126
2127 if (load_mode == LOAD_DIAG) { 2127 if (load_mode == LOAD_DIAG) {
@@ -2150,11 +2150,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2150 2150
2151 bnx2x_calc_fc_adv(bp); 2151 bnx2x_calc_fc_adv(bp);
2152 2152
2153 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) { 2153 if (bp->link_vars.link_up) {
2154 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2154 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2155 bnx2x_link_report(bp); 2155 bnx2x_link_report(bp);
2156 } else 2156 }
2157 queue_delayed_work(bnx2x_wq, &bp->period_task, 0); 2157 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2158 bp->link_params.req_line_speed[cfx_idx] = req_line_speed; 2158 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2159 return rc; 2159 return rc;
2160 } 2160 }
@@ -3075,11 +3075,13 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3075 3075
3076static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) 3076static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3077{ 3077{
3078#ifdef BCM_CNIC
3079 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3078 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3080 struct fcoe_stats_info *fcoe_stat = 3079 struct fcoe_stats_info *fcoe_stat =
3081 &bp->slowpath->drv_info_to_mcp.fcoe_stat; 3080 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3082 3081
3082 if (!CNIC_LOADED(bp))
3083 return;
3084
3083 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3085 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3084 bp->fip_mac, ETH_ALEN); 3086 bp->fip_mac, ETH_ALEN);
3085 3087
@@ -3162,16 +3164,17 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3162 3164
3163 /* ask L5 driver to add data to the struct */ 3165 /* ask L5 driver to add data to the struct */
3164 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD); 3166 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3165#endif
3166} 3167}
3167 3168
3168static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp) 3169static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3169{ 3170{
3170#ifdef BCM_CNIC
3171 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app; 3171 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3172 struct iscsi_stats_info *iscsi_stat = 3172 struct iscsi_stats_info *iscsi_stat =
3173 &bp->slowpath->drv_info_to_mcp.iscsi_stat; 3173 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3174 3174
3175 if (!CNIC_LOADED(bp))
3176 return;
3177
3175 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, 3178 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3176 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3179 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3177 3180
@@ -3180,7 +3183,6 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3180 3183
3181 /* ask L5 driver to add data to the struct */ 3184 /* ask L5 driver to add data to the struct */
3182 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD); 3185 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3183#endif
3184} 3186}
3185 3187
3186/* called due to MCP event (on pmf): 3188/* called due to MCP event (on pmf):
@@ -3589,6 +3591,21 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3589 3591
3590 /* now set back the mask */ 3592 /* now set back the mask */
3591 if (asserted & ATTN_NIG_FOR_FUNC) { 3593 if (asserted & ATTN_NIG_FOR_FUNC) {
3594 /* Verify that IGU ack through BAR was written before restoring
3595 * NIG mask. This loop should exit after 2-3 iterations max.
3596 */
3597 if (bp->common.int_block != INT_BLOCK_HC) {
3598 u32 cnt = 0, igu_acked;
3599 do {
3600 igu_acked = REG_RD(bp,
3601 IGU_REG_ATTENTION_ACK_BITS);
3602 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3603 (++cnt < MAX_IGU_ATTN_ACK_TO));
3604 if (!igu_acked)
3605 DP(NETIF_MSG_HW,
3606 "Failed to verify IGU ack on time\n");
3607 barrier();
3608 }
3592 REG_WR(bp, nig_int_mask_addr, nig_mask); 3609 REG_WR(bp, nig_int_mask_addr, nig_mask);
3593 bnx2x_release_phy_lock(bp); 3610 bnx2x_release_phy_lock(bp);
3594 } 3611 }
@@ -4572,7 +4589,6 @@ static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4572 mmiowb(); /* keep prod updates ordered */ 4589 mmiowb(); /* keep prod updates ordered */
4573} 4590}
4574 4591
4575#ifdef BCM_CNIC
4576static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid, 4592static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4577 union event_ring_elem *elem) 4593 union event_ring_elem *elem)
4578{ 4594{
@@ -4594,7 +4610,6 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4594 bnx2x_cnic_cfc_comp(bp, cid, err); 4610 bnx2x_cnic_cfc_comp(bp, cid, err);
4595 return 0; 4611 return 0;
4596} 4612}
4597#endif
4598 4613
4599static void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4614static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4600{ 4615{
@@ -4635,11 +4650,9 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4635 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 4650 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4636 case BNX2X_FILTER_MAC_PENDING: 4651 case BNX2X_FILTER_MAC_PENDING:
4637 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 4652 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4638#ifdef BCM_CNIC 4653 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4639 if (cid == BNX2X_ISCSI_ETH_CID(bp))
4640 vlan_mac_obj = &bp->iscsi_l2_mac_obj; 4654 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4641 else 4655 else
4642#endif
4643 vlan_mac_obj = &bp->sp_objs[cid].mac_obj; 4656 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4644 4657
4645 break; 4658 break;
@@ -4665,9 +4678,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4665 4678
4666} 4679}
4667 4680
4668#ifdef BCM_CNIC
4669static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4681static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4670#endif
4671 4682
4672static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4683static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4673{ 4684{
@@ -4678,14 +4689,12 @@ static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4678 /* Send rx_mode command again if was requested */ 4689 /* Send rx_mode command again if was requested */
4679 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state)) 4690 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4680 bnx2x_set_storm_rx_mode(bp); 4691 bnx2x_set_storm_rx_mode(bp);
4681#ifdef BCM_CNIC
4682 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, 4692 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4683 &bp->sp_state)) 4693 &bp->sp_state))
4684 bnx2x_set_iscsi_eth_rx_mode(bp, true); 4694 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4685 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, 4695 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4686 &bp->sp_state)) 4696 &bp->sp_state))
4687 bnx2x_set_iscsi_eth_rx_mode(bp, false); 4697 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4688#endif
4689 4698
4690 netif_addr_unlock_bh(bp->dev); 4699 netif_addr_unlock_bh(bp->dev);
4691} 4700}
@@ -4747,7 +4756,6 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4747 q); 4756 q);
4748 } 4757 }
4749 4758
4750#ifdef BCM_CNIC
4751 if (!NO_FCOE(bp)) { 4759 if (!NO_FCOE(bp)) {
4752 fp = &bp->fp[FCOE_IDX(bp)]; 4760 fp = &bp->fp[FCOE_IDX(bp)];
4753 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; 4761 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -4770,22 +4778,16 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
4770 bnx2x_link_report(bp); 4778 bnx2x_link_report(bp);
4771 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); 4779 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4772 } 4780 }
4773#else
4774 /* If no FCoE ring - ACK MCP now */
4775 bnx2x_link_report(bp);
4776 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4777#endif /* BCM_CNIC */
4778} 4781}
4779 4782
4780static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4783static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4781 struct bnx2x *bp, u32 cid) 4784 struct bnx2x *bp, u32 cid)
4782{ 4785{
4783 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4786 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4784#ifdef BCM_CNIC 4787
4785 if (cid == BNX2X_FCOE_ETH_CID(bp)) 4788 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4786 return &bnx2x_fcoe_sp_obj(bp, q_obj); 4789 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4787 else 4790 else
4788#endif
4789 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; 4791 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4790} 4792}
4791 4793
@@ -4793,6 +4795,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4793{ 4795{
4794 u16 hw_cons, sw_cons, sw_prod; 4796 u16 hw_cons, sw_cons, sw_prod;
4795 union event_ring_elem *elem; 4797 union event_ring_elem *elem;
4798 u8 echo;
4796 u32 cid; 4799 u32 cid;
4797 u8 opcode; 4800 u8 opcode;
4798 int spqe_cnt = 0; 4801 int spqe_cnt = 0;
@@ -4847,10 +4850,11 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4847 */ 4850 */
4848 DP(BNX2X_MSG_SP, 4851 DP(BNX2X_MSG_SP,
4849 "got delete ramrod for MULTI[%d]\n", cid); 4852 "got delete ramrod for MULTI[%d]\n", cid);
4850#ifdef BCM_CNIC 4853
4851 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 4854 if (CNIC_LOADED(bp) &&
4855 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4852 goto next_spqe; 4856 goto next_spqe;
4853#endif 4857
4854 q_obj = bnx2x_cid_to_q_obj(bp, cid); 4858 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4855 4859
4856 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL)) 4860 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
@@ -4875,21 +4879,34 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4875 break; 4879 break;
4876 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4880 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4877 goto next_spqe; 4881 goto next_spqe;
4882
4878 case EVENT_RING_OPCODE_FUNCTION_UPDATE: 4883 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4879 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 4884 echo = elem->message.data.function_update_event.echo;
4880 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 4885 if (echo == SWITCH_UPDATE) {
4881 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE); 4886 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4887 "got FUNC_SWITCH_UPDATE ramrod\n");
4888 if (f_obj->complete_cmd(
4889 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
4890 break;
4882 4891
4883 /* We will perform the Queues update from sp_rtnl task 4892 } else {
4884 * as all Queue SP operations should run under 4893 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4885 * rtnl_lock. 4894 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4886 */ 4895 f_obj->complete_cmd(bp, f_obj,
4887 smp_mb__before_clear_bit(); 4896 BNX2X_F_CMD_AFEX_UPDATE);
4888 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, 4897
4889 &bp->sp_rtnl_state); 4898 /* We will perform the Queues update from
4890 smp_mb__after_clear_bit(); 4899 * sp_rtnl task as all Queue SP operations
4900 * should run under rtnl_lock.
4901 */
4902 smp_mb__before_clear_bit();
4903 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4904 &bp->sp_rtnl_state);
4905 smp_mb__after_clear_bit();
4906
4907 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4908 }
4891 4909
4892 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4893 goto next_spqe; 4910 goto next_spqe;
4894 4911
4895 case EVENT_RING_OPCODE_AFEX_VIF_LISTS: 4912 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
@@ -4999,11 +5016,10 @@ static void bnx2x_sp_task(struct work_struct *work)
4999 5016
5000 /* SP events: STAT_QUERY and others */ 5017 /* SP events: STAT_QUERY and others */
5001 if (status & BNX2X_DEF_SB_IDX) { 5018 if (status & BNX2X_DEF_SB_IDX) {
5002#ifdef BCM_CNIC
5003 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5019 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5004 5020
5005 if ((!NO_FCOE(bp)) && 5021 if (FCOE_INIT(bp) &&
5006 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5022 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5007 /* 5023 /*
5008 * Prevent local bottom-halves from running as 5024 * Prevent local bottom-halves from running as
5009 * we are going to change the local NAPI list. 5025 * we are going to change the local NAPI list.
@@ -5012,7 +5028,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5012 napi_schedule(&bnx2x_fcoe(bp, napi)); 5028 napi_schedule(&bnx2x_fcoe(bp, napi));
5013 local_bh_enable(); 5029 local_bh_enable();
5014 } 5030 }
5015#endif 5031
5016 /* Handle EQ completions */ 5032 /* Handle EQ completions */
5017 bnx2x_eq_int(bp); 5033 bnx2x_eq_int(bp);
5018 5034
@@ -5050,8 +5066,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5050 return IRQ_HANDLED; 5066 return IRQ_HANDLED;
5051#endif 5067#endif
5052 5068
5053#ifdef BCM_CNIC 5069 if (CNIC_LOADED(bp)) {
5054 {
5055 struct cnic_ops *c_ops; 5070 struct cnic_ops *c_ops;
5056 5071
5057 rcu_read_lock(); 5072 rcu_read_lock();
@@ -5060,7 +5075,7 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5060 c_ops->cnic_handler(bp->cnic_data, NULL); 5075 c_ops->cnic_handler(bp->cnic_data, NULL);
5061 rcu_read_unlock(); 5076 rcu_read_unlock();
5062 } 5077 }
5063#endif 5078
5064 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 5079 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
5065 5080
5066 return IRQ_HANDLED; 5081 return IRQ_HANDLED;
@@ -5498,12 +5513,10 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5498 unsigned long rx_mode_flags = 0, ramrod_flags = 0; 5513 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5499 unsigned long rx_accept_flags = 0, tx_accept_flags = 0; 5514 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5500 5515
5501#ifdef BCM_CNIC
5502 if (!NO_FCOE(bp)) 5516 if (!NO_FCOE(bp))
5503 5517
5504 /* Configure rx_mode of FCoE Queue */ 5518 /* Configure rx_mode of FCoE Queue */
5505 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags); 5519 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5506#endif
5507 5520
5508 switch (bp->rx_mode) { 5521 switch (bp->rx_mode) {
5509 case BNX2X_RX_MODE_NONE: 5522 case BNX2X_RX_MODE_NONE:
@@ -5624,12 +5637,12 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5624 5637
5625static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp) 5638static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5626{ 5639{
5627 return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT; 5640 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5628} 5641}
5629 5642
5630static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp) 5643static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5631{ 5644{
5632 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; 5645 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5633} 5646}
5634 5647
5635static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5648static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
@@ -5720,23 +5733,25 @@ static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5720 txdata->tx_pkt = 0; 5733 txdata->tx_pkt = 0;
5721} 5734}
5722 5735
5736static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5737{
5738 int i;
5739
5740 for_each_tx_queue_cnic(bp, i)
5741 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5742}
5723static void bnx2x_init_tx_rings(struct bnx2x *bp) 5743static void bnx2x_init_tx_rings(struct bnx2x *bp)
5724{ 5744{
5725 int i; 5745 int i;
5726 u8 cos; 5746 u8 cos;
5727 5747
5728 for_each_tx_queue(bp, i) 5748 for_each_eth_queue(bp, i)
5729 for_each_cos_in_tx_queue(&bp->fp[i], cos) 5749 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5730 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); 5750 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5731} 5751}
5732 5752
5733void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5753void bnx2x_nic_init_cnic(struct bnx2x *bp)
5734{ 5754{
5735 int i;
5736
5737 for_each_eth_queue(bp, i)
5738 bnx2x_init_eth_fp(bp, i);
5739#ifdef BCM_CNIC
5740 if (!NO_FCOE(bp)) 5755 if (!NO_FCOE(bp))
5741 bnx2x_init_fcoe_fp(bp); 5756 bnx2x_init_fcoe_fp(bp);
5742 5757
@@ -5744,8 +5759,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5744 BNX2X_VF_ID_INVALID, false, 5759 BNX2X_VF_ID_INVALID, false,
5745 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp)); 5760 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5746 5761
5747#endif 5762 /* ensure status block indices were read */
5763 rmb();
5764 bnx2x_init_rx_rings_cnic(bp);
5765 bnx2x_init_tx_rings_cnic(bp);
5766
5767 /* flush all */
5768 mb();
5769 mmiowb();
5770}
5748 5771
5772void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5773{
5774 int i;
5775
5776 for_each_eth_queue(bp, i)
5777 bnx2x_init_eth_fp(bp, i);
5749 /* Initialize MOD_ABS interrupts */ 5778 /* Initialize MOD_ABS interrupts */
5750 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 5779 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5751 bp->common.shmem_base, bp->common.shmem2_base, 5780 bp->common.shmem_base, bp->common.shmem2_base,
@@ -6031,10 +6060,9 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6031 msleep(50); 6060 msleep(50);
6032 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON); 6061 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6033 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON); 6062 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6034#ifndef BCM_CNIC 6063 if (!CNIC_SUPPORT(bp))
6035 /* set NIC mode */ 6064 /* set NIC mode */
6036 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6065 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6037#endif
6038 6066
6039 /* Enable inputs of parser neighbor blocks */ 6067 /* Enable inputs of parser neighbor blocks */
6040 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff); 6068 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
@@ -6049,6 +6077,8 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
6049 6077
6050static void bnx2x_enable_blocks_attention(struct bnx2x *bp) 6078static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6051{ 6079{
6080 u32 val;
6081
6052 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 6082 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6053 if (!CHIP_IS_E1x(bp)) 6083 if (!CHIP_IS_E1x(bp))
6054 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40); 6084 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
@@ -6082,17 +6112,14 @@ static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6082/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */ 6112/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6083/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */ 6113/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6084 6114
6085 if (CHIP_REV_IS_FPGA(bp)) 6115 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6086 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000); 6116 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6087 else if (!CHIP_IS_E1x(bp)) 6117 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6088 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 6118 if (!CHIP_IS_E1x(bp))
6089 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF 6119 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6090 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT 6120 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6091 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN 6121 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6092 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED 6122
6093 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
6094 else
6095 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6096 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0); 6123 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6097 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0); 6124 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6098 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0); 6125 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
@@ -6185,18 +6212,16 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6185 return; 6212 return;
6186 6213
6187 /* Fan failure is indicated by SPIO 5 */ 6214 /* Fan failure is indicated by SPIO 5 */
6188 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5, 6215 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6189 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6190 6216
6191 /* set to active low mode */ 6217 /* set to active low mode */
6192 val = REG_RD(bp, MISC_REG_SPIO_INT); 6218 val = REG_RD(bp, MISC_REG_SPIO_INT);
6193 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6219 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6194 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6195 REG_WR(bp, MISC_REG_SPIO_INT, val); 6220 REG_WR(bp, MISC_REG_SPIO_INT, val);
6196 6221
6197 /* enable interrupt to signal the IGU */ 6222 /* enable interrupt to signal the IGU */
6198 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6223 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6199 val |= (1 << MISC_REGISTERS_SPIO_5); 6224 val |= MISC_SPIO_SPIO5;
6200 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val); 6225 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6201} 6226}
6202 6227
@@ -6256,6 +6281,10 @@ void bnx2x_pf_disable(struct bnx2x *bp)
6256static void bnx2x__common_init_phy(struct bnx2x *bp) 6281static void bnx2x__common_init_phy(struct bnx2x *bp)
6257{ 6282{
6258 u32 shmem_base[2], shmem2_base[2]; 6283 u32 shmem_base[2], shmem2_base[2];
6284 /* Avoid common init in case MFW supports LFA */
6285 if (SHMEM2_RD(bp, size) >
6286 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6287 return;
6259 shmem_base[0] = bp->common.shmem_base; 6288 shmem_base[0] = bp->common.shmem_base;
6260 shmem2_base[0] = bp->common.shmem2_base; 6289 shmem2_base[0] = bp->common.shmem2_base;
6261 if (!CHIP_IS_E1x(bp)) { 6290 if (!CHIP_IS_E1x(bp)) {
@@ -6522,9 +6551,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6522 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6551 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6523 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6552 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6524 6553
6525#ifdef BCM_CNIC 6554 if (CNIC_SUPPORT(bp))
6526 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON); 6555 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6527#endif
6528 6556
6529 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON); 6557 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6530 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT); 6558 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
@@ -6611,18 +6639,18 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6611 6639
6612 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON); 6640 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6613 6641
6614#ifdef BCM_CNIC 6642 if (CNIC_SUPPORT(bp)) {
6615 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6643 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6616 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc); 6644 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6617 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b); 6645 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6618 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a); 6646 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6619 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116); 6647 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6620 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b); 6648 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6621 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf); 6649 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6622 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09); 6650 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6623 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f); 6651 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6624 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7); 6652 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6625#endif 6653 }
6626 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6654 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6627 6655
6628 if (sizeof(union cdu_context) != 1024) 6656 if (sizeof(union cdu_context) != 1024)
@@ -6786,11 +6814,11 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6786 /* QM cid (connection) count */ 6814 /* QM cid (connection) count */
6787 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET); 6815 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6788 6816
6789#ifdef BCM_CNIC 6817 if (CNIC_SUPPORT(bp)) {
6790 bnx2x_init_block(bp, BLOCK_TM, init_phase); 6818 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6791 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6819 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6792 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6820 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6793#endif 6821 }
6794 6822
6795 bnx2x_init_block(bp, BLOCK_DORQ, init_phase); 6823 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6796 6824
@@ -6877,9 +6905,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6877 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6905 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6878 } 6906 }
6879 6907
6880#ifdef BCM_CNIC 6908 if (CNIC_SUPPORT(bp))
6881 bnx2x_init_block(bp, BLOCK_SRC, init_phase); 6909 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6882#endif 6910
6883 bnx2x_init_block(bp, BLOCK_CDU, init_phase); 6911 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6884 bnx2x_init_block(bp, BLOCK_CFC, init_phase); 6912 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6885 6913
@@ -6955,7 +6983,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6955 6983
6956 /* If SPIO5 is set to generate interrupts, enable it for this port */ 6984 /* If SPIO5 is set to generate interrupts, enable it for this port */
6957 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN); 6985 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6958 if (val & (1 << MISC_REGISTERS_SPIO_5)) { 6986 if (val & MISC_SPIO_SPIO5) {
6959 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : 6987 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6960 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); 6988 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6961 val = REG_RD(bp, reg_addr); 6989 val = REG_RD(bp, reg_addr);
@@ -7040,6 +7068,130 @@ static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7040 bnx2x_ilt_wr(bp, i, 0); 7068 bnx2x_ilt_wr(bp, i, 0);
7041} 7069}
7042 7070
7071
7072static void bnx2x_init_searcher(struct bnx2x *bp)
7073{
7074 int port = BP_PORT(bp);
7075 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7076 /* T1 hash bits value determines the T1 number of entries */
7077 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7078}
7079
7080static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7081{
7082 int rc;
7083 struct bnx2x_func_state_params func_params = {NULL};
7084 struct bnx2x_func_switch_update_params *switch_update_params =
7085 &func_params.params.switch_update;
7086
7087 /* Prepare parameters for function state transitions */
7088 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7089 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7090
7091 func_params.f_obj = &bp->func_obj;
7092 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7093
7094 /* Function parameters */
7095 switch_update_params->suspend = suspend;
7096
7097 rc = bnx2x_func_state_change(bp, &func_params);
7098
7099 return rc;
7100}
7101
7102static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7103{
7104 int rc, i, port = BP_PORT(bp);
7105 int vlan_en = 0, mac_en[NUM_MACS];
7106
7107
7108 /* Close input from network */
7109 if (bp->mf_mode == SINGLE_FUNCTION) {
7110 bnx2x_set_rx_filter(&bp->link_params, 0);
7111 } else {
7112 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7113 NIG_REG_LLH0_FUNC_EN);
7114 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7115 NIG_REG_LLH0_FUNC_EN, 0);
7116 for (i = 0; i < NUM_MACS; i++) {
7117 mac_en[i] = REG_RD(bp, port ?
7118 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7119 4 * i) :
7120 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7121 4 * i));
7122 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7123 4 * i) :
7124 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7125 }
7126 }
7127
7128 /* Close BMC to host */
7129 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7130 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7131
7132 /* Suspend Tx switching to the PF. Completion of this ramrod
7133 * further guarantees that all the packets of that PF / child
7134 * VFs in BRB were processed by the Parser, so it is safe to
7135 * change the NIC_MODE register.
7136 */
7137 rc = bnx2x_func_switch_update(bp, 1);
7138 if (rc) {
7139 BNX2X_ERR("Can't suspend tx-switching!\n");
7140 return rc;
7141 }
7142
7143 /* Change NIC_MODE register */
7144 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7145
7146 /* Open input from network */
7147 if (bp->mf_mode == SINGLE_FUNCTION) {
7148 bnx2x_set_rx_filter(&bp->link_params, 1);
7149 } else {
7150 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7151 NIG_REG_LLH0_FUNC_EN, vlan_en);
7152 for (i = 0; i < NUM_MACS; i++) {
7153 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7154 4 * i) :
7155 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7156 mac_en[i]);
7157 }
7158 }
7159
7160 /* Enable BMC to host */
7161 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7162 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7163
7164 /* Resume Tx switching to the PF */
7165 rc = bnx2x_func_switch_update(bp, 0);
7166 if (rc) {
7167 BNX2X_ERR("Can't resume tx-switching!\n");
7168 return rc;
7169 }
7170
7171 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7172 return 0;
7173}
7174
7175int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7176{
7177 int rc;
7178
7179 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7180
7181 if (CONFIGURE_NIC_MODE(bp)) {
7182 /* Configrue searcher as part of function hw init */
7183 bnx2x_init_searcher(bp);
7184
7185 /* Reset NIC mode */
7186 rc = bnx2x_reset_nic_mode(bp);
7187 if (rc)
7188 BNX2X_ERR("Can't change NIC mode!\n");
7189 return rc;
7190 }
7191
7192 return 0;
7193}
7194
7043static int bnx2x_init_hw_func(struct bnx2x *bp) 7195static int bnx2x_init_hw_func(struct bnx2x *bp)
7044{ 7196{
7045 int port = BP_PORT(bp); 7197 int port = BP_PORT(bp);
@@ -7082,17 +7234,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7082 } 7234 }
7083 bnx2x_ilt_init_op(bp, INITOP_SET); 7235 bnx2x_ilt_init_op(bp, INITOP_SET);
7084 7236
7085#ifdef BCM_CNIC 7237 if (!CONFIGURE_NIC_MODE(bp)) {
7086 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM); 7238 bnx2x_init_searcher(bp);
7087 7239 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7088 /* T1 hash bits value determines the T1 number of entries */ 7240 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7089 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); 7241 } else {
7090#endif 7242 /* Set NIC mode */
7243 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7244 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
7091 7245
7092#ifndef BCM_CNIC 7246 }
7093 /* set NIC mode */
7094 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7095#endif /* BCM_CNIC */
7096 7247
7097 if (!CHIP_IS_E1x(bp)) { 7248 if (!CHIP_IS_E1x(bp)) {
7098 u32 pf_conf = IGU_PF_CONF_FUNC_EN; 7249 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
@@ -7343,6 +7494,20 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
7343} 7494}
7344 7495
7345 7496
7497void bnx2x_free_mem_cnic(struct bnx2x *bp)
7498{
7499 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7500
7501 if (!CHIP_IS_E1x(bp))
7502 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7503 sizeof(struct host_hc_status_block_e2));
7504 else
7505 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7506 sizeof(struct host_hc_status_block_e1x));
7507
7508 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7509}
7510
7346void bnx2x_free_mem(struct bnx2x *bp) 7511void bnx2x_free_mem(struct bnx2x *bp)
7347{ 7512{
7348 int i; 7513 int i;
@@ -7367,17 +7532,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
7367 7532
7368 BNX2X_FREE(bp->ilt->lines); 7533 BNX2X_FREE(bp->ilt->lines);
7369 7534
7370#ifdef BCM_CNIC
7371 if (!CHIP_IS_E1x(bp))
7372 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7373 sizeof(struct host_hc_status_block_e2));
7374 else
7375 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7376 sizeof(struct host_hc_status_block_e1x));
7377
7378 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7379#endif
7380
7381 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 7535 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7382 7536
7383 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, 7537 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
@@ -7445,24 +7599,44 @@ alloc_mem_err:
7445 return -ENOMEM; 7599 return -ENOMEM;
7446} 7600}
7447 7601
7448 7602int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7449int bnx2x_alloc_mem(struct bnx2x *bp)
7450{ 7603{
7451 int i, allocated, context_size;
7452
7453#ifdef BCM_CNIC
7454 if (!CHIP_IS_E1x(bp)) 7604 if (!CHIP_IS_E1x(bp))
7455 /* size = the status block + ramrod buffers */ 7605 /* size = the status block + ramrod buffers */
7456 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 7606 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7457 sizeof(struct host_hc_status_block_e2)); 7607 sizeof(struct host_hc_status_block_e2));
7458 else 7608 else
7459 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping, 7609 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7460 sizeof(struct host_hc_status_block_e1x)); 7610 &bp->cnic_sb_mapping,
7611 sizeof(struct
7612 host_hc_status_block_e1x));
7461 7613
7462 /* allocate searcher T2 table */ 7614 if (CONFIGURE_NIC_MODE(bp))
7463 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 7615 /* allocate searcher T2 table, as it wan't allocated before */
7464#endif 7616 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7617
7618 /* write address to which L5 should insert its values */
7619 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7620 &bp->slowpath->drv_info_to_mcp;
7465 7621
7622 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7623 goto alloc_mem_err;
7624
7625 return 0;
7626
7627alloc_mem_err:
7628 bnx2x_free_mem_cnic(bp);
7629 BNX2X_ERR("Can't allocate memory\n");
7630 return -ENOMEM;
7631}
7632
7633int bnx2x_alloc_mem(struct bnx2x *bp)
7634{
7635 int i, allocated, context_size;
7636
7637 if (!CONFIGURE_NIC_MODE(bp))
7638 /* allocate searcher T2 table */
7639 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7466 7640
7467 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 7641 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7468 sizeof(struct host_sp_status_block)); 7642 sizeof(struct host_sp_status_block));
@@ -7470,11 +7644,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
7470 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 7644 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7471 sizeof(struct bnx2x_slowpath)); 7645 sizeof(struct bnx2x_slowpath));
7472 7646
7473#ifdef BCM_CNIC
7474 /* write address to which L5 should insert its values */
7475 bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
7476#endif
7477
7478 /* Allocated memory for FW statistics */ 7647 /* Allocated memory for FW statistics */
7479 if (bnx2x_alloc_fw_stats_mem(bp)) 7648 if (bnx2x_alloc_fw_stats_mem(bp))
7480 goto alloc_mem_err; 7649 goto alloc_mem_err;
@@ -7596,14 +7765,12 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7596{ 7765{
7597 unsigned long ramrod_flags = 0; 7766 unsigned long ramrod_flags = 0;
7598 7767
7599#ifdef BCM_CNIC
7600 if (is_zero_ether_addr(bp->dev->dev_addr) && 7768 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7601 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) { 7769 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7602 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7770 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7603 "Ignoring Zero MAC for STORAGE SD mode\n"); 7771 "Ignoring Zero MAC for STORAGE SD mode\n");
7604 return 0; 7772 return 0;
7605 } 7773 }
7606#endif
7607 7774
7608 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n"); 7775 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7609 7776
@@ -7632,7 +7799,8 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
7632 bnx2x_enable_msi(bp); 7799 bnx2x_enable_msi(bp);
7633 /* falling through... */ 7800 /* falling through... */
7634 case INT_MODE_INTx: 7801 case INT_MODE_INTx:
7635 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7802 bp->num_ethernet_queues = 1;
7803 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
7636 BNX2X_DEV_INFO("set number of queues to 1\n"); 7804 BNX2X_DEV_INFO("set number of queues to 1\n");
7637 break; 7805 break;
7638 default: 7806 default:
@@ -7644,9 +7812,10 @@ void bnx2x_set_int_mode(struct bnx2x *bp)
7644 bp->flags & USING_SINGLE_MSIX_FLAG) { 7812 bp->flags & USING_SINGLE_MSIX_FLAG) {
7645 /* failed to enable multiple MSI-X */ 7813 /* failed to enable multiple MSI-X */
7646 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n", 7814 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7647 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7815 bp->num_queues,
7816 1 + bp->num_cnic_queues);
7648 7817
7649 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7818 bp->num_queues = 1 + bp->num_cnic_queues;
7650 7819
7651 /* Try to enable MSI */ 7820 /* Try to enable MSI */
7652 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) && 7821 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
@@ -7679,9 +7848,9 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7679 ilt_client->flags = ILT_CLIENT_SKIP_MEM; 7848 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7680 ilt_client->start = line; 7849 ilt_client->start = line;
7681 line += bnx2x_cid_ilt_lines(bp); 7850 line += bnx2x_cid_ilt_lines(bp);
7682#ifdef BCM_CNIC 7851
7683 line += CNIC_ILT_LINES; 7852 if (CNIC_SUPPORT(bp))
7684#endif 7853 line += CNIC_ILT_LINES;
7685 ilt_client->end = line - 1; 7854 ilt_client->end = line - 1;
7686 7855
7687 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7856 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
@@ -7714,49 +7883,43 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7714 ilog2(ilt_client->page_size >> 12)); 7883 ilog2(ilt_client->page_size >> 12));
7715 7884
7716 } 7885 }
7717 /* SRC */
7718 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7719#ifdef BCM_CNIC
7720 ilt_client->client_num = ILT_CLIENT_SRC;
7721 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7722 ilt_client->flags = 0;
7723 ilt_client->start = line;
7724 line += SRC_ILT_LINES;
7725 ilt_client->end = line - 1;
7726 7886
7727 DP(NETIF_MSG_IFUP, 7887 if (CNIC_SUPPORT(bp)) {
7728 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7888 /* SRC */
7729 ilt_client->start, 7889 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7730 ilt_client->end, 7890 ilt_client->client_num = ILT_CLIENT_SRC;
7731 ilt_client->page_size, 7891 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7732 ilt_client->flags, 7892 ilt_client->flags = 0;
7733 ilog2(ilt_client->page_size >> 12)); 7893 ilt_client->start = line;
7894 line += SRC_ILT_LINES;
7895 ilt_client->end = line - 1;
7734 7896
7735#else 7897 DP(NETIF_MSG_IFUP,
7736 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM); 7898 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7737#endif 7899 ilt_client->start,
7900 ilt_client->end,
7901 ilt_client->page_size,
7902 ilt_client->flags,
7903 ilog2(ilt_client->page_size >> 12));
7738 7904
7739 /* TM */ 7905 /* TM */
7740 ilt_client = &ilt->clients[ILT_CLIENT_TM]; 7906 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7741#ifdef BCM_CNIC 7907 ilt_client->client_num = ILT_CLIENT_TM;
7742 ilt_client->client_num = ILT_CLIENT_TM; 7908 ilt_client->page_size = TM_ILT_PAGE_SZ;
7743 ilt_client->page_size = TM_ILT_PAGE_SZ; 7909 ilt_client->flags = 0;
7744 ilt_client->flags = 0; 7910 ilt_client->start = line;
7745 ilt_client->start = line; 7911 line += TM_ILT_LINES;
7746 line += TM_ILT_LINES; 7912 ilt_client->end = line - 1;
7747 ilt_client->end = line - 1;
7748 7913
7749 DP(NETIF_MSG_IFUP, 7914 DP(NETIF_MSG_IFUP,
7750 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n", 7915 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7751 ilt_client->start, 7916 ilt_client->start,
7752 ilt_client->end, 7917 ilt_client->end,
7753 ilt_client->page_size, 7918 ilt_client->page_size,
7754 ilt_client->flags, 7919 ilt_client->flags,
7755 ilog2(ilt_client->page_size >> 12)); 7920 ilog2(ilt_client->page_size >> 12));
7921 }
7756 7922
7757#else
7758 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
7759#endif
7760 BUG_ON(line > ILT_MAX_LINES); 7923 BUG_ON(line > ILT_MAX_LINES);
7761} 7924}
7762 7925
@@ -7823,7 +7986,7 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7823 } 7986 }
7824} 7987}
7825 7988
7826int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, 7989static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7827 struct bnx2x_queue_state_params *q_params, 7990 struct bnx2x_queue_state_params *q_params,
7828 struct bnx2x_queue_setup_tx_only_params *tx_only_params, 7991 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
7829 int tx_index, bool leading) 7992 int tx_index, bool leading)
@@ -7924,6 +8087,9 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7924 /* Set the command */ 8087 /* Set the command */
7925 q_params.cmd = BNX2X_Q_CMD_SETUP; 8088 q_params.cmd = BNX2X_Q_CMD_SETUP;
7926 8089
8090 if (IS_FCOE_FP(fp))
8091 bp->fcoe_init = true;
8092
7927 /* Change the state to SETUP */ 8093 /* Change the state to SETUP */
7928 rc = bnx2x_queue_state_change(bp, &q_params); 8094 rc = bnx2x_queue_state_change(bp, &q_params);
7929 if (rc) { 8095 if (rc) {
@@ -8037,12 +8203,12 @@ static void bnx2x_reset_func(struct bnx2x *bp)
8037 SB_DISABLED); 8203 SB_DISABLED);
8038 } 8204 }
8039 8205
8040#ifdef BCM_CNIC 8206 if (CNIC_LOADED(bp))
8041 /* CNIC SB */ 8207 /* CNIC SB */
8042 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8208 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8043 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)), 8209 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8044 SB_DISABLED); 8210 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8045#endif 8211
8046 /* SP SB */ 8212 /* SP SB */
8047 REG_WR8(bp, BAR_CSTRORM_INTMEM + 8213 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8048 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), 8214 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
@@ -8061,19 +8227,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
8061 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0); 8227 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8062 } 8228 }
8063 8229
8064#ifdef BCM_CNIC 8230 if (CNIC_LOADED(bp)) {
8065 /* Disable Timer scan */ 8231 /* Disable Timer scan */
8066 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0); 8232 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8067 /* 8233 /*
8068 * Wait for at least 10ms and up to 2 second for the timers scan to 8234 * Wait for at least 10ms and up to 2 second for the timers
8069 * complete 8235 * scan to complete
8070 */ 8236 */
8071 for (i = 0; i < 200; i++) { 8237 for (i = 0; i < 200; i++) {
8072 msleep(10); 8238 msleep(10);
8073 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4)) 8239 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8074 break; 8240 break;
8241 }
8075 } 8242 }
8076#endif
8077 /* Clear ILT */ 8243 /* Clear ILT */
8078 bnx2x_clear_func_ilt(bp, func); 8244 bnx2x_clear_func_ilt(bp, func);
8079 8245
@@ -8409,13 +8575,24 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8409 /* Close multi and leading connections 8575 /* Close multi and leading connections
8410 * Completions for ramrods are collected in a synchronous way 8576 * Completions for ramrods are collected in a synchronous way
8411 */ 8577 */
8412 for_each_queue(bp, i) 8578 for_each_eth_queue(bp, i)
8413 if (bnx2x_stop_queue(bp, i)) 8579 if (bnx2x_stop_queue(bp, i))
8414#ifdef BNX2X_STOP_ON_ERROR 8580#ifdef BNX2X_STOP_ON_ERROR
8415 return; 8581 return;
8416#else 8582#else
8417 goto unload_error; 8583 goto unload_error;
8418#endif 8584#endif
8585
8586 if (CNIC_LOADED(bp)) {
8587 for_each_cnic_queue(bp, i)
8588 if (bnx2x_stop_queue(bp, i))
8589#ifdef BNX2X_STOP_ON_ERROR
8590 return;
8591#else
8592 goto unload_error;
8593#endif
8594 }
8595
8419 /* If SP settings didn't get completed so far - something 8596 /* If SP settings didn't get completed so far - something
8420 * very wrong has happen. 8597 * very wrong has happen.
8421 */ 8598 */
@@ -8437,6 +8614,8 @@ unload_error:
8437 bnx2x_netif_stop(bp, 1); 8614 bnx2x_netif_stop(bp, 1);
8438 /* Delete all NAPI objects */ 8615 /* Delete all NAPI objects */
8439 bnx2x_del_all_napi(bp); 8616 bnx2x_del_all_napi(bp);
8617 if (CNIC_LOADED(bp))
8618 bnx2x_del_all_napi_cnic(bp);
8440 8619
8441 /* Release IRQs */ 8620 /* Release IRQs */
8442 bnx2x_free_irq(bp); 8621 bnx2x_free_irq(bp);
@@ -8558,7 +8737,8 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8558 8737
8559 /* Get shmem offset */ 8738 /* Get shmem offset */
8560 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); 8739 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8561 validity_offset = offsetof(struct shmem_region, validity_map[0]); 8740 validity_offset =
8741 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
8562 8742
8563 /* Clear validity map flags */ 8743 /* Clear validity map flags */
8564 if (shmem > 0) 8744 if (shmem > 0)
@@ -8651,7 +8831,11 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8651 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU | 8831 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8652 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE; 8832 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8653 8833
8654 /* Don't reset the following blocks */ 8834 /* Don't reset the following blocks.
8835 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
8836 * reset, as in 4 port device they might still be owned
8837 * by the MCP (there is only one leader per path).
8838 */
8655 not_reset_mask1 = 8839 not_reset_mask1 =
8656 MISC_REGISTERS_RESET_REG_1_RST_HC | 8840 MISC_REGISTERS_RESET_REG_1_RST_HC |
8657 MISC_REGISTERS_RESET_REG_1_RST_PXPV | 8841 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
@@ -8667,19 +8851,19 @@ static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8667 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE | 8851 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8668 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B | 8852 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8669 MISC_REGISTERS_RESET_REG_2_RST_ATC | 8853 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8670 MISC_REGISTERS_RESET_REG_2_PGLC; 8854 MISC_REGISTERS_RESET_REG_2_PGLC |
8855 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8856 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8857 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8858 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8859 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8860 MISC_REGISTERS_RESET_REG_2_UMAC1;
8671 8861
8672 /* 8862 /*
8673 * Keep the following blocks in reset: 8863 * Keep the following blocks in reset:
8674 * - all xxMACs are handled by the bnx2x_link code. 8864 * - all xxMACs are handled by the bnx2x_link code.
8675 */ 8865 */
8676 stay_reset2 = 8866 stay_reset2 =
8677 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8678 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8679 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8680 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8681 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8682 MISC_REGISTERS_RESET_REG_2_UMAC1 |
8683 MISC_REGISTERS_RESET_REG_2_XMAC | 8867 MISC_REGISTERS_RESET_REG_2_XMAC |
8684 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT; 8868 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
8685 8869
@@ -8769,6 +8953,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8769 int cnt = 1000; 8953 int cnt = 1000;
8770 u32 val = 0; 8954 u32 val = 0;
8771 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2; 8955 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8956 u32 tags_63_32 = 0;
8772 8957
8773 8958
8774 /* Empty the Tetris buffer, wait for 1s */ 8959 /* Empty the Tetris buffer, wait for 1s */
@@ -8778,10 +8963,14 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8778 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0); 8963 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8779 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1); 8964 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8780 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2); 8965 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8966 if (CHIP_IS_E3(bp))
8967 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
8968
8781 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) && 8969 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8782 ((port_is_idle_0 & 0x1) == 0x1) && 8970 ((port_is_idle_0 & 0x1) == 0x1) &&
8783 ((port_is_idle_1 & 0x1) == 0x1) && 8971 ((port_is_idle_1 & 0x1) == 0x1) &&
8784 (pgl_exp_rom2 == 0xffffffff)) 8972 (pgl_exp_rom2 == 0xffffffff) &&
8973 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
8785 break; 8974 break;
8786 usleep_range(1000, 1000); 8975 usleep_range(1000, 1000);
8787 } while (cnt-- > 0); 8976 } while (cnt-- > 0);
@@ -8838,9 +9027,6 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8838 9027
8839 /* TBD: Add resetting the NO_MCP mode DB here */ 9028 /* TBD: Add resetting the NO_MCP mode DB here */
8840 9029
8841 /* PXP */
8842 bnx2x_pxp_prep(bp);
8843
8844 /* Open the gates #2, #3 and #4 */ 9030 /* Open the gates #2, #3 and #4 */
8845 bnx2x_set_234_gates(bp, false); 9031 bnx2x_set_234_gates(bp, false);
8846 9032
@@ -8850,7 +9036,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8850 return 0; 9036 return 0;
8851} 9037}
8852 9038
8853int bnx2x_leader_reset(struct bnx2x *bp) 9039static int bnx2x_leader_reset(struct bnx2x *bp)
8854{ 9040{
8855 int rc = 0; 9041 int rc = 0;
8856 bool global = bnx2x_reset_is_global(bp); 9042 bool global = bnx2x_reset_is_global(bp);
@@ -9234,7 +9420,7 @@ static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
9234 bnx2x_undi_int_disable_e1h(bp); 9420 bnx2x_undi_int_disable_e1h(bp);
9235} 9421}
9236 9422
9237static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp) 9423static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
9238{ 9424{
9239 u32 val, base_addr, offset, mask, reset_reg; 9425 u32 val, base_addr, offset, mask, reset_reg;
9240 bool mac_stopped = false; 9426 bool mac_stopped = false;
@@ -9301,8 +9487,7 @@ static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
9301#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff) 9487#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9302#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq)) 9488#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9303 9489
9304static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, 9490static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9305 u8 inc)
9306{ 9491{
9307 u16 rcq, bd; 9492 u16 rcq, bd;
9308 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port)); 9493 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
@@ -9317,7 +9502,7 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
9317 port, bd, rcq); 9502 port, bd, rcq);
9318} 9503}
9319 9504
9320static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) 9505static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9321{ 9506{
9322 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 9507 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9323 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); 9508 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
@@ -9329,7 +9514,21 @@ static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
9329 return 0; 9514 return 0;
9330} 9515}
9331 9516
9332static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp) 9517static struct bnx2x_prev_path_list *
9518 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9519{
9520 struct bnx2x_prev_path_list *tmp_list;
9521
9522 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9523 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9524 bp->pdev->bus->number == tmp_list->bus &&
9525 BP_PATH(bp) == tmp_list->path)
9526 return tmp_list;
9527
9528 return NULL;
9529}
9530
9531static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9333{ 9532{
9334 struct bnx2x_prev_path_list *tmp_list; 9533 struct bnx2x_prev_path_list *tmp_list;
9335 int rc = false; 9534 int rc = false;
@@ -9353,7 +9552,7 @@ static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
9353 return rc; 9552 return rc;
9354} 9553}
9355 9554
9356static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) 9555static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9357{ 9556{
9358 struct bnx2x_prev_path_list *tmp_list; 9557 struct bnx2x_prev_path_list *tmp_list;
9359 int rc; 9558 int rc;
@@ -9367,6 +9566,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
9367 tmp_list->bus = bp->pdev->bus->number; 9566 tmp_list->bus = bp->pdev->bus->number;
9368 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 9567 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9369 tmp_list->path = BP_PATH(bp); 9568 tmp_list->path = BP_PATH(bp);
9569 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9370 9570
9371 rc = down_interruptible(&bnx2x_prev_sem); 9571 rc = down_interruptible(&bnx2x_prev_sem);
9372 if (rc) { 9572 if (rc) {
@@ -9382,7 +9582,7 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
9382 return rc; 9582 return rc;
9383} 9583}
9384 9584
9385static int __devinit bnx2x_do_flr(struct bnx2x *bp) 9585static int bnx2x_do_flr(struct bnx2x *bp)
9386{ 9586{
9387 int i; 9587 int i;
9388 u16 status; 9588 u16 status;
@@ -9422,7 +9622,7 @@ clear:
9422 return 0; 9622 return 0;
9423} 9623}
9424 9624
9425static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) 9625static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9426{ 9626{
9427 int rc; 9627 int rc;
9428 9628
@@ -9460,9 +9660,10 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9460 return rc; 9660 return rc;
9461} 9661}
9462 9662
9463static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) 9663static int bnx2x_prev_unload_common(struct bnx2x *bp)
9464{ 9664{
9465 u32 reset_reg, tmp_reg = 0, rc; 9665 u32 reset_reg, tmp_reg = 0, rc;
9666 bool prev_undi = false;
9466 /* It is possible a previous function received 'common' answer, 9667 /* It is possible a previous function received 'common' answer,
9467 * but hasn't loaded yet, therefore creating a scenario of 9668 * but hasn't loaded yet, therefore creating a scenario of
9468 * multiple functions receiving 'common' on the same path. 9669 * multiple functions receiving 'common' on the same path.
@@ -9477,7 +9678,6 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9477 /* Reset should be performed after BRB is emptied */ 9678 /* Reset should be performed after BRB is emptied */
9478 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { 9679 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9479 u32 timer_count = 1000; 9680 u32 timer_count = 1000;
9480 bool prev_undi = false;
9481 9681
9482 /* Close the MAC Rx to prevent BRB from filling up */ 9682 /* Close the MAC Rx to prevent BRB from filling up */
9483 bnx2x_prev_unload_close_mac(bp); 9683 bnx2x_prev_unload_close_mac(bp);
@@ -9527,7 +9727,7 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9527 /* No packets are in the pipeline, path is ready for reset */ 9727 /* No packets are in the pipeline, path is ready for reset */
9528 bnx2x_reset_common(bp); 9728 bnx2x_reset_common(bp);
9529 9729
9530 rc = bnx2x_prev_mark_path(bp); 9730 rc = bnx2x_prev_mark_path(bp, prev_undi);
9531 if (rc) { 9731 if (rc) {
9532 bnx2x_prev_mcp_done(bp); 9732 bnx2x_prev_mcp_done(bp);
9533 return rc; 9733 return rc;
@@ -9543,7 +9743,7 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9543 * to clear the interrupt which detected this from the pglueb and the was done 9743 * to clear the interrupt which detected this from the pglueb and the was done
9544 * bit 9744 * bit
9545 */ 9745 */
9546static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) 9746static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9547{ 9747{
9548 if (!CHIP_IS_E1x(bp)) { 9748 if (!CHIP_IS_E1x(bp)) {
9549 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); 9749 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
@@ -9555,10 +9755,11 @@ static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9555 } 9755 }
9556} 9756}
9557 9757
9558static int __devinit bnx2x_prev_unload(struct bnx2x *bp) 9758static int bnx2x_prev_unload(struct bnx2x *bp)
9559{ 9759{
9560 int time_counter = 10; 9760 int time_counter = 10;
9561 u32 rc, fw, hw_lock_reg, hw_lock_val; 9761 u32 rc, fw, hw_lock_reg, hw_lock_val;
9762 struct bnx2x_prev_path_list *prev_list;
9562 BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); 9763 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9563 9764
9564 /* clear hw from errors which may have resulted from an interrupted 9765 /* clear hw from errors which may have resulted from an interrupted
@@ -9617,12 +9818,18 @@ static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9617 rc = -EBUSY; 9818 rc = -EBUSY;
9618 } 9819 }
9619 9820
9821 /* Mark function if its port was used to boot from SAN */
9822 prev_list = bnx2x_prev_path_get_entry(bp);
9823 if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
9824 bp->link_params.feature_config_flags |=
9825 FEATURE_CONFIG_BOOT_FROM_SAN;
9826
9620 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc); 9827 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9621 9828
9622 return rc; 9829 return rc;
9623} 9830}
9624 9831
9625static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) 9832static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
9626{ 9833{
9627 u32 val, val2, val3, val4, id, boot_mode; 9834 u32 val, val2, val3, val4, id, boot_mode;
9628 u16 pmc; 9835 u16 pmc;
@@ -9701,6 +9908,14 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9701 9908
9702 bp->link_params.shmem_base = bp->common.shmem_base; 9909 bp->link_params.shmem_base = bp->common.shmem_base;
9703 bp->link_params.shmem2_base = bp->common.shmem2_base; 9910 bp->link_params.shmem2_base = bp->common.shmem2_base;
9911 if (SHMEM2_RD(bp, size) >
9912 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
9913 bp->link_params.lfa_base =
9914 REG_RD(bp, bp->common.shmem2_base +
9915 (u32)offsetof(struct shmem2_region,
9916 lfa_host_addr[BP_PORT(bp)]));
9917 else
9918 bp->link_params.lfa_base = 0;
9704 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n", 9919 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9705 bp->common.shmem_base, bp->common.shmem2_base); 9920 bp->common.shmem_base, bp->common.shmem2_base);
9706 9921
@@ -9748,6 +9963,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9748 bp->link_params.feature_config_flags |= 9963 bp->link_params.feature_config_flags |=
9749 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9964 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
9750 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9965 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
9966
9967 bp->link_params.feature_config_flags |=
9968 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
9969 FEATURE_CONFIG_MT_SUPPORT : 0;
9970
9751 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? 9971 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9752 BC_SUPPORTS_PFC_STATS : 0; 9972 BC_SUPPORTS_PFC_STATS : 0;
9753 9973
@@ -9792,7 +10012,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9792#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID) 10012#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
9793#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR) 10013#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
9794 10014
9795static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) 10015static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
9796{ 10016{
9797 int pfid = BP_FUNC(bp); 10017 int pfid = BP_FUNC(bp);
9798 int igu_sb_id; 10018 int igu_sb_id;
@@ -9809,7 +10029,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
9809 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x + 10029 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
9810 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn); 10030 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
9811 10031
9812 return; 10032 return 0;
9813 } 10033 }
9814 10034
9815 /* IGU in normal mode - read CAM */ 10035 /* IGU in normal mode - read CAM */
@@ -9843,12 +10063,15 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
9843 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); 10063 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
9844#endif 10064#endif
9845 10065
9846 if (igu_sb_cnt == 0) 10066 if (igu_sb_cnt == 0) {
9847 BNX2X_ERR("CAM configuration error\n"); 10067 BNX2X_ERR("CAM configuration error\n");
10068 return -EINVAL;
10069 }
10070
10071 return 0;
9848} 10072}
9849 10073
9850static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 10074static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
9851 u32 switch_cfg)
9852{ 10075{
9853 int cfg_size = 0, idx, port = BP_PORT(bp); 10076 int cfg_size = 0, idx, port = BP_PORT(bp);
9854 10077
@@ -9946,7 +10169,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9946 bp->port.supported[1]); 10169 bp->port.supported[1]);
9947} 10170}
9948 10171
9949static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp) 10172static void bnx2x_link_settings_requested(struct bnx2x *bp)
9950{ 10173{
9951 u32 link_config, idx, cfg_size = 0; 10174 u32 link_config, idx, cfg_size = 0;
9952 bp->port.advertising[0] = 0; 10175 bp->port.advertising[0] = 0;
@@ -10115,11 +10338,13 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
10115 10338
10116 bp->link_params.req_flow_ctrl[idx] = (link_config & 10339 bp->link_params.req_flow_ctrl[idx] = (link_config &
10117 PORT_FEATURE_FLOW_CONTROL_MASK); 10340 PORT_FEATURE_FLOW_CONTROL_MASK);
10118 if ((bp->link_params.req_flow_ctrl[idx] == 10341 if (bp->link_params.req_flow_ctrl[idx] ==
10119 BNX2X_FLOW_CTRL_AUTO) && 10342 BNX2X_FLOW_CTRL_AUTO) {
10120 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) { 10343 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10121 bp->link_params.req_flow_ctrl[idx] = 10344 bp->link_params.req_flow_ctrl[idx] =
10122 BNX2X_FLOW_CTRL_NONE; 10345 BNX2X_FLOW_CTRL_NONE;
10346 else
10347 bnx2x_set_requested_fc(bp);
10123 } 10348 }
10124 10349
10125 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n", 10350 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
@@ -10130,7 +10355,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
10130 } 10355 }
10131} 10356}
10132 10357
10133static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi) 10358static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10134{ 10359{
10135 mac_hi = cpu_to_be16(mac_hi); 10360 mac_hi = cpu_to_be16(mac_hi);
10136 mac_lo = cpu_to_be32(mac_lo); 10361 mac_lo = cpu_to_be32(mac_lo);
@@ -10138,7 +10363,7 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10138 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo)); 10363 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
10139} 10364}
10140 10365
10141static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 10366static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10142{ 10367{
10143 int port = BP_PORT(bp); 10368 int port = BP_PORT(bp);
10144 u32 config; 10369 u32 config;
@@ -10199,17 +10424,6 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10199 bp->mdio.prtad = 10424 bp->mdio.prtad =
10200 XGXS_EXT_PHY_ADDR(ext_phy_config); 10425 XGXS_EXT_PHY_ADDR(ext_phy_config);
10201 10426
10202 /*
10203 * Check if hw lock is required to access MDC/MDIO bus to the PHY(s)
10204 * In MF mode, it is set to cover self test cases
10205 */
10206 if (IS_MF(bp))
10207 bp->port.need_hw_lock = 1;
10208 else
10209 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
10210 bp->common.shmem_base,
10211 bp->common.shmem2_base);
10212
10213 /* Configure link feature according to nvram value */ 10427 /* Configure link feature according to nvram value */
10214 eee_mode = (((SHMEM_RD(bp, dev_info. 10428 eee_mode = (((SHMEM_RD(bp, dev_info.
10215 port_feature_config[port].eee_power_mode)) & 10429 port_feature_config[port].eee_power_mode)) &
@@ -10227,12 +10441,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10227void bnx2x_get_iscsi_info(struct bnx2x *bp) 10441void bnx2x_get_iscsi_info(struct bnx2x *bp)
10228{ 10442{
10229 u32 no_flags = NO_ISCSI_FLAG; 10443 u32 no_flags = NO_ISCSI_FLAG;
10230#ifdef BCM_CNIC
10231 int port = BP_PORT(bp); 10444 int port = BP_PORT(bp);
10232
10233 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10445 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10234 drv_lic_key[port].max_iscsi_conn); 10446 drv_lic_key[port].max_iscsi_conn);
10235 10447
10448 if (!CNIC_SUPPORT(bp)) {
10449 bp->flags |= no_flags;
10450 return;
10451 }
10452
10236 /* Get the number of maximum allowed iSCSI connections */ 10453 /* Get the number of maximum allowed iSCSI connections */
10237 bp->cnic_eth_dev.max_iscsi_conn = 10454 bp->cnic_eth_dev.max_iscsi_conn =
10238 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> 10455 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
@@ -10247,13 +10464,10 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
10247 */ 10464 */
10248 if (!bp->cnic_eth_dev.max_iscsi_conn) 10465 if (!bp->cnic_eth_dev.max_iscsi_conn)
10249 bp->flags |= no_flags; 10466 bp->flags |= no_flags;
10250#else 10467
10251 bp->flags |= no_flags;
10252#endif
10253} 10468}
10254 10469
10255#ifdef BCM_CNIC 10470static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10256static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10257{ 10471{
10258 /* Port info */ 10472 /* Port info */
10259 bp->cnic_eth_dev.fcoe_wwn_port_name_hi = 10473 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
@@ -10267,16 +10481,18 @@ static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10267 bp->cnic_eth_dev.fcoe_wwn_node_name_lo = 10481 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10268 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower); 10482 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10269} 10483}
10270#endif 10484static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10271static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10272{ 10485{
10273#ifdef BCM_CNIC
10274 int port = BP_PORT(bp); 10486 int port = BP_PORT(bp);
10275 int func = BP_ABS_FUNC(bp); 10487 int func = BP_ABS_FUNC(bp);
10276
10277 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, 10488 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10278 drv_lic_key[port].max_fcoe_conn); 10489 drv_lic_key[port].max_fcoe_conn);
10279 10490
10491 if (!CNIC_SUPPORT(bp)) {
10492 bp->flags |= NO_FCOE_FLAG;
10493 return;
10494 }
10495
10280 /* Get the number of maximum allowed FCoE connections */ 10496 /* Get the number of maximum allowed FCoE connections */
10281 bp->cnic_eth_dev.max_fcoe_conn = 10497 bp->cnic_eth_dev.max_fcoe_conn =
10282 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> 10498 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
@@ -10311,8 +10527,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10311 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) 10527 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10312 bnx2x_get_ext_wwn_info(bp, func); 10528 bnx2x_get_ext_wwn_info(bp, func);
10313 10529
10314 } else if (IS_MF_FCOE_SD(bp)) 10530 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
10315 bnx2x_get_ext_wwn_info(bp, func); 10531 bnx2x_get_ext_wwn_info(bp, func);
10532 }
10316 10533
10317 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn); 10534 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10318 10535
@@ -10322,12 +10539,9 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10322 */ 10539 */
10323 if (!bp->cnic_eth_dev.max_fcoe_conn) 10540 if (!bp->cnic_eth_dev.max_fcoe_conn)
10324 bp->flags |= NO_FCOE_FLAG; 10541 bp->flags |= NO_FCOE_FLAG;
10325#else
10326 bp->flags |= NO_FCOE_FLAG;
10327#endif
10328} 10542}
10329 10543
10330static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) 10544static void bnx2x_get_cnic_info(struct bnx2x *bp)
10331{ 10545{
10332 /* 10546 /*
10333 * iSCSI may be dynamically disabled but reading 10547 * iSCSI may be dynamically disabled but reading
@@ -10338,143 +10552,162 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
10338 bnx2x_get_fcoe_info(bp); 10552 bnx2x_get_fcoe_info(bp);
10339} 10553}
10340 10554
10341static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) 10555static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10342{ 10556{
10343 u32 val, val2; 10557 u32 val, val2;
10344 int func = BP_ABS_FUNC(bp); 10558 int func = BP_ABS_FUNC(bp);
10345 int port = BP_PORT(bp); 10559 int port = BP_PORT(bp);
10346#ifdef BCM_CNIC
10347 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; 10560 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10348 u8 *fip_mac = bp->fip_mac; 10561 u8 *fip_mac = bp->fip_mac;
10349#endif
10350 10562
10351 /* Zero primary MAC configuration */ 10563 if (IS_MF(bp)) {
10352 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10564 /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
10353
10354 if (BP_NOMCP(bp)) {
10355 BNX2X_ERROR("warning: random MAC workaround active\n");
10356 eth_hw_addr_random(bp->dev);
10357 } else if (IS_MF(bp)) {
10358 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10359 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10360 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10361 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10362 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10363
10364#ifdef BCM_CNIC
10365 /*
10366 * iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
10367 * FCoE MAC then the appropriate feature should be disabled. 10565 * FCoE MAC then the appropriate feature should be disabled.
10368 * 10566 * In non SD mode features configuration comes from struct
10369 * In non SD mode features configuration comes from 10567 * func_ext_config.
10370 * struct func_ext_config.
10371 */ 10568 */
10372 if (!IS_MF_SD(bp)) { 10569 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
10373 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); 10570 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10374 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { 10571 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10375 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10572 val2 = MF_CFG_RD(bp, func_ext_config[func].
10376 iscsi_mac_addr_upper); 10573 iscsi_mac_addr_upper);
10377 val = MF_CFG_RD(bp, func_ext_config[func]. 10574 val = MF_CFG_RD(bp, func_ext_config[func].
10378 iscsi_mac_addr_lower); 10575 iscsi_mac_addr_lower);
10379 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10576 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10380 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10577 BNX2X_DEV_INFO
10381 iscsi_mac); 10578 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10382 } else 10579 } else {
10383 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; 10580 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10581 }
10384 10582
10385 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { 10583 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10386 val2 = MF_CFG_RD(bp, func_ext_config[func]. 10584 val2 = MF_CFG_RD(bp, func_ext_config[func].
10387 fcoe_mac_addr_upper); 10585 fcoe_mac_addr_upper);
10388 val = MF_CFG_RD(bp, func_ext_config[func]. 10586 val = MF_CFG_RD(bp, func_ext_config[func].
10389 fcoe_mac_addr_lower); 10587 fcoe_mac_addr_lower);
10390 bnx2x_set_mac_buf(fip_mac, val, val2); 10588 bnx2x_set_mac_buf(fip_mac, val, val2);
10391 BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n", 10589 BNX2X_DEV_INFO
10392 fip_mac); 10590 ("Read FCoE L2 MAC: %pM\n", fip_mac);
10393 10591 } else {
10394 } else
10395 bp->flags |= NO_FCOE_FLAG; 10592 bp->flags |= NO_FCOE_FLAG;
10593 }
10396 10594
10397 bp->mf_ext_config = cfg; 10595 bp->mf_ext_config = cfg;
10398 10596
10399 } else { /* SD MODE */ 10597 } else { /* SD MODE */
10400 if (IS_MF_STORAGE_SD(bp)) { 10598 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10401 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10599 /* use primary mac as iscsi mac */
10402 /* use primary mac as iscsi mac */ 10600 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
10403 memcpy(iscsi_mac, bp->dev->dev_addr, 10601
10404 ETH_ALEN); 10602 BNX2X_DEV_INFO("SD ISCSI MODE\n");
10405 10603 BNX2X_DEV_INFO
10406 BNX2X_DEV_INFO("SD ISCSI MODE\n"); 10604 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10407 BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n", 10605 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
10408 iscsi_mac); 10606 /* use primary mac as fip mac */
10409 } else { /* FCoE */ 10607 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
10410 memcpy(fip_mac, bp->dev->dev_addr, 10608 BNX2X_DEV_INFO("SD FCoE MODE\n");
10411 ETH_ALEN); 10609 BNX2X_DEV_INFO
10412 BNX2X_DEV_INFO("SD FCoE MODE\n"); 10610 ("Read FIP MAC: %pM\n", fip_mac);
10413 BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
10414 fip_mac);
10415 }
10416 /* Zero primary MAC configuration */
10417 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10418 } 10611 }
10419 } 10612 }
10420 10613
10614 if (IS_MF_STORAGE_SD(bp))
10615 /* Zero primary MAC configuration */
10616 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10617
10421 if (IS_MF_FCOE_AFEX(bp)) 10618 if (IS_MF_FCOE_AFEX(bp))
10422 /* use FIP MAC as primary MAC */ 10619 /* use FIP MAC as primary MAC */
10423 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10620 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10424 10621
10425#endif
10426 } else { 10622 } else {
10427 /* in SF read MACs from port configuration */
10428 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10429 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10430 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10431
10432#ifdef BCM_CNIC
10433 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10623 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10434 iscsi_mac_upper); 10624 iscsi_mac_upper);
10435 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10625 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10436 iscsi_mac_lower); 10626 iscsi_mac_lower);
10437 bnx2x_set_mac_buf(iscsi_mac, val, val2); 10627 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10438 10628
10439 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10629 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10440 fcoe_fip_mac_upper); 10630 fcoe_fip_mac_upper);
10441 val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10631 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10442 fcoe_fip_mac_lower); 10632 fcoe_fip_mac_lower);
10443 bnx2x_set_mac_buf(fip_mac, val, val2); 10633 bnx2x_set_mac_buf(fip_mac, val, val2);
10444#endif
10445 } 10634 }
10446 10635
10447 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 10636 /* Disable iSCSI OOO if MAC configuration is invalid. */
10448 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10449
10450#ifdef BCM_CNIC
10451 /* Disable iSCSI if MAC configuration is
10452 * invalid.
10453 */
10454 if (!is_valid_ether_addr(iscsi_mac)) { 10637 if (!is_valid_ether_addr(iscsi_mac)) {
10455 bp->flags |= NO_ISCSI_FLAG; 10638 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10456 memset(iscsi_mac, 0, ETH_ALEN); 10639 memset(iscsi_mac, 0, ETH_ALEN);
10457 } 10640 }
10458 10641
10459 /* Disable FCoE if MAC configuration is 10642 /* Disable FCoE if MAC configuration is invalid. */
10460 * invalid.
10461 */
10462 if (!is_valid_ether_addr(fip_mac)) { 10643 if (!is_valid_ether_addr(fip_mac)) {
10463 bp->flags |= NO_FCOE_FLAG; 10644 bp->flags |= NO_FCOE_FLAG;
10464 memset(bp->fip_mac, 0, ETH_ALEN); 10645 memset(bp->fip_mac, 0, ETH_ALEN);
10465 } 10646 }
10466#endif 10647}
10648
10649static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
10650{
10651 u32 val, val2;
10652 int func = BP_ABS_FUNC(bp);
10653 int port = BP_PORT(bp);
10654
10655 /* Zero primary MAC configuration */
10656 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10657
10658 if (BP_NOMCP(bp)) {
10659 BNX2X_ERROR("warning: random MAC workaround active\n");
10660 eth_hw_addr_random(bp->dev);
10661 } else if (IS_MF(bp)) {
10662 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10663 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10664 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10665 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10666 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10667
10668 if (CNIC_SUPPORT(bp))
10669 bnx2x_get_cnic_mac_hwinfo(bp);
10670 } else {
10671 /* in SF read MACs from port configuration */
10672 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10673 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10674 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10675
10676 if (CNIC_SUPPORT(bp))
10677 bnx2x_get_cnic_mac_hwinfo(bp);
10678 }
10679
10680 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
10681 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10467 10682
10468 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr)) 10683 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
10469 dev_err(&bp->pdev->dev, 10684 dev_err(&bp->pdev->dev,
10470 "bad Ethernet MAC address configuration: %pM\n" 10685 "bad Ethernet MAC address configuration: %pM\n"
10471 "change it manually before bringing up the appropriate network interface\n", 10686 "change it manually before bringing up the appropriate network interface\n",
10472 bp->dev->dev_addr); 10687 bp->dev->dev_addr);
10688}
10473 10689
10690static bool bnx2x_get_dropless_info(struct bnx2x *bp)
10691{
10692 int tmp;
10693 u32 cfg;
10474 10694
10695 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
10696 /* Take function: tmp = func */
10697 tmp = BP_ABS_FUNC(bp);
10698 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
10699 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
10700 } else {
10701 /* Take port: tmp = port */
10702 tmp = BP_PORT(bp);
10703 cfg = SHMEM_RD(bp,
10704 dev_info.port_hw_config[tmp].generic_features);
10705 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
10706 }
10707 return cfg;
10475} 10708}
10476 10709
10477static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 10710static int bnx2x_get_hwinfo(struct bnx2x *bp)
10478{ 10711{
10479 int /*abs*/func = BP_ABS_FUNC(bp); 10712 int /*abs*/func = BP_ABS_FUNC(bp);
10480 int vn; 10713 int vn;
@@ -10516,6 +10749,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10516 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) { 10749 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10517 dev_err(&bp->pdev->dev, 10750 dev_err(&bp->pdev->dev,
10518 "FORCING Normal Mode failed!!!\n"); 10751 "FORCING Normal Mode failed!!!\n");
10752 bnx2x_release_hw_lock(bp,
10753 HW_LOCK_RESOURCE_RESET);
10519 return -EPERM; 10754 return -EPERM;
10520 } 10755 }
10521 } 10756 }
@@ -10526,9 +10761,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10526 } else 10761 } else
10527 BNX2X_DEV_INFO("IGU Normal Mode\n"); 10762 BNX2X_DEV_INFO("IGU Normal Mode\n");
10528 10763
10529 bnx2x_get_igu_cam_info(bp); 10764 rc = bnx2x_get_igu_cam_info(bp);
10530
10531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET); 10765 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10766 if (rc)
10767 return rc;
10532 } 10768 }
10533 10769
10534 /* 10770 /*
@@ -10697,7 +10933,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10697 return rc; 10933 return rc;
10698} 10934}
10699 10935
10700static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp) 10936static void bnx2x_read_fwinfo(struct bnx2x *bp)
10701{ 10937{
10702 int cnt, i, block_end, rodi; 10938 int cnt, i, block_end, rodi;
10703 char vpd_start[BNX2X_VPD_LEN+1]; 10939 char vpd_start[BNX2X_VPD_LEN+1];
@@ -10782,7 +11018,7 @@ out_not_found:
10782 return; 11018 return;
10783} 11019}
10784 11020
10785static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp) 11021static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
10786{ 11022{
10787 u32 flags = 0; 11023 u32 flags = 0;
10788 11024
@@ -10832,7 +11068,7 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10832 INIT_MODE_FLAGS(bp) = flags; 11068 INIT_MODE_FLAGS(bp) = flags;
10833} 11069}
10834 11070
10835static int __devinit bnx2x_init_bp(struct bnx2x *bp) 11071static int bnx2x_init_bp(struct bnx2x *bp)
10836{ 11072{
10837 int func; 11073 int func;
10838 int rc; 11074 int rc;
@@ -10840,9 +11076,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10840 mutex_init(&bp->port.phy_mutex); 11076 mutex_init(&bp->port.phy_mutex);
10841 mutex_init(&bp->fw_mb_mutex); 11077 mutex_init(&bp->fw_mb_mutex);
10842 spin_lock_init(&bp->stats_lock); 11078 spin_lock_init(&bp->stats_lock);
10843#ifdef BCM_CNIC 11079
10844 mutex_init(&bp->cnic_mutex);
10845#endif
10846 11080
10847 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 11081 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
10848 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); 11082 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -10880,10 +11114,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10880 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 11114 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
10881 11115
10882 bp->disable_tpa = disable_tpa; 11116 bp->disable_tpa = disable_tpa;
10883
10884#ifdef BCM_CNIC
10885 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11117 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10886#endif
10887 11118
10888 /* Set TPA flags */ 11119 /* Set TPA flags */
10889 if (bp->disable_tpa) { 11120 if (bp->disable_tpa) {
@@ -10897,7 +11128,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10897 if (CHIP_IS_E1(bp)) 11128 if (CHIP_IS_E1(bp))
10898 bp->dropless_fc = 0; 11129 bp->dropless_fc = 0;
10899 else 11130 else
10900 bp->dropless_fc = dropless_fc; 11131 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
10901 11132
10902 bp->mrrs = mrrs; 11133 bp->mrrs = mrrs;
10903 11134
@@ -10914,15 +11145,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10914 bp->timer.data = (unsigned long) bp; 11145 bp->timer.data = (unsigned long) bp;
10915 bp->timer.function = bnx2x_timer; 11146 bp->timer.function = bnx2x_timer;
10916 11147
10917 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); 11148 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
10918 bnx2x_dcbx_init_params(bp); 11149 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11150 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11151 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11152 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11153 bnx2x_dcbx_init_params(bp);
11154 } else {
11155 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11156 }
10919 11157
10920#ifdef BCM_CNIC
10921 if (CHIP_IS_E1x(bp)) 11158 if (CHIP_IS_E1x(bp))
10922 bp->cnic_base_cl_id = FP_SB_MAX_E1x; 11159 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
10923 else 11160 else
10924 bp->cnic_base_cl_id = FP_SB_MAX_E2; 11161 bp->cnic_base_cl_id = FP_SB_MAX_E2;
10925#endif
10926 11162
10927 /* multiple tx priority */ 11163 /* multiple tx priority */
10928 if (CHIP_IS_E1x(bp)) 11164 if (CHIP_IS_E1x(bp))
@@ -10932,6 +11168,16 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10932 if (CHIP_IS_E3B0(bp)) 11168 if (CHIP_IS_E3B0(bp))
10933 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 11169 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10934 11170
11171 /* We need at least one default status block for slow-path events,
11172 * second status block for the L2 queue, and a third status block for
11173 * CNIC if supproted.
11174 */
11175 if (CNIC_SUPPORT(bp))
11176 bp->min_msix_vec_cnt = 3;
11177 else
11178 bp->min_msix_vec_cnt = 2;
11179 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11180
10935 return rc; 11181 return rc;
10936} 11182}
10937 11183
@@ -11168,11 +11414,9 @@ void bnx2x_set_rx_mode(struct net_device *dev)
11168 } 11414 }
11169 11415
11170 bp->rx_mode = rx_mode; 11416 bp->rx_mode = rx_mode;
11171#ifdef BCM_CNIC
11172 /* handle ISCSI SD mode */ 11417 /* handle ISCSI SD mode */
11173 if (IS_MF_ISCSI_SD(bp)) 11418 if (IS_MF_ISCSI_SD(bp))
11174 bp->rx_mode = BNX2X_RX_MODE_NONE; 11419 bp->rx_mode = BNX2X_RX_MODE_NONE;
11175#endif
11176 11420
11177 /* Schedule the rx_mode command */ 11421 /* Schedule the rx_mode command */
11178 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) { 11422 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -11284,7 +11528,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11284#endif 11528#endif
11285 .ndo_setup_tc = bnx2x_setup_tc, 11529 .ndo_setup_tc = bnx2x_setup_tc,
11286 11530
11287#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC) 11531#ifdef NETDEV_FCOE_WWNN
11288 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11532 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11289#endif 11533#endif
11290}; 11534};
@@ -11307,9 +11551,8 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
11307 return 0; 11551 return 0;
11308} 11552}
11309 11553
11310static int __devinit bnx2x_init_dev(struct pci_dev *pdev, 11554static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
11311 struct net_device *dev, 11555 unsigned long board_type)
11312 unsigned long board_type)
11313{ 11556{
11314 struct bnx2x *bp; 11557 struct bnx2x *bp;
11315 int rc; 11558 int rc;
@@ -11346,6 +11589,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11346 goto err_out_disable; 11589 goto err_out_disable;
11347 } 11590 }
11348 11591
11592 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
11593 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
11594 PCICFG_REVESION_ID_ERROR_VAL) {
11595 pr_err("PCI device error, probably due to fan failure, aborting\n");
11596 rc = -ENODEV;
11597 goto err_out_disable;
11598 }
11599
11349 if (atomic_read(&pdev->enable_cnt) == 1) { 11600 if (atomic_read(&pdev->enable_cnt) == 1) {
11350 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 11601 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11351 if (rc) { 11602 if (rc) {
@@ -11481,8 +11732,7 @@ err_out:
11481 return rc; 11732 return rc;
11482} 11733}
11483 11734
11484static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp, 11735static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
11485 int *width, int *speed)
11486{ 11736{
11487 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL); 11737 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11488 11738
@@ -11750,9 +12000,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11750{ 12000{
11751 int cid_count = BNX2X_L2_MAX_CID(bp); 12001 int cid_count = BNX2X_L2_MAX_CID(bp);
11752 12002
11753#ifdef BCM_CNIC 12003 if (CNIC_SUPPORT(bp))
11754 cid_count += CNIC_CID_MAX; 12004 cid_count += CNIC_CID_MAX;
11755#endif
11756 return roundup(cid_count, QM_CID_ROUND); 12005 return roundup(cid_count, QM_CID_ROUND);
11757} 12006}
11758 12007
@@ -11762,7 +12011,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11762 * @dev: pci device 12011 * @dev: pci device
11763 * 12012 *
11764 */ 12013 */
11765static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) 12014static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
12015 int cnic_cnt)
11766{ 12016{
11767 int pos; 12017 int pos;
11768 u16 control; 12018 u16 control;
@@ -11774,7 +12024,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11774 * one fast path queue: one FP queue + SB for CNIC 12024 * one fast path queue: one FP queue + SB for CNIC
11775 */ 12025 */
11776 if (!pos) 12026 if (!pos)
11777 return 1 + CNIC_PRESENT; 12027 return 1 + cnic_cnt;
11778 12028
11779 /* 12029 /*
11780 * The value in the PCI configuration space is the index of the last 12030 * The value in the PCI configuration space is the index of the last
@@ -11786,14 +12036,16 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11786 return control & PCI_MSIX_FLAGS_QSIZE; 12036 return control & PCI_MSIX_FLAGS_QSIZE;
11787} 12037}
11788 12038
11789static int __devinit bnx2x_init_one(struct pci_dev *pdev, 12039struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
11790 const struct pci_device_id *ent) 12040
12041static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11791{ 12042{
11792 struct net_device *dev = NULL; 12043 struct net_device *dev = NULL;
11793 struct bnx2x *bp; 12044 struct bnx2x *bp;
11794 int pcie_width, pcie_speed; 12045 int pcie_width, pcie_speed;
11795 int rc, max_non_def_sbs; 12046 int rc, max_non_def_sbs;
11796 int rx_count, tx_count, rss_count, doorbell_size; 12047 int rx_count, tx_count, rss_count, doorbell_size;
12048 int cnic_cnt;
11797 /* 12049 /*
11798 * An estimated maximum supported CoS number according to the chip 12050 * An estimated maximum supported CoS number according to the chip
11799 * version. 12051 * version.
@@ -11837,21 +12089,22 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11837 return -ENODEV; 12089 return -ENODEV;
11838 } 12090 }
11839 12091
11840 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); 12092 cnic_cnt = 1;
12093 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
11841 12094
11842 WARN_ON(!max_non_def_sbs); 12095 WARN_ON(!max_non_def_sbs);
11843 12096
11844 /* Maximum number of RSS queues: one IGU SB goes to CNIC */ 12097 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
11845 rss_count = max_non_def_sbs - CNIC_PRESENT; 12098 rss_count = max_non_def_sbs - cnic_cnt;
11846 12099
11847 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */ 12100 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
11848 rx_count = rss_count + FCOE_PRESENT; 12101 rx_count = rss_count + cnic_cnt;
11849 12102
11850 /* 12103 /*
11851 * Maximum number of netdev Tx queues: 12104 * Maximum number of netdev Tx queues:
11852 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 12105 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
11853 */ 12106 */
11854 tx_count = rss_count * max_cos_est + FCOE_PRESENT; 12107 tx_count = rss_count * max_cos_est + cnic_cnt;
11855 12108
11856 /* dev zeroed in init_etherdev */ 12109 /* dev zeroed in init_etherdev */
11857 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); 12110 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11862,6 +12115,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11862 12115
11863 bp->igu_sb_cnt = max_non_def_sbs; 12116 bp->igu_sb_cnt = max_non_def_sbs;
11864 bp->msg_enable = debug; 12117 bp->msg_enable = debug;
12118 bp->cnic_support = cnic_cnt;
12119 bp->cnic_probe = bnx2x_cnic_probe;
12120
11865 pci_set_drvdata(pdev, dev); 12121 pci_set_drvdata(pdev, dev);
11866 12122
11867 rc = bnx2x_init_dev(pdev, dev, ent->driver_data); 12123 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
@@ -11870,6 +12126,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11870 return rc; 12126 return rc;
11871 } 12127 }
11872 12128
12129 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
11873 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); 12130 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
11874 12131
11875 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", 12132 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
@@ -11902,10 +12159,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11902 /* calc qm_cid_count */ 12159 /* calc qm_cid_count */
11903 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); 12160 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
11904 12161
11905#ifdef BCM_CNIC 12162 /* disable FCOE L2 queue for E1x*/
11906 /* disable FCOE L2 queue for E1x */
11907 if (CHIP_IS_E1x(bp)) 12163 if (CHIP_IS_E1x(bp))
11908 bp->flags |= NO_FCOE_FLAG; 12164 bp->flags |= NO_FCOE_FLAG;
12165
11909 /* disable FCOE for 57840 device, until FW supports it */ 12166 /* disable FCOE for 57840 device, until FW supports it */
11910 switch (ent->driver_data) { 12167 switch (ent->driver_data) {
11911 case BCM57840_O: 12168 case BCM57840_O:
@@ -11915,8 +12172,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11915 case BCM57840_MF: 12172 case BCM57840_MF:
11916 bp->flags |= NO_FCOE_FLAG; 12173 bp->flags |= NO_FCOE_FLAG;
11917 } 12174 }
11918#endif
11919
11920 12175
11921 /* Set bp->num_queues for MSI-X mode*/ 12176 /* Set bp->num_queues for MSI-X mode*/
11922 bnx2x_set_num_queues(bp); 12177 bnx2x_set_num_queues(bp);
@@ -11932,14 +12187,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11932 goto init_one_exit; 12187 goto init_one_exit;
11933 } 12188 }
11934 12189
11935#ifdef BCM_CNIC 12190
11936 if (!NO_FCOE(bp)) { 12191 if (!NO_FCOE(bp)) {
11937 /* Add storage MAC address */ 12192 /* Add storage MAC address */
11938 rtnl_lock(); 12193 rtnl_lock();
11939 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 12194 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
11940 rtnl_unlock(); 12195 rtnl_unlock();
11941 } 12196 }
11942#endif
11943 12197
11944 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 12198 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
11945 12199
@@ -11973,7 +12227,7 @@ init_one_exit:
11973 return rc; 12227 return rc;
11974} 12228}
11975 12229
11976static void __devexit bnx2x_remove_one(struct pci_dev *pdev) 12230static void bnx2x_remove_one(struct pci_dev *pdev)
11977{ 12231{
11978 struct net_device *dev = pci_get_drvdata(pdev); 12232 struct net_device *dev = pci_get_drvdata(pdev);
11979 struct bnx2x *bp; 12233 struct bnx2x *bp;
@@ -11984,14 +12238,12 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11984 } 12238 }
11985 bp = netdev_priv(dev); 12239 bp = netdev_priv(dev);
11986 12240
11987#ifdef BCM_CNIC
11988 /* Delete storage MAC address */ 12241 /* Delete storage MAC address */
11989 if (!NO_FCOE(bp)) { 12242 if (!NO_FCOE(bp)) {
11990 rtnl_lock(); 12243 rtnl_lock();
11991 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN); 12244 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
11992 rtnl_unlock(); 12245 rtnl_unlock();
11993 } 12246 }
11994#endif
11995 12247
11996#ifdef BCM_DCBNL 12248#ifdef BCM_DCBNL
11997 /* Delete app tlvs from dcbnl */ 12249 /* Delete app tlvs from dcbnl */
@@ -12039,15 +12291,17 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12039 12291
12040 bp->rx_mode = BNX2X_RX_MODE_NONE; 12292 bp->rx_mode = BNX2X_RX_MODE_NONE;
12041 12293
12042#ifdef BCM_CNIC 12294 if (CNIC_LOADED(bp))
12043 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); 12295 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12044#endif 12296
12045 /* Stop Tx */ 12297 /* Stop Tx */
12046 bnx2x_tx_disable(bp); 12298 bnx2x_tx_disable(bp);
12047 12299
12048 bnx2x_netif_stop(bp, 0); 12300 bnx2x_netif_stop(bp, 0);
12049 /* Delete all NAPI objects */ 12301 /* Delete all NAPI objects */
12050 bnx2x_del_all_napi(bp); 12302 bnx2x_del_all_napi(bp);
12303 if (CNIC_LOADED(bp))
12304 bnx2x_del_all_napi_cnic(bp);
12051 12305
12052 del_timer_sync(&bp->timer); 12306 del_timer_sync(&bp->timer);
12053 12307
@@ -12188,7 +12442,7 @@ static struct pci_driver bnx2x_pci_driver = {
12188 .name = DRV_MODULE_NAME, 12442 .name = DRV_MODULE_NAME,
12189 .id_table = bnx2x_pci_tbl, 12443 .id_table = bnx2x_pci_tbl,
12190 .probe = bnx2x_init_one, 12444 .probe = bnx2x_init_one,
12191 .remove = __devexit_p(bnx2x_remove_one), 12445 .remove = bnx2x_remove_one,
12192 .suspend = bnx2x_suspend, 12446 .suspend = bnx2x_suspend,
12193 .resume = bnx2x_resume, 12447 .resume = bnx2x_resume,
12194 .err_handler = &bnx2x_err_handler, 12448 .err_handler = &bnx2x_err_handler,
@@ -12238,7 +12492,6 @@ void bnx2x_notify_link_changed(struct bnx2x *bp)
12238module_init(bnx2x_init); 12492module_init(bnx2x_init);
12239module_exit(bnx2x_cleanup); 12493module_exit(bnx2x_cleanup);
12240 12494
12241#ifdef BCM_CNIC
12242/** 12495/**
12243 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s). 12496 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
12244 * 12497 *
@@ -12691,12 +12944,31 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12691{ 12944{
12692 struct bnx2x *bp = netdev_priv(dev); 12945 struct bnx2x *bp = netdev_priv(dev);
12693 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 12946 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12947 int rc;
12948
12949 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
12694 12950
12695 if (ops == NULL) { 12951 if (ops == NULL) {
12696 BNX2X_ERR("NULL ops received\n"); 12952 BNX2X_ERR("NULL ops received\n");
12697 return -EINVAL; 12953 return -EINVAL;
12698 } 12954 }
12699 12955
12956 if (!CNIC_SUPPORT(bp)) {
12957 BNX2X_ERR("Can't register CNIC when not supported\n");
12958 return -EOPNOTSUPP;
12959 }
12960
12961 if (!CNIC_LOADED(bp)) {
12962 rc = bnx2x_load_cnic(bp);
12963 if (rc) {
12964 BNX2X_ERR("CNIC-related load failed\n");
12965 return rc;
12966 }
12967
12968 }
12969
12970 bp->cnic_enabled = true;
12971
12700 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL); 12972 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12701 if (!bp->cnic_kwq) 12973 if (!bp->cnic_kwq)
12702 return -ENOMEM; 12974 return -ENOMEM;
@@ -12786,7 +13058,5 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12786 cp->starting_cid); 13058 cp->starting_cid);
12787 return cp; 13059 return cp;
12788} 13060}
12789EXPORT_SYMBOL(bnx2x_cnic_probe);
12790 13061
12791#endif /* BCM_CNIC */
12792 13062
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 1b1999d34c71..bc2f65b32649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2107,6 +2107,7 @@
2107#define NIG_REG_LLH1_ERROR_MASK 0x10090 2107#define NIG_REG_LLH1_ERROR_MASK 0x10090
2108/* [RW 8] event id for llh1 */ 2108/* [RW 8] event id for llh1 */
2109#define NIG_REG_LLH1_EVENT_ID 0x10088 2109#define NIG_REG_LLH1_EVENT_ID 0x10088
2110#define NIG_REG_LLH1_FUNC_EN 0x16104
2110#define NIG_REG_LLH1_FUNC_MEM 0x161c0 2111#define NIG_REG_LLH1_FUNC_MEM 0x161c0
2111#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160 2112#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
2112#define NIG_REG_LLH1_FUNC_MEM_SIZE 16 2113#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
@@ -2302,6 +2303,15 @@
2302 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to 2303 * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
2303 * accommodate the 9 input clients to ETS arbiter. */ 2304 * accommodate the 9 input clients to ETS arbiter. */
2304#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684 2305#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
2306/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
2307 * packets to BRB LB interface to forward the packet to the host. All
2308 * packets from MCP are forwarded to the network when this bit is cleared -
2309 * regardless of the configured destination in tx_mng_destination register.
2310 * When MCP-to-host paths for both ports 0 and 1 are disabled - the arbiter
2311 * for BRB LB interface is bypassed and PBF LB traffic is always selected to
2312 * send to BRB LB.
2313 */
2314#define NIG_REG_P0_TX_MNG_HOST_ENABLE 0x182f4
2305#define NIG_REG_P1_HWPFC_ENABLE 0x181d0 2315#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
2306#define NIG_REG_P1_MAC_IN_EN 0x185c0 2316#define NIG_REG_P1_MAC_IN_EN 0x185c0
2307/* [RW 1] Output enable for TX MAC interface */ 2317/* [RW 1] Output enable for TX MAC interface */
@@ -2418,6 +2428,12 @@
2418#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4 2428#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB 0x186e4
2419/* [R 1] TX FIFO for transmitting data to MAC is empty. */ 2429/* [R 1] TX FIFO for transmitting data to MAC is empty. */
2420#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594 2430#define NIG_REG_P1_TX_MACFIFO_EMPTY 0x18594
2431/* [RW 1] MCP-to-host path enable. Set this bit to enable the routing of MCP
2432 * packets to BRB LB interface to forward the packet to the host. All
2433 * packets from MCP are forwarded to the network when this bit is cleared -
2434 * regardless of the configured destination in tx_mng_destination register.
2435 */
2436#define NIG_REG_P1_TX_MNG_HOST_ENABLE 0x182f8
2421/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets 2437/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
2422 forwarded to the host. */ 2438 forwarded to the host. */
2423#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8 2439#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY 0x182b8
@@ -5482,6 +5498,7 @@
5482#define XMAC_CTRL_REG_RX_EN (0x1<<1) 5498#define XMAC_CTRL_REG_RX_EN (0x1<<1)
5483#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6) 5499#define XMAC_CTRL_REG_SOFT_RESET (0x1<<6)
5484#define XMAC_CTRL_REG_TX_EN (0x1<<0) 5500#define XMAC_CTRL_REG_TX_EN (0x1<<0)
5501#define XMAC_CTRL_REG_XLGMII_ALIGN_ENB (0x1<<7)
5485#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18) 5502#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN (0x1<<18)
5486#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17) 5503#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN (0x1<<17)
5487#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1) 5504#define XMAC_PFC_CTRL_HI_REG_FORCE_PFC_XON (0x1<<1)
@@ -5502,11 +5519,14 @@
5502#define XMAC_REG_PAUSE_CTRL 0x68 5519#define XMAC_REG_PAUSE_CTRL 0x68
5503#define XMAC_REG_PFC_CTRL 0x70 5520#define XMAC_REG_PFC_CTRL 0x70
5504#define XMAC_REG_PFC_CTRL_HI 0x74 5521#define XMAC_REG_PFC_CTRL_HI 0x74
5522#define XMAC_REG_RX_LSS_CTRL 0x50
5505#define XMAC_REG_RX_LSS_STATUS 0x58 5523#define XMAC_REG_RX_LSS_STATUS 0x58
5506/* [RW 14] Maximum packet size in receive direction; exclusive of preamble & 5524/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
5507 * CRC in strip mode */ 5525 * CRC in strip mode */
5508#define XMAC_REG_RX_MAX_SIZE 0x40 5526#define XMAC_REG_RX_MAX_SIZE 0x40
5509#define XMAC_REG_TX_CTRL 0x20 5527#define XMAC_REG_TX_CTRL 0x20
5528#define XMAC_RX_LSS_CTRL_REG_LOCAL_FAULT_DISABLE (0x1<<0)
5529#define XMAC_RX_LSS_CTRL_REG_REMOTE_FAULT_DISABLE (0x1<<1)
5510/* [RW 16] Indirect access to the XX table of the XX protection mechanism. 5530/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
5511 The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] - 5531 The fields are:[4:0] - tail pointer; 9:5] - Link List size; 14:10] -
5512 header pointer. */ 5532 header pointer. */
@@ -5922,6 +5942,16 @@
5922#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1 5942#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
5923#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 5943#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
5924#define MISC_REGISTERS_SPIO_SET_POS 8 5944#define MISC_REGISTERS_SPIO_SET_POS 8
5945#define MISC_SPIO_CLR_POS 16
5946#define MISC_SPIO_FLOAT (0xffL<<24)
5947#define MISC_SPIO_FLOAT_POS 24
5948#define MISC_SPIO_INPUT_HI_Z 2
5949#define MISC_SPIO_INT_OLD_SET_POS 16
5950#define MISC_SPIO_OUTPUT_HIGH 1
5951#define MISC_SPIO_OUTPUT_LOW 0
5952#define MISC_SPIO_SET_POS 8
5953#define MISC_SPIO_SPIO4 0x10
5954#define MISC_SPIO_SPIO5 0x20
5925#define HW_LOCK_MAX_RESOURCE_VALUE 31 5955#define HW_LOCK_MAX_RESOURCE_VALUE 31
5926#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 5956#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
5927#define HW_LOCK_RESOURCE_DRV_FLAGS 10 5957#define HW_LOCK_RESOURCE_DRV_FLAGS 10
@@ -6130,7 +6160,9 @@
6130#define PCICFG_COMMAND_INT_DISABLE (1<<10) 6160#define PCICFG_COMMAND_INT_DISABLE (1<<10)
6131#define PCICFG_COMMAND_RESERVED (0x1f<<11) 6161#define PCICFG_COMMAND_RESERVED (0x1f<<11)
6132#define PCICFG_STATUS_OFFSET 0x06 6162#define PCICFG_STATUS_OFFSET 0x06
6133#define PCICFG_REVESION_ID_OFFSET 0x08 6163#define PCICFG_REVISION_ID_OFFSET 0x08
6164#define PCICFG_REVESION_ID_MASK 0xff
6165#define PCICFG_REVESION_ID_ERROR_VAL 0xff
6134#define PCICFG_CACHE_LINE_SIZE 0x0c 6166#define PCICFG_CACHE_LINE_SIZE 0x0c
6135#define PCICFG_LATENCY_TIMER 0x0d 6167#define PCICFG_LATENCY_TIMER 0x0d
6136#define PCICFG_BAR_1_LOW 0x10 6168#define PCICFG_BAR_1_LOW 0x10
@@ -6672,6 +6704,7 @@
6672#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00 6704#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI 0x1B00
6673#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00 6705#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS 0x1E00
6674#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00 6706#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI 0x1F00
6707#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_KR2 0x3900
6675 6708
6676 6709
6677#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 6710#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
@@ -7046,7 +7079,8 @@ Theotherbitsarereservedandshouldbezero*/
7046#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12 7079#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT2 0x12
7047#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000 7080#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_ABILITY 0x4000
7048#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000 7081#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADV2_FEC_REQ 0x8000
7049#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150 0x96 7082#define MDIO_WC_REG_PCS_STATUS2 0x0021
7083#define MDIO_WC_REG_PMD_KR_CONTROL 0x0096
7050#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000 7084#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL 0x8000
7051#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e 7085#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1 0x800e
7052#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010 7086#define MDIO_WC_REG_XGXSBLK1_DESKEW 0x8010
@@ -7078,6 +7112,7 @@ Theotherbitsarereservedandshouldbezero*/
7078#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130 7112#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
7079#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131 7113#define MDIO_WC_REG_PAR_DET_10G_CTRL 0x8131
7080#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141 7114#define MDIO_WC_REG_XGXS_X2_CONTROL2 0x8141
7115#define MDIO_WC_REG_XGXS_X2_CONTROL3 0x8142
7081#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B 7116#define MDIO_WC_REG_XGXS_RX_LN_SWAP1 0x816B
7082#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169 7117#define MDIO_WC_REG_XGXS_TX_LN_SWAP1 0x8169
7083#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0 7118#define MDIO_WC_REG_GP2_STATUS_GP_2_0 0x81d0
@@ -7112,6 +7147,7 @@ Theotherbitsarereservedandshouldbezero*/
7112#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a 7147#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET 0x0a
7113#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00 7148#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK 0x7c00
7114#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000 7149#define MDIO_WC_REG_TX_FIR_TAP_ENABLE 0x8000
7150#define MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP 0x82e2
7115#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3 7151#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL 0x82e3
7116#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6 7152#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL 0x82e6
7117#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7 7153#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL 0x82e7
@@ -7129,9 +7165,16 @@ Theotherbitsarereservedandshouldbezero*/
7129#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e 7165#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
7130#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345 7166#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
7131#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349 7167#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
7168#define MDIO_WC_REG_DIGITAL5_LINK_STATUS 0x834d
7132#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e 7169#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
7133#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350 7170#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
7134#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368 7171#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
7172#define MDIO_WC_REG_CL73_USERB0_CTRL 0x8370
7173#define MDIO_WC_REG_CL73_USERB0_USTAT 0x8371
7174#define MDIO_WC_REG_CL73_BAM_CTRL1 0x8372
7175#define MDIO_WC_REG_CL73_BAM_CTRL2 0x8373
7176#define MDIO_WC_REG_CL73_BAM_CTRL3 0x8374
7177#define MDIO_WC_REG_CL73_BAM_CODE_FIELD 0x837b
7135#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390 7178#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
7136#define MDIO_WC_REG_TX66_CONTROL 0x83b0 7179#define MDIO_WC_REG_TX66_CONTROL 0x83b0
7137#define MDIO_WC_REG_RX66_CONTROL 0x83c0 7180#define MDIO_WC_REG_RX66_CONTROL 0x83c0
@@ -7145,7 +7188,17 @@ Theotherbitsarereservedandshouldbezero*/
7145#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9 7188#define MDIO_WC_REG_RX66_SCW3_MASK 0x83c9
7146#define MDIO_WC_REG_FX100_CTRL1 0x8400 7189#define MDIO_WC_REG_FX100_CTRL1 0x8400
7147#define MDIO_WC_REG_FX100_CTRL3 0x8402 7190#define MDIO_WC_REG_FX100_CTRL3 0x8402
7148 7191#define MDIO_WC_REG_CL82_USERB1_TX_CTRL5 0x8436
7192#define MDIO_WC_REG_CL82_USERB1_TX_CTRL6 0x8437
7193#define MDIO_WC_REG_CL82_USERB1_TX_CTRL7 0x8438
7194#define MDIO_WC_REG_CL82_USERB1_TX_CTRL9 0x8439
7195#define MDIO_WC_REG_CL82_USERB1_RX_CTRL10 0x843a
7196#define MDIO_WC_REG_CL82_USERB1_RX_CTRL11 0x843b
7197#define MDIO_WC_REG_ETA_CL73_OUI1 0x8453
7198#define MDIO_WC_REG_ETA_CL73_OUI2 0x8454
7199#define MDIO_WC_REG_ETA_CL73_OUI3 0x8455
7200#define MDIO_WC_REG_ETA_CL73_LD_BAM_CODE 0x8456
7201#define MDIO_WC_REG_ETA_CL73_LD_UD_CODE 0x8457
7149#define MDIO_WC_REG_MICROBLK_CMD 0xffc2 7202#define MDIO_WC_REG_MICROBLK_CMD 0xffc2
7150#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5 7203#define MDIO_WC_REG_MICROBLK_DL_STATUS 0xffc5
7151#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc 7204#define MDIO_WC_REG_MICROBLK_CMD3 0xffcc
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 614981c02264..b8b4b749daab 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5350,12 +5350,24 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5350 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) && 5350 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5351 (!test_bit(BNX2X_F_CMD_STOP, &o->pending))) 5351 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5352 next_state = BNX2X_F_STATE_STARTED; 5352 next_state = BNX2X_F_STATE_STARTED;
5353
5354 /* Switch_update ramrod can be sent in either started or
5355 * tx_stopped state, and it doesn't change the state.
5356 */
5357 else if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5358 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5359 next_state = BNX2X_F_STATE_STARTED;
5360
5353 else if (cmd == BNX2X_F_CMD_TX_STOP) 5361 else if (cmd == BNX2X_F_CMD_TX_STOP)
5354 next_state = BNX2X_F_STATE_TX_STOPPED; 5362 next_state = BNX2X_F_STATE_TX_STOPPED;
5355 5363
5356 break; 5364 break;
5357 case BNX2X_F_STATE_TX_STOPPED: 5365 case BNX2X_F_STATE_TX_STOPPED:
5358 if (cmd == BNX2X_F_CMD_TX_START) 5366 if ((cmd == BNX2X_F_CMD_SWITCH_UPDATE) &&
5367 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5368 next_state = BNX2X_F_STATE_TX_STOPPED;
5369
5370 else if (cmd == BNX2X_F_CMD_TX_START)
5359 next_state = BNX2X_F_STATE_STARTED; 5371 next_state = BNX2X_F_STATE_STARTED;
5360 5372
5361 break; 5373 break;
@@ -5637,6 +5649,28 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5637 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5649 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5638} 5650}
5639 5651
5652static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5653 struct bnx2x_func_state_params *params)
5654{
5655 struct bnx2x_func_sp_obj *o = params->f_obj;
5656 struct function_update_data *rdata =
5657 (struct function_update_data *)o->rdata;
5658 dma_addr_t data_mapping = o->rdata_mapping;
5659 struct bnx2x_func_switch_update_params *switch_update_params =
5660 &params->params.switch_update;
5661
5662 memset(rdata, 0, sizeof(*rdata));
5663
5664 /* Fill the ramrod data with provided parameters */
5665 rdata->tx_switch_suspend_change_flg = 1;
5666 rdata->tx_switch_suspend = switch_update_params->suspend;
5667 rdata->echo = SWITCH_UPDATE;
5668
5669 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5670 U64_HI(data_mapping),
5671 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5672}
5673
5640static inline int bnx2x_func_send_afex_update(struct bnx2x *bp, 5674static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5641 struct bnx2x_func_state_params *params) 5675 struct bnx2x_func_state_params *params)
5642{ 5676{
@@ -5657,6 +5691,7 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5657 cpu_to_le16(afex_update_params->afex_default_vlan); 5691 cpu_to_le16(afex_update_params->afex_default_vlan);
5658 rdata->allowed_priorities_change_flg = 1; 5692 rdata->allowed_priorities_change_flg = 1;
5659 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5693 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5694 rdata->echo = AFEX_UPDATE;
5660 5695
5661 /* No need for an explicit memory barrier here as long we would 5696 /* No need for an explicit memory barrier here as long we would
5662 * need to ensure the ordering of writing to the SPQ element 5697 * need to ensure the ordering of writing to the SPQ element
@@ -5773,6 +5808,8 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5773 return bnx2x_func_send_tx_stop(bp, params); 5808 return bnx2x_func_send_tx_stop(bp, params);
5774 case BNX2X_F_CMD_TX_START: 5809 case BNX2X_F_CMD_TX_START:
5775 return bnx2x_func_send_tx_start(bp, params); 5810 return bnx2x_func_send_tx_start(bp, params);
5811 case BNX2X_F_CMD_SWITCH_UPDATE:
5812 return bnx2x_func_send_switch_update(bp, params);
5776 default: 5813 default:
5777 BNX2X_ERR("Unknown command: %d\n", params->cmd); 5814 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5778 return -EINVAL; 5815 return -EINVAL;
@@ -5818,16 +5855,30 @@ int bnx2x_func_state_change(struct bnx2x *bp,
5818 struct bnx2x_func_state_params *params) 5855 struct bnx2x_func_state_params *params)
5819{ 5856{
5820 struct bnx2x_func_sp_obj *o = params->f_obj; 5857 struct bnx2x_func_sp_obj *o = params->f_obj;
5821 int rc; 5858 int rc, cnt = 300;
5822 enum bnx2x_func_cmd cmd = params->cmd; 5859 enum bnx2x_func_cmd cmd = params->cmd;
5823 unsigned long *pending = &o->pending; 5860 unsigned long *pending = &o->pending;
5824 5861
5825 mutex_lock(&o->one_pending_mutex); 5862 mutex_lock(&o->one_pending_mutex);
5826 5863
5827 /* Check that the requested transition is legal */ 5864 /* Check that the requested transition is legal */
5828 if (o->check_transition(bp, o, params)) { 5865 rc = o->check_transition(bp, o, params);
5866 if ((rc == -EBUSY) &&
5867 (test_bit(RAMROD_RETRY, &params->ramrod_flags))) {
5868 while ((rc == -EBUSY) && (--cnt > 0)) {
5869 mutex_unlock(&o->one_pending_mutex);
5870 msleep(10);
5871 mutex_lock(&o->one_pending_mutex);
5872 rc = o->check_transition(bp, o, params);
5873 }
5874 if (rc == -EBUSY) {
5875 mutex_unlock(&o->one_pending_mutex);
5876 BNX2X_ERR("timeout waiting for previous ramrod completion\n");
5877 return rc;
5878 }
5879 } else if (rc) {
5829 mutex_unlock(&o->one_pending_mutex); 5880 mutex_unlock(&o->one_pending_mutex);
5830 return -EINVAL; 5881 return rc;
5831 } 5882 }
5832 5883
5833 /* Set "pending" bit */ 5884 /* Set "pending" bit */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index acf2fe4ca608..adbd91b1bdfc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -40,6 +40,12 @@ enum {
40 * pending commands list. 40 * pending commands list.
41 */ 41 */
42 RAMROD_CONT, 42 RAMROD_CONT,
43 /* If there is another pending ramrod, wait until it finishes and
44 * re-try to submit this one. This flag can be set only in sleepable
45 * context, and should not be set from the context that completes the
46 * ramrods as deadlock will occur.
47 */
48 RAMROD_RETRY,
43}; 49};
44 50
45typedef enum { 51typedef enum {
@@ -1061,6 +1067,7 @@ enum bnx2x_func_cmd {
1061 BNX2X_F_CMD_AFEX_VIFLISTS, 1067 BNX2X_F_CMD_AFEX_VIFLISTS,
1062 BNX2X_F_CMD_TX_STOP, 1068 BNX2X_F_CMD_TX_STOP,
1063 BNX2X_F_CMD_TX_START, 1069 BNX2X_F_CMD_TX_START,
1070 BNX2X_F_CMD_SWITCH_UPDATE,
1064 BNX2X_F_CMD_MAX, 1071 BNX2X_F_CMD_MAX,
1065}; 1072};
1066 1073
@@ -1103,6 +1110,10 @@ struct bnx2x_func_start_params {
1103 u8 network_cos_mode; 1110 u8 network_cos_mode;
1104}; 1111};
1105 1112
1113struct bnx2x_func_switch_update_params {
1114 u8 suspend;
1115};
1116
1106struct bnx2x_func_afex_update_params { 1117struct bnx2x_func_afex_update_params {
1107 u16 vif_id; 1118 u16 vif_id;
1108 u16 afex_default_vlan; 1119 u16 afex_default_vlan;
@@ -1136,6 +1147,7 @@ struct bnx2x_func_state_params {
1136 struct bnx2x_func_hw_init_params hw_init; 1147 struct bnx2x_func_hw_init_params hw_init;
1137 struct bnx2x_func_hw_reset_params hw_reset; 1148 struct bnx2x_func_hw_reset_params hw_reset;
1138 struct bnx2x_func_start_params start; 1149 struct bnx2x_func_start_params start;
1150 struct bnx2x_func_switch_update_params switch_update;
1139 struct bnx2x_func_afex_update_params afex_update; 1151 struct bnx2x_func_afex_update_params afex_update;
1140 struct bnx2x_func_afex_viflists_params afex_viflists; 1152 struct bnx2x_func_afex_viflists_params afex_viflists;
1141 struct bnx2x_func_tx_start_params tx_start; 1153 struct bnx2x_func_tx_start_params tx_start;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 348ed02d3c69..89ec0667140a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1149,6 +1149,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
1149 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); 1149 UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
1150 UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed); 1150 UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
1151 UPDATE_ESTAT_QSTAT(hw_csum_err); 1151 UPDATE_ESTAT_QSTAT(hw_csum_err);
1152 UPDATE_ESTAT_QSTAT(driver_filtered_tx_pkt);
1152 } 1153 }
1153} 1154}
1154 1155
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 24b8e505b60c..b4d7b26c7fe7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,7 @@ struct bnx2x_eth_stats {
203 /* Recovery */ 203 /* Recovery */
204 u32 recoverable_error; 204 u32 recoverable_error;
205 u32 unrecoverable_error; 205 u32 unrecoverable_error;
206 u32 driver_filtered_tx_pkt;
206 /* src: Clear-on-Read register; Will not survive PMF Migration */ 207 /* src: Clear-on-Read register; Will not survive PMF Migration */
207 u32 eee_tx_lpi; 208 u32 eee_tx_lpi;
208}; 209};
@@ -264,6 +265,7 @@ struct bnx2x_eth_q_stats {
264 u32 total_tpa_aggregated_frames_lo; 265 u32 total_tpa_aggregated_frames_lo;
265 u32 total_tpa_bytes_hi; 266 u32 total_tpa_bytes_hi;
266 u32 total_tpa_bytes_lo; 267 u32 total_tpa_bytes_lo;
268 u32 driver_filtered_tx_pkt;
267}; 269};
268 270
269struct bnx2x_eth_stats_old { 271struct bnx2x_eth_stats_old {
@@ -315,6 +317,7 @@ struct bnx2x_eth_q_stats_old {
315 u32 rx_err_discard_pkt_old; 317 u32 rx_err_discard_pkt_old;
316 u32 rx_skb_alloc_failed_old; 318 u32 rx_skb_alloc_failed_old;
317 u32 hw_csum_err_old; 319 u32 hw_csum_err_old;
320 u32 driver_filtered_tx_pkt_old;
318}; 321};
319 322
320struct bnx2x_net_stats_old { 323struct bnx2x_net_stats_old {
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cc8434fd606e..df8c30d1a52c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -40,8 +40,10 @@
40#include <net/ip6_checksum.h> 40#include <net/ip6_checksum.h>
41#include <scsi/iscsi_if.h> 41#include <scsi/iscsi_if.h>
42 42
43#define BCM_CNIC 1
43#include "cnic_if.h" 44#include "cnic_if.h"
44#include "bnx2.h" 45#include "bnx2.h"
46#include "bnx2x/bnx2x.h"
45#include "bnx2x/bnx2x_reg.h" 47#include "bnx2x/bnx2x_reg.h"
46#include "bnx2x/bnx2x_fw_defs.h" 48#include "bnx2x/bnx2x_fw_defs.h"
47#include "bnx2x/bnx2x_hsi.h" 49#include "bnx2x/bnx2x_hsi.h"
@@ -51,10 +53,10 @@
51#include "cnic.h" 53#include "cnic.h"
52#include "cnic_defs.h" 54#include "cnic_defs.h"
53 55
54#define DRV_MODULE_NAME "cnic" 56#define CNIC_MODULE_NAME "cnic"
55 57
56static char version[] __devinitdata = 58static char version[] =
57 "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n"; 59 "Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
58 60
59MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) " 61MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
60 "Chen (zongxi@broadcom.com"); 62 "Chen (zongxi@broadcom.com");
@@ -724,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
724 726
725 for (i = 0; i < dma->num_pages; i++) { 727 for (i = 0; i < dma->num_pages; i++) {
726 if (dma->pg_arr[i]) { 728 if (dma->pg_arr[i]) {
727 dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE, 729 dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
728 dma->pg_arr[i], dma->pg_map_arr[i]); 730 dma->pg_arr[i], dma->pg_map_arr[i]);
729 dma->pg_arr[i] = NULL; 731 dma->pg_arr[i] = NULL;
730 } 732 }
@@ -783,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
783 785
784 for (i = 0; i < pages; i++) { 786 for (i = 0; i < pages; i++) {
785 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, 787 dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
786 BCM_PAGE_SIZE, 788 BNX2_PAGE_SIZE,
787 &dma->pg_map_arr[i], 789 &dma->pg_map_arr[i],
788 GFP_ATOMIC); 790 GFP_ATOMIC);
789 if (dma->pg_arr[i] == NULL) 791 if (dma->pg_arr[i] == NULL)
@@ -792,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
792 if (!use_pg_tbl) 794 if (!use_pg_tbl)
793 return 0; 795 return 0;
794 796
795 dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) & 797 dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
796 ~(BCM_PAGE_SIZE - 1); 798 ~(BNX2_PAGE_SIZE - 1);
797 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, 799 dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
798 &dma->pgtbl_map, GFP_ATOMIC); 800 &dma->pgtbl_map, GFP_ATOMIC);
799 if (dma->pgtbl == NULL) 801 if (dma->pgtbl == NULL)
@@ -895,11 +897,11 @@ static int cnic_alloc_context(struct cnic_dev *dev)
895{ 897{
896 struct cnic_local *cp = dev->cnic_priv; 898 struct cnic_local *cp = dev->cnic_priv;
897 899
898 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 900 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
899 int i, k, arr_size; 901 int i, k, arr_size;
900 902
901 cp->ctx_blk_size = BCM_PAGE_SIZE; 903 cp->ctx_blk_size = BNX2_PAGE_SIZE;
902 cp->cids_per_blk = BCM_PAGE_SIZE / 128; 904 cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
903 arr_size = BNX2_MAX_CID / cp->cids_per_blk * 905 arr_size = BNX2_MAX_CID / cp->cids_per_blk *
904 sizeof(struct cnic_ctx); 906 sizeof(struct cnic_ctx);
905 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); 907 cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -931,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
931 for (i = 0; i < cp->ctx_blks; i++) { 933 for (i = 0; i < cp->ctx_blks; i++) {
932 cp->ctx_arr[i].ctx = 934 cp->ctx_arr[i].ctx =
933 dma_alloc_coherent(&dev->pcidev->dev, 935 dma_alloc_coherent(&dev->pcidev->dev,
934 BCM_PAGE_SIZE, 936 BNX2_PAGE_SIZE,
935 &cp->ctx_arr[i].mapping, 937 &cp->ctx_arr[i].mapping,
936 GFP_KERNEL); 938 GFP_KERNEL);
937 if (cp->ctx_arr[i].ctx == NULL) 939 if (cp->ctx_arr[i].ctx == NULL)
@@ -1011,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
1011 if (udev->l2_ring) 1013 if (udev->l2_ring)
1012 return 0; 1014 return 0;
1013 1015
1014 udev->l2_ring_size = pages * BCM_PAGE_SIZE; 1016 udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
1015 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, 1017 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1016 &udev->l2_ring_map, 1018 &udev->l2_ring_map,
1017 GFP_KERNEL | __GFP_COMP); 1019 GFP_KERNEL | __GFP_COMP);
@@ -1234,8 +1236,6 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1234 int i, j, n, ret, pages; 1236 int i, j, n, ret, pages;
1235 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info; 1237 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1236 1238
1237 cp->iro_arr = ethdev->iro_arr;
1238
1239 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1239 cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1240 cp->iscsi_start_cid = start_cid; 1240 cp->iscsi_start_cid = start_cid;
1241 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ; 1241 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
@@ -1430,6 +1430,7 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1430static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) 1430static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1431{ 1431{
1432 struct cnic_local *cp = dev->cnic_priv; 1432 struct cnic_local *cp = dev->cnic_priv;
1433 struct bnx2x *bp = netdev_priv(dev->netdev);
1433 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe; 1434 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1434 int hq_bds, pages; 1435 int hq_bds, pages;
1435 u32 pfid = cp->pfid; 1436 u32 pfid = cp->pfid;
@@ -1512,6 +1513,7 @@ static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1512{ 1513{
1513 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe; 1514 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1514 struct cnic_local *cp = dev->cnic_priv; 1515 struct cnic_local *cp = dev->cnic_priv;
1516 struct bnx2x *bp = netdev_priv(dev->netdev);
1515 u32 pfid = cp->pfid; 1517 u32 pfid = cp->pfid;
1516 struct iscsi_kcqe kcqe; 1518 struct iscsi_kcqe kcqe;
1517 struct kcqe *cqes[1]; 1519 struct kcqe *cqes[1];
@@ -2048,6 +2050,7 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
2048static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 2050static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2049{ 2051{
2050 struct cnic_local *cp = dev->cnic_priv; 2052 struct cnic_local *cp = dev->cnic_priv;
2053 struct bnx2x *bp = netdev_priv(dev->netdev);
2051 u32 pfid = cp->pfid; 2054 u32 pfid = cp->pfid;
2052 u8 *mac = dev->mac_addr; 2055 u8 *mac = dev->mac_addr;
2053 2056
@@ -2084,6 +2087,7 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2084static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts) 2087static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2085{ 2088{
2086 struct cnic_local *cp = dev->cnic_priv; 2089 struct cnic_local *cp = dev->cnic_priv;
2090 struct bnx2x *bp = netdev_priv(dev->netdev);
2087 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN; 2091 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2088 u16 tstorm_flags = 0; 2092 u16 tstorm_flags = 0;
2089 2093
@@ -2103,6 +2107,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2103 u32 num, int *work) 2107 u32 num, int *work)
2104{ 2108{
2105 struct cnic_local *cp = dev->cnic_priv; 2109 struct cnic_local *cp = dev->cnic_priv;
2110 struct bnx2x *bp = netdev_priv(dev->netdev);
2106 struct l4_kwq_connect_req1 *kwqe1 = 2111 struct l4_kwq_connect_req1 *kwqe1 =
2107 (struct l4_kwq_connect_req1 *) wqes[0]; 2112 (struct l4_kwq_connect_req1 *) wqes[0];
2108 struct l4_kwq_connect_req3 *kwqe3; 2113 struct l4_kwq_connect_req3 *kwqe3;
@@ -2898,7 +2903,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
2898 u16 hw_cons, sw_cons; 2903 u16 hw_cons, sw_cons;
2899 struct cnic_uio_dev *udev = cp->udev; 2904 struct cnic_uio_dev *udev = cp->udev;
2900 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2905 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2901 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 2906 (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
2902 u32 cmd; 2907 u32 cmd;
2903 int comp = 0; 2908 int comp = 0;
2904 2909
@@ -3853,12 +3858,17 @@ static int cnic_cm_abort(struct cnic_sock *csk)
3853 return cnic_cm_abort_req(csk); 3858 return cnic_cm_abort_req(csk);
3854 3859
3855 /* Getting here means that we haven't started connect, or 3860 /* Getting here means that we haven't started connect, or
3856 * connect was not successful. 3861 * connect was not successful, or it has been reset by the target.
3857 */ 3862 */
3858 3863
3859 cp->close_conn(csk, opcode); 3864 cp->close_conn(csk, opcode);
3860 if (csk->state != opcode) 3865 if (csk->state != opcode) {
3866 /* Wait for remote reset sequence to complete */
3867 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3868 msleep(1);
3869
3861 return -EALREADY; 3870 return -EALREADY;
3871 }
3862 3872
3863 return 0; 3873 return 0;
3864} 3874}
@@ -3872,6 +3882,10 @@ static int cnic_cm_close(struct cnic_sock *csk)
3872 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3882 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3873 return cnic_cm_close_req(csk); 3883 return cnic_cm_close_req(csk);
3874 } else { 3884 } else {
3885 /* Wait for remote reset sequence to complete */
3886 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3887 msleep(1);
3888
3875 return -EALREADY; 3889 return -EALREADY;
3876 } 3890 }
3877 return 0; 3891 return 0;
@@ -4200,6 +4214,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4200static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev) 4214static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4201{ 4215{
4202 struct cnic_local *cp = dev->cnic_priv; 4216 struct cnic_local *cp = dev->cnic_priv;
4217 struct bnx2x *bp = netdev_priv(dev->netdev);
4203 u32 pfid = cp->pfid; 4218 u32 pfid = cp->pfid;
4204 u32 port = CNIC_PORT(cp); 4219 u32 port = CNIC_PORT(cp);
4205 4220
@@ -4349,7 +4364,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4349 int ret = 0, i; 4364 int ret = 0, i;
4350 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4365 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4351 4366
4352 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4367 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4353 return 0; 4368 return 0;
4354 4369
4355 for (i = 0; i < cp->ctx_blks; i++) { 4370 for (i = 0; i < cp->ctx_blks; i++) {
@@ -4357,7 +4372,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4357 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4372 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4358 u32 val; 4373 u32 val;
4359 4374
4360 memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); 4375 memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
4361 4376
4362 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4377 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4363 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4378 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4499,7 +4514,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4499 u32 cid_addr, tx_cid, sb_id; 4514 u32 cid_addr, tx_cid, sb_id;
4500 u32 val, offset0, offset1, offset2, offset3; 4515 u32 val, offset0, offset1, offset2, offset3;
4501 int i; 4516 int i;
4502 struct tx_bd *txbd; 4517 struct bnx2_tx_bd *txbd;
4503 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4518 dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4504 struct status_block *s_blk = cp->status_blk.gen; 4519 struct status_block *s_blk = cp->status_blk.gen;
4505 4520
@@ -4517,7 +4532,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4517 cp->tx_cons = *cp->tx_cons_ptr; 4532 cp->tx_cons = *cp->tx_cons_ptr;
4518 4533
4519 cid_addr = GET_CID_ADDR(tx_cid); 4534 cid_addr = GET_CID_ADDR(tx_cid);
4520 if (CHIP_NUM(cp) == CHIP_NUM_5709) { 4535 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
4521 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4536 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4522 4537
4523 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4538 for (i = 0; i < PHY_CTX_SIZE; i += 4)
@@ -4545,7 +4560,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4545 txbd = udev->l2_ring; 4560 txbd = udev->l2_ring;
4546 4561
4547 buf_map = udev->l2_buf_map; 4562 buf_map = udev->l2_buf_map;
4548 for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { 4563 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) {
4549 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4564 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4550 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4565 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4551 } 4566 }
@@ -4565,7 +4580,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4565 struct cnic_uio_dev *udev = cp->udev; 4580 struct cnic_uio_dev *udev = cp->udev;
4566 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4581 u32 cid_addr, sb_id, val, coal_reg, coal_val;
4567 int i; 4582 int i;
4568 struct rx_bd *rxbd; 4583 struct bnx2_rx_bd *rxbd;
4569 struct status_block *s_blk = cp->status_blk.gen; 4584 struct status_block *s_blk = cp->status_blk.gen;
4570 dma_addr_t ring_map = udev->l2_ring_map; 4585 dma_addr_t ring_map = udev->l2_ring_map;
4571 4586
@@ -4601,8 +4616,8 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4601 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4616 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4602 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4617 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4603 4618
4604 rxbd = udev->l2_ring + BCM_PAGE_SIZE; 4619 rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
4605 for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { 4620 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
4606 dma_addr_t buf_map; 4621 dma_addr_t buf_map;
4607 int n = (i % cp->l2_rx_ring_size) + 1; 4622 int n = (i % cp->l2_rx_ring_size) + 1;
4608 4623
@@ -4612,11 +4627,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4612 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4627 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4613 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4628 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4614 } 4629 }
4615 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4630 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
4616 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4631 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4617 rxbd->rx_bd_haddr_hi = val; 4632 rxbd->rx_bd_haddr_hi = val;
4618 4633
4619 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4634 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
4620 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4635 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4621 rxbd->rx_bd_haddr_lo = val; 4636 rxbd->rx_bd_haddr_lo = val;
4622 4637
@@ -4662,7 +4677,7 @@ static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4662 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4677 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4663 4678
4664 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4679 val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4665 if (CHIP_NUM(cp) != CHIP_NUM_5709) 4680 if (BNX2_CHIP(cp) != BNX2_CHIP_5709)
4666 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4681 val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4667 4682
4668 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4683 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
@@ -4682,10 +4697,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4682 4697
4683 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4698 val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4684 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4699 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4685 if (BCM_PAGE_BITS > 12) 4700 if (BNX2_PAGE_BITS > 12)
4686 val |= (12 - 8) << 4; 4701 val |= (12 - 8) << 4;
4687 else 4702 else
4688 val |= (BCM_PAGE_BITS - 8) << 4; 4703 val |= (BNX2_PAGE_BITS - 8) << 4;
4689 4704
4690 CNIC_WR(dev, BNX2_MQ_CONFIG, val); 4705 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4691 4706
@@ -4708,20 +4723,20 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4708 cp->kwq_con_idx = 0; 4723 cp->kwq_con_idx = 0;
4709 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags); 4724 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4710 4725
4711 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 4726 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4712 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 4727 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4713 else 4728 else
4714 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; 4729 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4715 4730
4716 /* Initialize the kernel work queue context. */ 4731 /* Initialize the kernel work queue context. */
4717 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4732 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4718 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4733 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4719 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); 4734 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4720 4735
4721 val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; 4736 val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4722 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4737 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4723 4738
4724 val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; 4739 val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4725 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4740 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4726 4741
4727 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); 4742 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4741,13 +4756,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4741 4756
4742 /* Initialize the kernel complete queue context. */ 4757 /* Initialize the kernel complete queue context. */
4743 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | 4758 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4744 (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; 4759 (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4745 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); 4760 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4746 4761
4747 val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; 4762 val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4748 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); 4763 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4749 4764
4750 val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; 4765 val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4751 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); 4766 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4752 4767
4753 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); 4768 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4843,6 +4858,7 @@ static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4843 u16 sb_id, u8 sb_index, 4858 u16 sb_id, u8 sb_index,
4844 u8 disable) 4859 u8 disable)
4845{ 4860{
4861 struct bnx2x *bp = netdev_priv(dev->netdev);
4846 4862
4847 u32 addr = BAR_CSTRORM_INTMEM + 4863 u32 addr = BAR_CSTRORM_INTMEM +
4848 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) + 4864 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
@@ -4860,6 +4876,7 @@ static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4860static void cnic_enable_bnx2x_int(struct cnic_dev *dev) 4876static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4861{ 4877{
4862 struct cnic_local *cp = dev->cnic_priv; 4878 struct cnic_local *cp = dev->cnic_priv;
4879 struct bnx2x *bp = netdev_priv(dev->netdev);
4863 u8 sb_id = cp->status_blk_num; 4880 u8 sb_id = cp->status_blk_num;
4864 4881
4865 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 4882 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
@@ -4886,10 +4903,10 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4886 u32 cli = cp->ethdev->iscsi_l2_client_id; 4903 u32 cli = cp->ethdev->iscsi_l2_client_id;
4887 u32 val; 4904 u32 val;
4888 4905
4889 memset(txbd, 0, BCM_PAGE_SIZE); 4906 memset(txbd, 0, BNX2_PAGE_SIZE);
4890 4907
4891 buf_map = udev->l2_buf_map; 4908 buf_map = udev->l2_buf_map;
4892 for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4909 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4893 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4910 struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4894 struct eth_tx_parse_bd_e1x *pbd_e1x = 4911 struct eth_tx_parse_bd_e1x *pbd_e1x =
4895 &((txbd + 1)->parse_bd_e1x); 4912 &((txbd + 1)->parse_bd_e1x);
@@ -4908,9 +4925,9 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4908 4925
4909 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) 4926 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
4910 pbd_e2->parsing_data = (UNICAST_ADDRESS << 4927 pbd_e2->parsing_data = (UNICAST_ADDRESS <<
4911 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 4928 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
4912 else 4929 else
4913 pbd_e1x->global_data = (UNICAST_ADDRESS << 4930 pbd_e1x->global_data = (UNICAST_ADDRESS <<
4914 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT); 4931 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT);
4915 } 4932 }
4916 4933
@@ -4945,9 +4962,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4945 struct cnic_local *cp = dev->cnic_priv; 4962 struct cnic_local *cp = dev->cnic_priv;
4946 struct cnic_uio_dev *udev = cp->udev; 4963 struct cnic_uio_dev *udev = cp->udev;
4947 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4964 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4948 BCM_PAGE_SIZE); 4965 BNX2_PAGE_SIZE);
4949 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4966 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4950 (udev->l2_ring + (2 * BCM_PAGE_SIZE)); 4967 (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
4951 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4968 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4952 int i; 4969 int i;
4953 u32 cli = cp->ethdev->iscsi_l2_client_id; 4970 u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -4971,20 +4988,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4971 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4988 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4972 } 4989 }
4973 4990
4974 val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32; 4991 val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
4975 rxbd->addr_hi = cpu_to_le32(val); 4992 rxbd->addr_hi = cpu_to_le32(val);
4976 data->rx.bd_page_base.hi = cpu_to_le32(val); 4993 data->rx.bd_page_base.hi = cpu_to_le32(val);
4977 4994
4978 val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff; 4995 val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
4979 rxbd->addr_lo = cpu_to_le32(val); 4996 rxbd->addr_lo = cpu_to_le32(val);
4980 data->rx.bd_page_base.lo = cpu_to_le32(val); 4997 data->rx.bd_page_base.lo = cpu_to_le32(val);
4981 4998
4982 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 4999 rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4983 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32; 5000 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
4984 rxcqe->addr_hi = cpu_to_le32(val); 5001 rxcqe->addr_hi = cpu_to_le32(val);
4985 data->rx.cqe_page_base.hi = cpu_to_le32(val); 5002 data->rx.cqe_page_base.hi = cpu_to_le32(val);
4986 5003
4987 val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff; 5004 val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
4988 rxcqe->addr_lo = cpu_to_le32(val); 5005 rxcqe->addr_lo = cpu_to_le32(val);
4989 data->rx.cqe_page_base.lo = cpu_to_le32(val); 5006 data->rx.cqe_page_base.lo = cpu_to_le32(val);
4990 5007
@@ -5009,6 +5026,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
5009static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 5026static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5010{ 5027{
5011 struct cnic_local *cp = dev->cnic_priv; 5028 struct cnic_local *cp = dev->cnic_priv;
5029 struct bnx2x *bp = netdev_priv(dev->netdev);
5012 u32 pfid = cp->pfid; 5030 u32 pfid = cp->pfid;
5013 5031
5014 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 5032 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
@@ -5047,37 +5065,17 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
5047static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 5065static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5048{ 5066{
5049 struct cnic_local *cp = dev->cnic_priv; 5067 struct cnic_local *cp = dev->cnic_priv;
5068 struct bnx2x *bp = netdev_priv(dev->netdev);
5050 struct cnic_eth_dev *ethdev = cp->ethdev; 5069 struct cnic_eth_dev *ethdev = cp->ethdev;
5051 int func = CNIC_FUNC(cp), ret; 5070 int func, ret;
5052 u32 pfid; 5071 u32 pfid;
5053 5072
5054 dev->stats_addr = ethdev->addr_drv_info_to_mcp; 5073 dev->stats_addr = ethdev->addr_drv_info_to_mcp;
5055 cp->port_mode = CHIP_PORT_MODE_NONE; 5074 cp->port_mode = bp->common.chip_port_mode;
5056 5075 cp->pfid = bp->pfid;
5057 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { 5076 cp->func = bp->pf_num;
5058 u32 val;
5059 5077
5060 pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val); 5078 func = CNIC_FUNC(cp);
5061 cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
5062 ME_REG_ABS_PF_NUM_SHIFT);
5063 func = CNIC_FUNC(cp);
5064
5065 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
5066 if (!(val & 1))
5067 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
5068 else
5069 val = (val >> 1) & 1;
5070
5071 if (val) {
5072 cp->port_mode = CHIP_4_PORT_MODE;
5073 cp->pfid = func >> 1;
5074 } else {
5075 cp->port_mode = CHIP_2_PORT_MODE;
5076 cp->pfid = func & 0x6;
5077 }
5078 } else {
5079 cp->pfid = func;
5080 }
5081 pfid = cp->pfid; 5079 pfid = cp->pfid;
5082 5080
5083 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 5081 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
@@ -5144,6 +5142,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
5144static void cnic_init_rings(struct cnic_dev *dev) 5142static void cnic_init_rings(struct cnic_dev *dev)
5145{ 5143{
5146 struct cnic_local *cp = dev->cnic_priv; 5144 struct cnic_local *cp = dev->cnic_priv;
5145 struct bnx2x *bp = netdev_priv(dev->netdev);
5147 struct cnic_uio_dev *udev = cp->udev; 5146 struct cnic_uio_dev *udev = cp->udev;
5148 5147
5149 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5148 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
@@ -5249,8 +5248,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
5249 msleep(10); 5248 msleep(10);
5250 } 5249 }
5251 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5250 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5252 rx_ring = udev->l2_ring + BCM_PAGE_SIZE; 5251 rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
5253 memset(rx_ring, 0, BCM_PAGE_SIZE); 5252 memset(rx_ring, 0, BNX2_PAGE_SIZE);
5254} 5253}
5255 5254
5256static int cnic_register_netdev(struct cnic_dev *dev) 5255static int cnic_register_netdev(struct cnic_dev *dev)
@@ -5344,8 +5343,28 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5344static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5343static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5345{ 5344{
5346 struct cnic_local *cp = dev->cnic_priv; 5345 struct cnic_local *cp = dev->cnic_priv;
5346 struct bnx2x *bp = netdev_priv(dev->netdev);
5347 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS;
5348 u32 sb_id = cp->status_blk_num;
5349 u32 idx_off, syn_off;
5347 5350
5348 cnic_free_irq(dev); 5351 cnic_free_irq(dev);
5352
5353 if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
5354 idx_off = offsetof(struct hc_status_block_e2, index_values) +
5355 (hc_index * sizeof(u16));
5356
5357 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id);
5358 } else {
5359 idx_off = offsetof(struct hc_status_block_e1x, index_values) +
5360 (hc_index * sizeof(u16));
5361
5362 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id);
5363 }
5364 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0);
5365 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) +
5366 idx_off, 0);
5367
5349 *cp->kcq1.hw_prod_idx_ptr = 0; 5368 *cp->kcq1.hw_prod_idx_ptr = 0;
5350 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5369 CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5351 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0); 5370 CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
@@ -5431,14 +5450,12 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5431 struct pci_dev *pdev; 5450 struct pci_dev *pdev;
5432 struct cnic_dev *cdev; 5451 struct cnic_dev *cdev;
5433 struct cnic_local *cp; 5452 struct cnic_local *cp;
5453 struct bnx2 *bp = netdev_priv(dev);
5434 struct cnic_eth_dev *ethdev = NULL; 5454 struct cnic_eth_dev *ethdev = NULL;
5435 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5436 5455
5437 probe = symbol_get(bnx2_cnic_probe); 5456 if (bp->cnic_probe)
5438 if (probe) { 5457 ethdev = (bp->cnic_probe)(dev);
5439 ethdev = (*probe)(dev); 5458
5440 symbol_put(bnx2_cnic_probe);
5441 }
5442 if (!ethdev) 5459 if (!ethdev)
5443 return NULL; 5460 return NULL;
5444 5461
@@ -5493,14 +5510,12 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5493 struct pci_dev *pdev; 5510 struct pci_dev *pdev;
5494 struct cnic_dev *cdev; 5511 struct cnic_dev *cdev;
5495 struct cnic_local *cp; 5512 struct cnic_local *cp;
5513 struct bnx2x *bp = netdev_priv(dev);
5496 struct cnic_eth_dev *ethdev = NULL; 5514 struct cnic_eth_dev *ethdev = NULL;
5497 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5498 5515
5499 probe = symbol_get(bnx2x_cnic_probe); 5516 if (bp->cnic_probe)
5500 if (probe) { 5517 ethdev = bp->cnic_probe(dev);
5501 ethdev = (*probe)(dev); 5518
5502 symbol_put(bnx2x_cnic_probe);
5503 }
5504 if (!ethdev) 5519 if (!ethdev)
5505 return NULL; 5520 return NULL;
5506 5521
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 148604c3fa0c..62c670619ae6 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -80,18 +80,18 @@
80#define CNIC_LOCAL_PORT_MAX 61024 80#define CNIC_LOCAL_PORT_MAX 61024
81#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) 81#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
82 82
83#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) 83#define KWQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kwqe))
84#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) 84#define KCQE_CNT (BNX2_PAGE_SIZE / sizeof(struct kcqe))
85#define MAX_KWQE_CNT (KWQE_CNT - 1) 85#define MAX_KWQE_CNT (KWQE_CNT - 1)
86#define MAX_KCQE_CNT (KCQE_CNT - 1) 86#define MAX_KCQE_CNT (KCQE_CNT - 1)
87 87
88#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) 88#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
89#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) 89#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
90 90
91#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) 91#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BNX2_PAGE_BITS - 5))
92#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) 92#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
93 93
94#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) 94#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BNX2_PAGE_BITS - 5))
95#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) 95#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
96 96
97#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ 97#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \
@@ -186,14 +186,6 @@ struct kcq_info {
186 u16 (*hw_idx)(u16); 186 u16 (*hw_idx)(u16);
187}; 187};
188 188
189struct iro {
190 u32 base;
191 u16 m1;
192 u16 m2;
193 u16 m3;
194 u16 size;
195};
196
197struct cnic_uio_dev { 189struct cnic_uio_dev {
198 struct uio_info cnic_uinfo; 190 struct uio_info cnic_uinfo;
199 u32 uio_dev; 191 u32 uio_dev;
@@ -241,9 +233,6 @@ struct cnic_local {
241 u16 rx_cons; 233 u16 rx_cons;
242 u16 tx_cons; 234 u16 tx_cons;
243 235
244 const struct iro *iro_arr;
245#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
246
247 struct cnic_dma kwq_info; 236 struct cnic_dma kwq_info;
248 struct kwqe **kwq; 237 struct kwqe **kwq;
249 238
@@ -316,9 +305,6 @@ struct cnic_local {
316 int func; 305 int func;
317 u32 pfid; 306 u32 pfid;
318 u8 port_mode; 307 u8 port_mode;
319#define CHIP_4_PORT_MODE 0
320#define CHIP_2_PORT_MODE 1
321#define CHIP_PORT_MODE_NONE 2
322 308
323 u32 shmem_base; 309 u32 shmem_base;
324 310
@@ -420,11 +406,11 @@ struct bnx2x_bd_chain_next {
420 BNX2X_CHIP_IS_57840(x)) 406 BNX2X_CHIP_IS_57840(x))
421#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x)) 407#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
422 408
423#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id) 409#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
424 410 sizeof(struct eth_rx_bd))
425#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
426#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2) 411#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
427#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 412#define BNX2X_RCQ_DESC_CNT (BNX2_PAGE_SIZE / \
413 sizeof(union eth_rx_cqe))
428#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1) 414#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
429 415
430#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \ 416#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 865095aad1f6..2a35436f9095 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -14,8 +14,8 @@
14 14
15#include "bnx2x/bnx2x_mfw_req.h" 15#include "bnx2x/bnx2x_mfw_req.h"
16 16
17#define CNIC_MODULE_VERSION "2.5.14" 17#define CNIC_MODULE_VERSION "2.5.16"
18#define CNIC_MODULE_RELDATE "Sep 30, 2012" 18#define CNIC_MODULE_RELDATE "Dec 05, 2012"
19 19
20#define CNIC_ULP_RDMA 0 20#define CNIC_ULP_RDMA 0
21#define CNIC_ULP_ISCSI 1 21#define CNIC_ULP_ISCSI 1
@@ -353,7 +353,4 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
353 353
354extern int cnic_unregister_driver(int ulp_type); 354extern int cnic_unregister_driver(int ulp_type);
355 355
356extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
357extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
358
359#endif 356#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 49e7a258da8a..3a1c8a3cf7c9 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2586,7 +2586,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
2586} 2586}
2587 2587
2588 2588
2589static int __devinit sbmac_probe(struct platform_device *pldev) 2589static int sbmac_probe(struct platform_device *pldev)
2590{ 2590{
2591 struct net_device *dev; 2591 struct net_device *dev;
2592 struct sbmac_softc *sc; 2592 struct sbmac_softc *sc;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a8800ac10df9..78ea90c40e19 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -54,6 +54,9 @@
54#include <asm/byteorder.h> 54#include <asm/byteorder.h>
55#include <linux/uaccess.h> 55#include <linux/uaccess.h>
56 56
57#include <uapi/linux/net_tstamp.h>
58#include <linux/ptp_clock_kernel.h>
59
57#ifdef CONFIG_SPARC 60#ifdef CONFIG_SPARC
58#include <asm/idprom.h> 61#include <asm/idprom.h>
59#include <asm/prom.h> 62#include <asm/prom.h>
@@ -90,10 +93,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
90 93
91#define DRV_MODULE_NAME "tg3" 94#define DRV_MODULE_NAME "tg3"
92#define TG3_MAJ_NUM 3 95#define TG3_MAJ_NUM 3
93#define TG3_MIN_NUM 125 96#define TG3_MIN_NUM 128
94#define DRV_MODULE_VERSION \ 97#define DRV_MODULE_VERSION \
95 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 98 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
96#define DRV_MODULE_RELDATE "September 26, 2012" 99#define DRV_MODULE_RELDATE "December 03, 2012"
97 100
98#define RESET_KIND_SHUTDOWN 0 101#define RESET_KIND_SHUTDOWN 0
99#define RESET_KIND_INIT 1 102#define RESET_KIND_INIT 1
@@ -211,7 +214,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
211#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" 214#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
212#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" 215#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213 216
214static char version[] __devinitdata = 217static char version[] =
215 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; 218 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
216 219
217MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)"); 220MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
@@ -226,6 +229,9 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
226module_param(tg3_debug, int, 0); 229module_param(tg3_debug, int, 0);
227MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 230MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228 231
232#define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
233#define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
234
229static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { 235static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)}, 236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)}, 237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
@@ -245,20 +251,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)}, 251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)}, 252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)}, 253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)}, 254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)}, 255 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
256 TG3_DRV_DATA_FLAG_5705_10_100},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
258 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
259 TG3_DRV_DATA_FLAG_5705_10_100},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)}, 260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)}, 261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
262 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 TG3_DRV_DATA_FLAG_5705_10_100},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)}, 264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)}, 265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)}, 266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)}, 267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)}, 268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
269 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)}, 270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)}, 271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)}, 272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)}, 273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)}, 274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
275 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)}, 276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)}, 277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)}, 278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
@@ -266,8 +280,13 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)}, 280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)}, 281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)}, 282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
283 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
284 PCI_VENDOR_ID_LENOVO,
285 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
286 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)}, 287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)}, 288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)}, 290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)}, 291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)}, 292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
@@ -286,18 +305,28 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)}, 305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)}, 306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)}, 307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
309 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
310 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)}, 314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, 315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, 316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
317 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)}, 319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)}, 321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)}, 322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)}, 323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)}, 324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)}, 325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)}, 326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)}, 327 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
329 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)}, 330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)}, 331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)}, 332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
@@ -398,19 +427,27 @@ static const struct {
398}; 427};
399 428
400#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys) 429#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
430#define TG3_NVRAM_TEST 0
431#define TG3_LINK_TEST 1
432#define TG3_REGISTER_TEST 2
433#define TG3_MEMORY_TEST 3
434#define TG3_MAC_LOOPB_TEST 4
435#define TG3_PHY_LOOPB_TEST 5
436#define TG3_EXT_LOOPB_TEST 6
437#define TG3_INTERRUPT_TEST 7
401 438
402 439
403static const struct { 440static const struct {
404 const char string[ETH_GSTRING_LEN]; 441 const char string[ETH_GSTRING_LEN];
405} ethtool_test_keys[] = { 442} ethtool_test_keys[] = {
406 { "nvram test (online) " }, 443 [TG3_NVRAM_TEST] = { "nvram test (online) " },
407 { "link test (online) " }, 444 [TG3_LINK_TEST] = { "link test (online) " },
408 { "register test (offline)" }, 445 [TG3_REGISTER_TEST] = { "register test (offline)" },
409 { "memory test (offline)" }, 446 [TG3_MEMORY_TEST] = { "memory test (offline)" },
410 { "mac loopback test (offline)" }, 447 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
411 { "phy loopback test (offline)" }, 448 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
412 { "ext loopback test (offline)" }, 449 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
413 { "interrupt test (offline)" }, 450 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
414}; 451};
415 452
416#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys) 453#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
@@ -2447,6 +2484,18 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2447 return err; 2484 return err;
2448} 2485}
2449 2486
2487static void tg3_carrier_on(struct tg3 *tp)
2488{
2489 netif_carrier_on(tp->dev);
2490 tp->link_up = true;
2491}
2492
2493static void tg3_carrier_off(struct tg3 *tp)
2494{
2495 netif_carrier_off(tp->dev);
2496 tp->link_up = false;
2497}
2498
2450/* This will reset the tigon3 PHY if there is no valid 2499/* This will reset the tigon3 PHY if there is no valid
2451 * link unless the FORCE argument is non-zero. 2500 * link unless the FORCE argument is non-zero.
2452 */ 2501 */
@@ -2465,8 +2514,8 @@ static int tg3_phy_reset(struct tg3 *tp)
2465 if (err != 0) 2514 if (err != 0)
2466 return -EBUSY; 2515 return -EBUSY;
2467 2516
2468 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) { 2517 if (netif_running(tp->dev) && tp->link_up) {
2469 netif_carrier_off(tp->dev); 2518 tg3_carrier_off(tp);
2470 tg3_link_report(tp); 2519 tg3_link_report(tp);
2471 } 2520 }
2472 2521
@@ -4160,6 +4209,24 @@ static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4160 return true; 4209 return true;
4161} 4210}
4162 4211
4212static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4213{
4214 if (curr_link_up != tp->link_up) {
4215 if (curr_link_up) {
4216 tg3_carrier_on(tp);
4217 } else {
4218 tg3_carrier_off(tp);
4219 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4220 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4221 }
4222
4223 tg3_link_report(tp);
4224 return true;
4225 }
4226
4227 return false;
4228}
4229
4163static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) 4230static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4164{ 4231{
4165 int current_link_up; 4232 int current_link_up;
@@ -4192,7 +4259,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4192 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || 4259 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4193 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || 4260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && 4261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4195 netif_carrier_ok(tp->dev)) { 4262 tp->link_up) {
4196 tg3_readphy(tp, MII_BMSR, &bmsr); 4263 tg3_readphy(tp, MII_BMSR, &bmsr);
4197 if (!tg3_readphy(tp, MII_BMSR, &bmsr) && 4264 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4198 !(bmsr & BMSR_LSTATUS)) 4265 !(bmsr & BMSR_LSTATUS))
@@ -4434,13 +4501,7 @@ relink:
4434 PCI_EXP_LNKCTL_CLKREQ_EN); 4501 PCI_EXP_LNKCTL_CLKREQ_EN);
4435 } 4502 }
4436 4503
4437 if (current_link_up != netif_carrier_ok(tp->dev)) { 4504 tg3_test_and_report_link_chg(tp, current_link_up);
4438 if (current_link_up)
4439 netif_carrier_on(tp->dev);
4440 else
4441 netif_carrier_off(tp->dev);
4442 tg3_link_report(tp);
4443 }
4444 4505
4445 return 0; 4506 return 0;
4446} 4507}
@@ -5080,7 +5141,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5080 orig_active_duplex = tp->link_config.active_duplex; 5141 orig_active_duplex = tp->link_config.active_duplex;
5081 5142
5082 if (!tg3_flag(tp, HW_AUTONEG) && 5143 if (!tg3_flag(tp, HW_AUTONEG) &&
5083 netif_carrier_ok(tp->dev) && 5144 tp->link_up &&
5084 tg3_flag(tp, INIT_COMPLETE)) { 5145 tg3_flag(tp, INIT_COMPLETE)) {
5085 mac_status = tr32(MAC_STATUS); 5146 mac_status = tr32(MAC_STATUS);
5086 mac_status &= (MAC_STATUS_PCS_SYNCED | 5147 mac_status &= (MAC_STATUS_PCS_SYNCED |
@@ -5158,13 +5219,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5158 LED_CTRL_TRAFFIC_OVERRIDE)); 5219 LED_CTRL_TRAFFIC_OVERRIDE));
5159 } 5220 }
5160 5221
5161 if (current_link_up != netif_carrier_ok(tp->dev)) { 5222 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5162 if (current_link_up)
5163 netif_carrier_on(tp->dev);
5164 else
5165 netif_carrier_off(tp->dev);
5166 tg3_link_report(tp);
5167 } else {
5168 u32 now_pause_cfg = tp->link_config.active_flowctrl; 5223 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5169 if (orig_pause_cfg != now_pause_cfg || 5224 if (orig_pause_cfg != now_pause_cfg ||
5170 orig_active_speed != tp->link_config.active_speed || 5225 orig_active_speed != tp->link_config.active_speed ||
@@ -5257,7 +5312,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5257 new_bmcr |= BMCR_SPEED1000; 5312 new_bmcr |= BMCR_SPEED1000;
5258 5313
5259 /* Force a linkdown */ 5314 /* Force a linkdown */
5260 if (netif_carrier_ok(tp->dev)) { 5315 if (tp->link_up) {
5261 u32 adv; 5316 u32 adv;
5262 5317
5263 err |= tg3_readphy(tp, MII_ADVERTISE, &adv); 5318 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
@@ -5269,7 +5324,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5269 BMCR_ANRESTART | 5324 BMCR_ANRESTART |
5270 BMCR_ANENABLE); 5325 BMCR_ANENABLE);
5271 udelay(10); 5326 udelay(10);
5272 netif_carrier_off(tp->dev); 5327 tg3_carrier_off(tp);
5273 } 5328 }
5274 tg3_writephy(tp, MII_BMCR, new_bmcr); 5329 tg3_writephy(tp, MII_BMCR, new_bmcr);
5275 bmcr = new_bmcr; 5330 bmcr = new_bmcr;
@@ -5335,15 +5390,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5335 tp->link_config.active_speed = current_speed; 5390 tp->link_config.active_speed = current_speed;
5336 tp->link_config.active_duplex = current_duplex; 5391 tp->link_config.active_duplex = current_duplex;
5337 5392
5338 if (current_link_up != netif_carrier_ok(tp->dev)) { 5393 tg3_test_and_report_link_chg(tp, current_link_up);
5339 if (current_link_up)
5340 netif_carrier_on(tp->dev);
5341 else {
5342 netif_carrier_off(tp->dev);
5343 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5344 }
5345 tg3_link_report(tp);
5346 }
5347 return err; 5394 return err;
5348} 5395}
5349 5396
@@ -5355,7 +5402,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
5355 return; 5402 return;
5356 } 5403 }
5357 5404
5358 if (!netif_carrier_ok(tp->dev) && 5405 if (!tp->link_up &&
5359 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 5406 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5360 u32 bmcr; 5407 u32 bmcr;
5361 5408
@@ -5385,7 +5432,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
5385 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; 5432 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5386 } 5433 }
5387 } 5434 }
5388 } else if (netif_carrier_ok(tp->dev) && 5435 } else if (tp->link_up &&
5389 (tp->link_config.autoneg == AUTONEG_ENABLE) && 5436 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5390 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { 5437 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5391 u32 phy2; 5438 u32 phy2;
@@ -5451,7 +5498,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5451 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); 5498 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5452 5499
5453 if (!tg3_flag(tp, 5705_PLUS)) { 5500 if (!tg3_flag(tp, 5705_PLUS)) {
5454 if (netif_carrier_ok(tp->dev)) { 5501 if (tp->link_up) {
5455 tw32(HOSTCC_STAT_COAL_TICKS, 5502 tw32(HOSTCC_STAT_COAL_TICKS,
5456 tp->coal.stats_block_coalesce_usecs); 5503 tp->coal.stats_block_coalesce_usecs);
5457 } else { 5504 } else {
@@ -5461,7 +5508,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5461 5508
5462 if (tg3_flag(tp, ASPM_WORKAROUND)) { 5509 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5463 val = tr32(PCIE_PWR_MGMT_THRESH); 5510 val = tr32(PCIE_PWR_MGMT_THRESH);
5464 if (!netif_carrier_ok(tp->dev)) 5511 if (!tp->link_up)
5465 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | 5512 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5466 tp->pwrmgmt_thresh; 5513 tp->pwrmgmt_thresh;
5467 else 5514 else
@@ -5472,6 +5519,190 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5472 return err; 5519 return err;
5473} 5520}
5474 5521
5522/* tp->lock must be held */
5523static u64 tg3_refclk_read(struct tg3 *tp)
5524{
5525 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5526 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5527}
5528
5529/* tp->lock must be held */
5530static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5531{
5532 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5533 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5534 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5535 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5536}
5537
5538static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5539static inline void tg3_full_unlock(struct tg3 *tp);
5540static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5541{
5542 struct tg3 *tp = netdev_priv(dev);
5543
5544 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5545 SOF_TIMESTAMPING_RX_SOFTWARE |
5546 SOF_TIMESTAMPING_SOFTWARE |
5547 SOF_TIMESTAMPING_TX_HARDWARE |
5548 SOF_TIMESTAMPING_RX_HARDWARE |
5549 SOF_TIMESTAMPING_RAW_HARDWARE;
5550
5551 if (tp->ptp_clock)
5552 info->phc_index = ptp_clock_index(tp->ptp_clock);
5553 else
5554 info->phc_index = -1;
5555
5556 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5557
5558 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5559 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5560 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5561 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5562 return 0;
5563}
5564
5565static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5566{
5567 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5568 bool neg_adj = false;
5569 u32 correction = 0;
5570
5571 if (ppb < 0) {
5572 neg_adj = true;
5573 ppb = -ppb;
5574 }
5575
5576 /* Frequency adjustment is performed using hardware with a 24 bit
5577 * accumulator and a programmable correction value. On each clk, the
5578 * correction value gets added to the accumulator and when it
5579 * overflows, the time counter is incremented/decremented.
5580 *
5581 * So conversion from ppb to correction value is
5582 * ppb * (1 << 24) / 1000000000
5583 */
5584 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5585 TG3_EAV_REF_CLK_CORRECT_MASK;
5586
5587 tg3_full_lock(tp, 0);
5588
5589 if (correction)
5590 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5591 TG3_EAV_REF_CLK_CORRECT_EN |
5592 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5593 else
5594 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5595
5596 tg3_full_unlock(tp);
5597
5598 return 0;
5599}
5600
5601static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5602{
5603 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5604
5605 tg3_full_lock(tp, 0);
5606 tp->ptp_adjust += delta;
5607 tg3_full_unlock(tp);
5608
5609 return 0;
5610}
5611
5612static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5613{
5614 u64 ns;
5615 u32 remainder;
5616 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5617
5618 tg3_full_lock(tp, 0);
5619 ns = tg3_refclk_read(tp);
5620 ns += tp->ptp_adjust;
5621 tg3_full_unlock(tp);
5622
5623 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5624 ts->tv_nsec = remainder;
5625
5626 return 0;
5627}
5628
5629static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5630 const struct timespec *ts)
5631{
5632 u64 ns;
5633 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5634
5635 ns = timespec_to_ns(ts);
5636
5637 tg3_full_lock(tp, 0);
5638 tg3_refclk_write(tp, ns);
5639 tp->ptp_adjust = 0;
5640 tg3_full_unlock(tp);
5641
5642 return 0;
5643}
5644
5645static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5646 struct ptp_clock_request *rq, int on)
5647{
5648 return -EOPNOTSUPP;
5649}
5650
5651static const struct ptp_clock_info tg3_ptp_caps = {
5652 .owner = THIS_MODULE,
5653 .name = "tg3 clock",
5654 .max_adj = 250000000,
5655 .n_alarm = 0,
5656 .n_ext_ts = 0,
5657 .n_per_out = 0,
5658 .pps = 0,
5659 .adjfreq = tg3_ptp_adjfreq,
5660 .adjtime = tg3_ptp_adjtime,
5661 .gettime = tg3_ptp_gettime,
5662 .settime = tg3_ptp_settime,
5663 .enable = tg3_ptp_enable,
5664};
5665
5666static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5667 struct skb_shared_hwtstamps *timestamp)
5668{
5669 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5670 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5671 tp->ptp_adjust);
5672}
5673
5674/* tp->lock must be held */
5675static void tg3_ptp_init(struct tg3 *tp)
5676{
5677 if (!tg3_flag(tp, PTP_CAPABLE))
5678 return;
5679
5680 /* Initialize the hardware clock to the system time. */
5681 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5682 tp->ptp_adjust = 0;
5683 tp->ptp_info = tg3_ptp_caps;
5684}
5685
5686/* tp->lock must be held */
5687static void tg3_ptp_resume(struct tg3 *tp)
5688{
5689 if (!tg3_flag(tp, PTP_CAPABLE))
5690 return;
5691
5692 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5693 tp->ptp_adjust = 0;
5694}
5695
5696static void tg3_ptp_fini(struct tg3 *tp)
5697{
5698 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5699 return;
5700
5701 ptp_clock_unregister(tp->ptp_clock);
5702 tp->ptp_clock = NULL;
5703 tp->ptp_adjust = 0;
5704}
5705
5475static inline int tg3_irq_sync(struct tg3 *tp) 5706static inline int tg3_irq_sync(struct tg3 *tp)
5476{ 5707{
5477 return tp->irq_sync; 5708 return tp->irq_sync;
@@ -5652,6 +5883,16 @@ static void tg3_tx(struct tg3_napi *tnapi)
5652 return; 5883 return;
5653 } 5884 }
5654 5885
5886 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5887 struct skb_shared_hwtstamps timestamp;
5888 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5889 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5890
5891 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5892
5893 skb_tstamp_tx(skb, &timestamp);
5894 }
5895
5655 pci_unmap_single(tp->pdev, 5896 pci_unmap_single(tp->pdev,
5656 dma_unmap_addr(ri, mapping), 5897 dma_unmap_addr(ri, mapping),
5657 skb_headlen(skb), 5898 skb_headlen(skb),
@@ -5919,6 +6160,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
5919 dma_addr_t dma_addr; 6160 dma_addr_t dma_addr;
5920 u32 opaque_key, desc_idx, *post_ptr; 6161 u32 opaque_key, desc_idx, *post_ptr;
5921 u8 *data; 6162 u8 *data;
6163 u64 tstamp = 0;
5922 6164
5923 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 6165 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5924 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 6166 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -5953,6 +6195,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
5953 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 6195 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5954 ETH_FCS_LEN; 6196 ETH_FCS_LEN;
5955 6197
6198 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6199 RXD_FLAG_PTPSTAT_PTPV1 ||
6200 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6201 RXD_FLAG_PTPSTAT_PTPV2) {
6202 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6203 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6204 }
6205
5956 if (len > TG3_RX_COPY_THRESH(tp)) { 6206 if (len > TG3_RX_COPY_THRESH(tp)) {
5957 int skb_size; 6207 int skb_size;
5958 unsigned int frag_size; 6208 unsigned int frag_size;
@@ -5996,6 +6246,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
5996 } 6246 }
5997 6247
5998 skb_put(skb, len); 6248 skb_put(skb, len);
6249 if (tstamp)
6250 tg3_hwclock_to_timestamp(tp, tstamp,
6251 skb_hwtstamps(skb));
6252
5999 if ((tp->dev->features & NETIF_F_RXCSUM) && 6253 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6000 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && 6254 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6001 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) 6255 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -6477,17 +6731,24 @@ static inline void tg3_netif_stop(struct tg3 *tp)
6477{ 6731{
6478 tp->dev->trans_start = jiffies; /* prevent tx timeout */ 6732 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6479 tg3_napi_disable(tp); 6733 tg3_napi_disable(tp);
6734 netif_carrier_off(tp->dev);
6480 netif_tx_disable(tp->dev); 6735 netif_tx_disable(tp->dev);
6481} 6736}
6482 6737
6738/* tp->lock must be held */
6483static inline void tg3_netif_start(struct tg3 *tp) 6739static inline void tg3_netif_start(struct tg3 *tp)
6484{ 6740{
6741 tg3_ptp_resume(tp);
6742
6485 /* NOTE: unconditional netif_tx_wake_all_queues is only 6743 /* NOTE: unconditional netif_tx_wake_all_queues is only
6486 * appropriate so long as all callers are assured to 6744 * appropriate so long as all callers are assured to
6487 * have free tx slots (such as after tg3_init_hw) 6745 * have free tx slots (such as after tg3_init_hw)
6488 */ 6746 */
6489 netif_tx_wake_all_queues(tp->dev); 6747 netif_tx_wake_all_queues(tp->dev);
6490 6748
6749 if (tp->link_up)
6750 netif_carrier_on(tp->dev);
6751
6491 tg3_napi_enable(tp); 6752 tg3_napi_enable(tp);
6492 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; 6753 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6493 tg3_enable_ints(tp); 6754 tg3_enable_ints(tp);
@@ -7046,6 +7307,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7046 vlan = vlan_tx_tag_get(skb); 7307 vlan = vlan_tx_tag_get(skb);
7047 } 7308 }
7048 7309
7310 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7311 tg3_flag(tp, TX_TSTAMP_EN)) {
7312 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7313 base_flags |= TXD_FLAG_HWTSTAMP;
7314 }
7315
7049 len = skb_headlen(skb); 7316 len = skb_headlen(skb);
7050 7317
7051 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); 7318 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -8386,7 +8653,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8386 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq); 8653 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8387 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq); 8654 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8388 8655
8389 if (!netif_carrier_ok(tp->dev)) 8656 if (!tp->link_up)
8390 val = 0; 8657 val = 0;
8391 8658
8392 tw32(HOSTCC_STAT_COAL_TICKS, val); 8659 tw32(HOSTCC_STAT_COAL_TICKS, val);
@@ -8662,14 +8929,14 @@ static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8662 if (!tg3_flag(tp, SUPPORT_MSIX)) 8929 if (!tg3_flag(tp, SUPPORT_MSIX))
8663 return; 8930 return;
8664 8931
8665 if (tp->irq_cnt <= 2) { 8932 if (tp->rxq_cnt == 1) {
8666 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); 8933 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8667 return; 8934 return;
8668 } 8935 }
8669 8936
8670 /* Validate table against current IRQ count */ 8937 /* Validate table against current IRQ count */
8671 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { 8938 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8672 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1) 8939 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8673 break; 8940 break;
8674 } 8941 }
8675 8942
@@ -8914,9 +9181,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8914 */ 9181 */
8915 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM; 9182 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8916 9183
8917 tw32(GRC_MODE, 9184 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
8918 tp->grc_mode | 9185 if (tp->rxptpctl)
8919 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP)); 9186 tw32(TG3_RX_PTP_CTL,
9187 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9188
9189 if (tg3_flag(tp, PTP_CAPABLE))
9190 val |= GRC_MODE_TIME_SYNC_ENABLE;
9191
9192 tw32(GRC_MODE, tp->grc_mode | val);
8920 9193
8921 /* Setup the timer prescalar register. Clock is always 66Mhz. */ 9194 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8922 val = tr32(GRC_MISC_CFG); 9195 val = tr32(GRC_MISC_CFG);
@@ -9679,7 +9952,7 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
9679{ 9952{
9680 struct tg3_hw_stats *sp = tp->hw_stats; 9953 struct tg3_hw_stats *sp = tp->hw_stats;
9681 9954
9682 if (!netif_carrier_ok(tp->dev)) 9955 if (!tp->link_up)
9683 return; 9956 return;
9684 9957
9685 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS); 9958 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
@@ -9823,11 +10096,11 @@ static void tg3_timer(unsigned long __opaque)
9823 u32 mac_stat = tr32(MAC_STATUS); 10096 u32 mac_stat = tr32(MAC_STATUS);
9824 int need_setup = 0; 10097 int need_setup = 0;
9825 10098
9826 if (netif_carrier_ok(tp->dev) && 10099 if (tp->link_up &&
9827 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) { 10100 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9828 need_setup = 1; 10101 need_setup = 1;
9829 } 10102 }
9830 if (!netif_carrier_ok(tp->dev) && 10103 if (!tp->link_up &&
9831 (mac_stat & (MAC_STATUS_PCS_SYNCED | 10104 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9832 MAC_STATUS_SIGNAL_DET))) { 10105 MAC_STATUS_SIGNAL_DET))) {
9833 need_setup = 1; 10106 need_setup = 1;
@@ -9890,7 +10163,7 @@ restart_timer:
9890 add_timer(&tp->timer); 10163 add_timer(&tp->timer);
9891} 10164}
9892 10165
9893static void __devinit tg3_timer_init(struct tg3 *tp) 10166static void tg3_timer_init(struct tg3 *tp)
9894{ 10167{
9895 if (tg3_flag(tp, TAGGED_STATUS) && 10168 if (tg3_flag(tp, TAGGED_STATUS) &&
9896 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 && 10169 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
@@ -10316,7 +10589,8 @@ static void tg3_ints_fini(struct tg3 *tp)
10316 tg3_flag_clear(tp, ENABLE_TSS); 10589 tg3_flag_clear(tp, ENABLE_TSS);
10317} 10590}
10318 10591
10319static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq) 10592static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10593 bool init)
10320{ 10594{
10321 struct net_device *dev = tp->dev; 10595 struct net_device *dev = tp->dev;
10322 int i, err; 10596 int i, err;
@@ -10395,6 +10669,12 @@ static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
10395 tg3_flag_set(tp, INIT_COMPLETE); 10669 tg3_flag_set(tp, INIT_COMPLETE);
10396 tg3_enable_ints(tp); 10670 tg3_enable_ints(tp);
10397 10671
10672 if (init)
10673 tg3_ptp_init(tp);
10674 else
10675 tg3_ptp_resume(tp);
10676
10677
10398 tg3_full_unlock(tp); 10678 tg3_full_unlock(tp);
10399 10679
10400 netif_tx_start_all_queues(dev); 10680 netif_tx_start_all_queues(dev);
@@ -10429,10 +10709,8 @@ static void tg3_stop(struct tg3 *tp)
10429{ 10709{
10430 int i; 10710 int i;
10431 10711
10432 tg3_napi_disable(tp);
10433 tg3_reset_task_cancel(tp); 10712 tg3_reset_task_cancel(tp);
10434 10713 tg3_netif_stop(tp);
10435 netif_tx_disable(tp->dev);
10436 10714
10437 tg3_timer_stop(tp); 10715 tg3_timer_stop(tp);
10438 10716
@@ -10481,7 +10759,7 @@ static int tg3_open(struct net_device *dev)
10481 } 10759 }
10482 } 10760 }
10483 10761
10484 netif_carrier_off(tp->dev); 10762 tg3_carrier_off(tp);
10485 10763
10486 err = tg3_power_up(tp); 10764 err = tg3_power_up(tp);
10487 if (err) 10765 if (err)
@@ -10494,11 +10772,19 @@ static int tg3_open(struct net_device *dev)
10494 10772
10495 tg3_full_unlock(tp); 10773 tg3_full_unlock(tp);
10496 10774
10497 err = tg3_start(tp, true, true); 10775 err = tg3_start(tp, true, true, true);
10498 if (err) { 10776 if (err) {
10499 tg3_frob_aux_power(tp, false); 10777 tg3_frob_aux_power(tp, false);
10500 pci_set_power_state(tp->pdev, PCI_D3hot); 10778 pci_set_power_state(tp->pdev, PCI_D3hot);
10501 } 10779 }
10780
10781 if (tg3_flag(tp, PTP_CAPABLE)) {
10782 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10783 &tp->pdev->dev);
10784 if (IS_ERR(tp->ptp_clock))
10785 tp->ptp_clock = NULL;
10786 }
10787
10502 return err; 10788 return err;
10503} 10789}
10504 10790
@@ -10506,6 +10792,8 @@ static int tg3_close(struct net_device *dev)
10506{ 10792{
10507 struct tg3 *tp = netdev_priv(dev); 10793 struct tg3 *tp = netdev_priv(dev);
10508 10794
10795 tg3_ptp_fini(tp);
10796
10509 tg3_stop(tp); 10797 tg3_stop(tp);
10510 10798
10511 /* Clear stats across close / open calls */ 10799 /* Clear stats across close / open calls */
@@ -10514,7 +10802,7 @@ static int tg3_close(struct net_device *dev)
10514 10802
10515 tg3_power_down(tp); 10803 tg3_power_down(tp);
10516 10804
10517 netif_carrier_off(tp->dev); 10805 tg3_carrier_off(tp);
10518 10806
10519 return 0; 10807 return 0;
10520} 10808}
@@ -10888,7 +11176,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10888 cmd->advertising |= ADVERTISED_Asym_Pause; 11176 cmd->advertising |= ADVERTISED_Asym_Pause;
10889 } 11177 }
10890 } 11178 }
10891 if (netif_running(dev) && netif_carrier_ok(dev)) { 11179 if (netif_running(dev) && tp->link_up) {
10892 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed); 11180 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10893 cmd->duplex = tp->link_config.active_duplex; 11181 cmd->duplex = tp->link_config.active_duplex;
10894 cmd->lp_advertising = tp->link_config.rmt_adv; 11182 cmd->lp_advertising = tp->link_config.rmt_adv;
@@ -11406,9 +11694,9 @@ static int tg3_set_channels(struct net_device *dev,
11406 11694
11407 tg3_stop(tp); 11695 tg3_stop(tp);
11408 11696
11409 netif_carrier_off(dev); 11697 tg3_carrier_off(tp);
11410 11698
11411 tg3_start(tp, true, false); 11699 tg3_start(tp, true, false, false);
11412 11700
11413 return 0; 11701 return 0;
11414} 11702}
@@ -11755,7 +12043,7 @@ static int tg3_test_link(struct tg3 *tp)
11755 max = TG3_COPPER_TIMEOUT_SEC; 12043 max = TG3_COPPER_TIMEOUT_SEC;
11756 12044
11757 for (i = 0; i < max; i++) { 12045 for (i = 0; i < max; i++) {
11758 if (netif_carrier_ok(tp->dev)) 12046 if (tp->link_up)
11759 return 0; 12047 return 0;
11760 12048
11761 if (msleep_interruptible(1000)) 12049 if (msleep_interruptible(1000))
@@ -12326,19 +12614,19 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12326 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; 12614 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12327 12615
12328 if (!netif_running(tp->dev)) { 12616 if (!netif_running(tp->dev)) {
12329 data[0] = TG3_LOOPBACK_FAILED; 12617 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12330 data[1] = TG3_LOOPBACK_FAILED; 12618 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12331 if (do_extlpbk) 12619 if (do_extlpbk)
12332 data[2] = TG3_LOOPBACK_FAILED; 12620 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12333 goto done; 12621 goto done;
12334 } 12622 }
12335 12623
12336 err = tg3_reset_hw(tp, 1); 12624 err = tg3_reset_hw(tp, 1);
12337 if (err) { 12625 if (err) {
12338 data[0] = TG3_LOOPBACK_FAILED; 12626 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12339 data[1] = TG3_LOOPBACK_FAILED; 12627 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12340 if (do_extlpbk) 12628 if (do_extlpbk)
12341 data[2] = TG3_LOOPBACK_FAILED; 12629 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12342 goto done; 12630 goto done;
12343 } 12631 }
12344 12632
@@ -12361,11 +12649,11 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12361 tg3_mac_loopback(tp, true); 12649 tg3_mac_loopback(tp, true);
12362 12650
12363 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 12651 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12364 data[0] |= TG3_STD_LOOPBACK_FAILED; 12652 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12365 12653
12366 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 12654 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12367 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 12655 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12368 data[0] |= TG3_JMB_LOOPBACK_FAILED; 12656 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12369 12657
12370 tg3_mac_loopback(tp, false); 12658 tg3_mac_loopback(tp, false);
12371 } 12659 }
@@ -12384,13 +12672,13 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12384 } 12672 }
12385 12673
12386 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 12674 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12387 data[1] |= TG3_STD_LOOPBACK_FAILED; 12675 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12388 if (tg3_flag(tp, TSO_CAPABLE) && 12676 if (tg3_flag(tp, TSO_CAPABLE) &&
12389 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 12677 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12390 data[1] |= TG3_TSO_LOOPBACK_FAILED; 12678 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12391 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 12679 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12392 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 12680 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12393 data[1] |= TG3_JMB_LOOPBACK_FAILED; 12681 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12394 12682
12395 if (do_extlpbk) { 12683 if (do_extlpbk) {
12396 tg3_phy_lpbk_set(tp, 0, true); 12684 tg3_phy_lpbk_set(tp, 0, true);
@@ -12402,13 +12690,16 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12402 mdelay(40); 12690 mdelay(40);
12403 12691
12404 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false)) 12692 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12405 data[2] |= TG3_STD_LOOPBACK_FAILED; 12693 data[TG3_EXT_LOOPB_TEST] |=
12694 TG3_STD_LOOPBACK_FAILED;
12406 if (tg3_flag(tp, TSO_CAPABLE) && 12695 if (tg3_flag(tp, TSO_CAPABLE) &&
12407 tg3_run_loopback(tp, ETH_FRAME_LEN, true)) 12696 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12408 data[2] |= TG3_TSO_LOOPBACK_FAILED; 12697 data[TG3_EXT_LOOPB_TEST] |=
12698 TG3_TSO_LOOPBACK_FAILED;
12409 if (tg3_flag(tp, JUMBO_RING_ENABLE) && 12699 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12410 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false)) 12700 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12411 data[2] |= TG3_JMB_LOOPBACK_FAILED; 12701 data[TG3_EXT_LOOPB_TEST] |=
12702 TG3_JMB_LOOPBACK_FAILED;
12412 } 12703 }
12413 12704
12414 /* Re-enable gphy autopowerdown. */ 12705 /* Re-enable gphy autopowerdown. */
@@ -12416,7 +12707,8 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12416 tg3_phy_toggle_apd(tp, true); 12707 tg3_phy_toggle_apd(tp, true);
12417 } 12708 }
12418 12709
12419 err = (data[0] | data[1] | data[2]) ? -EIO : 0; 12710 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12711 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12420 12712
12421done: 12713done:
12422 tp->phy_flags |= eee_cap; 12714 tp->phy_flags |= eee_cap;
@@ -12441,11 +12733,11 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12441 12733
12442 if (tg3_test_nvram(tp) != 0) { 12734 if (tg3_test_nvram(tp) != 0) {
12443 etest->flags |= ETH_TEST_FL_FAILED; 12735 etest->flags |= ETH_TEST_FL_FAILED;
12444 data[0] = 1; 12736 data[TG3_NVRAM_TEST] = 1;
12445 } 12737 }
12446 if (!doextlpbk && tg3_test_link(tp)) { 12738 if (!doextlpbk && tg3_test_link(tp)) {
12447 etest->flags |= ETH_TEST_FL_FAILED; 12739 etest->flags |= ETH_TEST_FL_FAILED;
12448 data[1] = 1; 12740 data[TG3_LINK_TEST] = 1;
12449 } 12741 }
12450 if (etest->flags & ETH_TEST_FL_OFFLINE) { 12742 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12451 int err, err2 = 0, irq_sync = 0; 12743 int err, err2 = 0, irq_sync = 0;
@@ -12457,7 +12749,6 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12457 } 12749 }
12458 12750
12459 tg3_full_lock(tp, irq_sync); 12751 tg3_full_lock(tp, irq_sync);
12460
12461 tg3_halt(tp, RESET_KIND_SUSPEND, 1); 12752 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12462 err = tg3_nvram_lock(tp); 12753 err = tg3_nvram_lock(tp);
12463 tg3_halt_cpu(tp, RX_CPU_BASE); 12754 tg3_halt_cpu(tp, RX_CPU_BASE);
@@ -12471,25 +12762,25 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12471 12762
12472 if (tg3_test_registers(tp) != 0) { 12763 if (tg3_test_registers(tp) != 0) {
12473 etest->flags |= ETH_TEST_FL_FAILED; 12764 etest->flags |= ETH_TEST_FL_FAILED;
12474 data[2] = 1; 12765 data[TG3_REGISTER_TEST] = 1;
12475 } 12766 }
12476 12767
12477 if (tg3_test_memory(tp) != 0) { 12768 if (tg3_test_memory(tp) != 0) {
12478 etest->flags |= ETH_TEST_FL_FAILED; 12769 etest->flags |= ETH_TEST_FL_FAILED;
12479 data[3] = 1; 12770 data[TG3_MEMORY_TEST] = 1;
12480 } 12771 }
12481 12772
12482 if (doextlpbk) 12773 if (doextlpbk)
12483 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; 12774 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12484 12775
12485 if (tg3_test_loopback(tp, &data[4], doextlpbk)) 12776 if (tg3_test_loopback(tp, data, doextlpbk))
12486 etest->flags |= ETH_TEST_FL_FAILED; 12777 etest->flags |= ETH_TEST_FL_FAILED;
12487 12778
12488 tg3_full_unlock(tp); 12779 tg3_full_unlock(tp);
12489 12780
12490 if (tg3_test_interrupt(tp) != 0) { 12781 if (tg3_test_interrupt(tp) != 0) {
12491 etest->flags |= ETH_TEST_FL_FAILED; 12782 etest->flags |= ETH_TEST_FL_FAILED;
12492 data[7] = 1; 12783 data[TG3_INTERRUPT_TEST] = 1;
12493 } 12784 }
12494 12785
12495 tg3_full_lock(tp, 0); 12786 tg3_full_lock(tp, 0);
@@ -12512,6 +12803,96 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12512 12803
12513} 12804}
12514 12805
12806static int tg3_hwtstamp_ioctl(struct net_device *dev,
12807 struct ifreq *ifr, int cmd)
12808{
12809 struct tg3 *tp = netdev_priv(dev);
12810 struct hwtstamp_config stmpconf;
12811
12812 if (!tg3_flag(tp, PTP_CAPABLE))
12813 return -EINVAL;
12814
12815 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12816 return -EFAULT;
12817
12818 if (stmpconf.flags)
12819 return -EINVAL;
12820
12821 switch (stmpconf.tx_type) {
12822 case HWTSTAMP_TX_ON:
12823 tg3_flag_set(tp, TX_TSTAMP_EN);
12824 break;
12825 case HWTSTAMP_TX_OFF:
12826 tg3_flag_clear(tp, TX_TSTAMP_EN);
12827 break;
12828 default:
12829 return -ERANGE;
12830 }
12831
12832 switch (stmpconf.rx_filter) {
12833 case HWTSTAMP_FILTER_NONE:
12834 tp->rxptpctl = 0;
12835 break;
12836 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12837 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12838 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12839 break;
12840 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12841 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12842 TG3_RX_PTP_CTL_SYNC_EVNT;
12843 break;
12844 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12845 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12846 TG3_RX_PTP_CTL_DELAY_REQ;
12847 break;
12848 case HWTSTAMP_FILTER_PTP_V2_EVENT:
12849 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12850 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12851 break;
12852 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12853 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12854 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12855 break;
12856 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12857 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12858 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12859 break;
12860 case HWTSTAMP_FILTER_PTP_V2_SYNC:
12861 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12862 TG3_RX_PTP_CTL_SYNC_EVNT;
12863 break;
12864 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12865 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12866 TG3_RX_PTP_CTL_SYNC_EVNT;
12867 break;
12868 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12869 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12870 TG3_RX_PTP_CTL_SYNC_EVNT;
12871 break;
12872 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12873 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12874 TG3_RX_PTP_CTL_DELAY_REQ;
12875 break;
12876 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12877 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12878 TG3_RX_PTP_CTL_DELAY_REQ;
12879 break;
12880 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12881 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12882 TG3_RX_PTP_CTL_DELAY_REQ;
12883 break;
12884 default:
12885 return -ERANGE;
12886 }
12887
12888 if (netif_running(dev) && tp->rxptpctl)
12889 tw32(TG3_RX_PTP_CTL,
12890 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12891
12892 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12893 -EFAULT : 0;
12894}
12895
12515static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 12896static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12516{ 12897{
12517 struct mii_ioctl_data *data = if_mii(ifr); 12898 struct mii_ioctl_data *data = if_mii(ifr);
@@ -12562,6 +12943,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12562 12943
12563 return err; 12944 return err;
12564 12945
12946 case SIOCSHWTSTAMP:
12947 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12948
12565 default: 12949 default:
12566 /* do nothing */ 12950 /* do nothing */
12567 break; 12951 break;
@@ -12663,7 +13047,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12663 .set_rxfh_indir = tg3_set_rxfh_indir, 13047 .set_rxfh_indir = tg3_set_rxfh_indir,
12664 .get_channels = tg3_get_channels, 13048 .get_channels = tg3_get_channels,
12665 .set_channels = tg3_set_channels, 13049 .set_channels = tg3_set_channels,
12666 .get_ts_info = ethtool_op_get_ts_info, 13050 .get_ts_info = tg3_get_ts_info,
12667}; 13051};
12668 13052
12669static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 13053static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
@@ -12779,7 +13163,7 @@ static const struct net_device_ops tg3_netdev_ops = {
12779#endif 13163#endif
12780}; 13164};
12781 13165
12782static void __devinit tg3_get_eeprom_size(struct tg3 *tp) 13166static void tg3_get_eeprom_size(struct tg3 *tp)
12783{ 13167{
12784 u32 cursize, val, magic; 13168 u32 cursize, val, magic;
12785 13169
@@ -12813,7 +13197,7 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12813 tp->nvram_size = cursize; 13197 tp->nvram_size = cursize;
12814} 13198}
12815 13199
12816static void __devinit tg3_get_nvram_size(struct tg3 *tp) 13200static void tg3_get_nvram_size(struct tg3 *tp)
12817{ 13201{
12818 u32 val; 13202 u32 val;
12819 13203
@@ -12846,7 +13230,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12846 tp->nvram_size = TG3_NVRAM_SIZE_512KB; 13230 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12847} 13231}
12848 13232
12849static void __devinit tg3_get_nvram_info(struct tg3 *tp) 13233static void tg3_get_nvram_info(struct tg3 *tp)
12850{ 13234{
12851 u32 nvcfg1; 13235 u32 nvcfg1;
12852 13236
@@ -12897,7 +13281,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12897 } 13281 }
12898} 13282}
12899 13283
12900static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) 13284static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12901{ 13285{
12902 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { 13286 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12903 case FLASH_5752PAGE_SIZE_256: 13287 case FLASH_5752PAGE_SIZE_256:
@@ -12924,7 +13308,7 @@ static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12924 } 13308 }
12925} 13309}
12926 13310
12927static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) 13311static void tg3_get_5752_nvram_info(struct tg3 *tp)
12928{ 13312{
12929 u32 nvcfg1; 13313 u32 nvcfg1;
12930 13314
@@ -12965,7 +13349,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12965 } 13349 }
12966} 13350}
12967 13351
12968static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) 13352static void tg3_get_5755_nvram_info(struct tg3 *tp)
12969{ 13353{
12970 u32 nvcfg1, protect = 0; 13354 u32 nvcfg1, protect = 0;
12971 13355
@@ -13021,7 +13405,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
13021 } 13405 }
13022} 13406}
13023 13407
13024static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) 13408static void tg3_get_5787_nvram_info(struct tg3 *tp)
13025{ 13409{
13026 u32 nvcfg1; 13410 u32 nvcfg1;
13027 13411
@@ -13059,7 +13443,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
13059 } 13443 }
13060} 13444}
13061 13445
13062static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) 13446static void tg3_get_5761_nvram_info(struct tg3 *tp)
13063{ 13447{
13064 u32 nvcfg1, protect = 0; 13448 u32 nvcfg1, protect = 0;
13065 13449
@@ -13134,14 +13518,14 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
13134 } 13518 }
13135} 13519}
13136 13520
13137static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) 13521static void tg3_get_5906_nvram_info(struct tg3 *tp)
13138{ 13522{
13139 tp->nvram_jedecnum = JEDEC_ATMEL; 13523 tp->nvram_jedecnum = JEDEC_ATMEL;
13140 tg3_flag_set(tp, NVRAM_BUFFERED); 13524 tg3_flag_set(tp, NVRAM_BUFFERED);
13141 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; 13525 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13142} 13526}
13143 13527
13144static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) 13528static void tg3_get_57780_nvram_info(struct tg3 *tp)
13145{ 13529{
13146 u32 nvcfg1; 13530 u32 nvcfg1;
13147 13531
@@ -13214,7 +13598,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
13214} 13598}
13215 13599
13216 13600
13217static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) 13601static void tg3_get_5717_nvram_info(struct tg3 *tp)
13218{ 13602{
13219 u32 nvcfg1; 13603 u32 nvcfg1;
13220 13604
@@ -13292,7 +13676,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13292 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); 13676 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13293} 13677}
13294 13678
13295static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) 13679static void tg3_get_5720_nvram_info(struct tg3 *tp)
13296{ 13680{
13297 u32 nvcfg1, nvmpinstrp; 13681 u32 nvcfg1, nvmpinstrp;
13298 13682
@@ -13405,7 +13789,7 @@ static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13405} 13789}
13406 13790
13407/* Chips other than 5700/5701 use the NVRAM for fetching info. */ 13791/* Chips other than 5700/5701 use the NVRAM for fetching info. */
13408static void __devinit tg3_nvram_init(struct tg3 *tp) 13792static void tg3_nvram_init(struct tg3 *tp)
13409{ 13793{
13410 tw32_f(GRC_EEPROM_ADDR, 13794 tw32_f(GRC_EEPROM_ADDR,
13411 (EEPROM_ADDR_FSM_RESET | 13795 (EEPROM_ADDR_FSM_RESET |
@@ -13475,7 +13859,7 @@ struct subsys_tbl_ent {
13475 u32 phy_id; 13859 u32 phy_id;
13476}; 13860};
13477 13861
13478static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = { 13862static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13479 /* Broadcom boards. */ 13863 /* Broadcom boards. */
13480 { TG3PCI_SUBVENDOR_ID_BROADCOM, 13864 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13481 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 }, 13865 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
@@ -13539,7 +13923,7 @@ static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13539 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 } 13923 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13540}; 13924};
13541 13925
13542static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp) 13926static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13543{ 13927{
13544 int i; 13928 int i;
13545 13929
@@ -13553,7 +13937,7 @@ static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13553 return NULL; 13937 return NULL;
13554} 13938}
13555 13939
13556static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) 13940static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13557{ 13941{
13558 u32 val; 13942 u32 val;
13559 13943
@@ -13753,7 +14137,7 @@ done:
13753 device_set_wakeup_capable(&tp->pdev->dev, false); 14137 device_set_wakeup_capable(&tp->pdev->dev, false);
13754} 14138}
13755 14139
13756static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) 14140static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13757{ 14141{
13758 int i; 14142 int i;
13759 u32 val; 14143 u32 val;
@@ -13776,7 +14160,7 @@ static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13776 * configuration is a 32-bit value that straddles the alignment boundary. 14160 * configuration is a 32-bit value that straddles the alignment boundary.
13777 * We do two 32-bit reads and then shift and merge the results. 14161 * We do two 32-bit reads and then shift and merge the results.
13778 */ 14162 */
13779static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) 14163static u32 tg3_read_otp_phycfg(struct tg3 *tp)
13780{ 14164{
13781 u32 bhalf_otp, thalf_otp; 14165 u32 bhalf_otp, thalf_otp;
13782 14166
@@ -13802,7 +14186,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13802 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); 14186 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13803} 14187}
13804 14188
13805static void __devinit tg3_phy_init_link_config(struct tg3 *tp) 14189static void tg3_phy_init_link_config(struct tg3 *tp)
13806{ 14190{
13807 u32 adv = ADVERTISED_Autoneg; 14191 u32 adv = ADVERTISED_Autoneg;
13808 14192
@@ -13829,7 +14213,7 @@ static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13829 tp->old_link = -1; 14213 tp->old_link = -1;
13830} 14214}
13831 14215
13832static int __devinit tg3_phy_probe(struct tg3 *tp) 14216static int tg3_phy_probe(struct tg3 *tp)
13833{ 14217{
13834 u32 hw_phy_id_1, hw_phy_id_2; 14218 u32 hw_phy_id_1, hw_phy_id_2;
13835 u32 hw_phy_id, hw_phy_id_masked; 14219 u32 hw_phy_id, hw_phy_id_masked;
@@ -13957,7 +14341,7 @@ skip_phy_reset:
13957 return err; 14341 return err;
13958} 14342}
13959 14343
13960static void __devinit tg3_read_vpd(struct tg3 *tp) 14344static void tg3_read_vpd(struct tg3 *tp)
13961{ 14345{
13962 u8 *vpd_data; 14346 u8 *vpd_data;
13963 unsigned int block_end, rosize, len; 14347 unsigned int block_end, rosize, len;
@@ -14026,7 +14410,8 @@ out_not_found:
14026 14410
14027out_no_vpd: 14411out_no_vpd:
14028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 14412 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14029 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717) 14413 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14030 strcpy(tp->board_part_number, "BCM5717"); 14415 strcpy(tp->board_part_number, "BCM5717");
14031 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718) 14416 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14032 strcpy(tp->board_part_number, "BCM5718"); 14417 strcpy(tp->board_part_number, "BCM5718");
@@ -14077,7 +14462,7 @@ nomatch:
14077 } 14462 }
14078} 14463}
14079 14464
14080static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset) 14465static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14081{ 14466{
14082 u32 val; 14467 u32 val;
14083 14468
@@ -14090,7 +14475,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14090 return 1; 14475 return 1;
14091} 14476}
14092 14477
14093static void __devinit tg3_read_bc_ver(struct tg3 *tp) 14478static void tg3_read_bc_ver(struct tg3 *tp)
14094{ 14479{
14095 u32 val, offset, start, ver_offset; 14480 u32 val, offset, start, ver_offset;
14096 int i, dst_off; 14481 int i, dst_off;
@@ -14142,7 +14527,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
14142 } 14527 }
14143} 14528}
14144 14529
14145static void __devinit tg3_read_hwsb_ver(struct tg3 *tp) 14530static void tg3_read_hwsb_ver(struct tg3 *tp)
14146{ 14531{
14147 u32 val, major, minor; 14532 u32 val, major, minor;
14148 14533
@@ -14158,7 +14543,7 @@ static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
14158 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor); 14543 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
14159} 14544}
14160 14545
14161static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val) 14546static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14162{ 14547{
14163 u32 offset, major, minor, build; 14548 u32 offset, major, minor, build;
14164 14549
@@ -14213,7 +14598,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
14213 } 14598 }
14214} 14599}
14215 14600
14216static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp) 14601static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14217{ 14602{
14218 u32 val, offset, start; 14603 u32 val, offset, start;
14219 int i, vlen; 14604 int i, vlen;
@@ -14265,7 +14650,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14265 } 14650 }
14266} 14651}
14267 14652
14268static void __devinit tg3_probe_ncsi(struct tg3 *tp) 14653static void tg3_probe_ncsi(struct tg3 *tp)
14269{ 14654{
14270 u32 apedata; 14655 u32 apedata;
14271 14656
@@ -14281,7 +14666,7 @@ static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14281 tg3_flag_set(tp, APE_HAS_NCSI); 14666 tg3_flag_set(tp, APE_HAS_NCSI);
14282} 14667}
14283 14668
14284static void __devinit tg3_read_dash_ver(struct tg3 *tp) 14669static void tg3_read_dash_ver(struct tg3 *tp)
14285{ 14670{
14286 int vlen; 14671 int vlen;
14287 u32 apedata; 14672 u32 apedata;
@@ -14304,7 +14689,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14304 (apedata & APE_FW_VERSION_BLDMSK)); 14689 (apedata & APE_FW_VERSION_BLDMSK));
14305} 14690}
14306 14691
14307static void __devinit tg3_read_fw_ver(struct tg3 *tp) 14692static void tg3_read_fw_ver(struct tg3 *tp)
14308{ 14693{
14309 u32 val; 14694 u32 val;
14310 bool vpd_vers = false; 14695 bool vpd_vers = false;
@@ -14357,7 +14742,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14357 { }, 14742 { },
14358}; 14743};
14359 14744
14360static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) 14745static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14361{ 14746{
14362 struct pci_dev *peer; 14747 struct pci_dev *peer;
14363 unsigned int func, devnr = tp->pdev->devfn & ~7; 14748 unsigned int func, devnr = tp->pdev->devfn & ~7;
@@ -14385,7 +14770,7 @@ static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14385 return peer; 14770 return peer;
14386} 14771}
14387 14772
14388static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) 14773static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14389{ 14774{
14390 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; 14775 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14391 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 14776 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
@@ -14397,6 +14782,7 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14397 tg3_flag_set(tp, CPMU_PRESENT); 14782 tg3_flag_set(tp, CPMU_PRESENT);
14398 14783
14399 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 14784 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 14786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14401 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 14787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14402 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) 14788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
@@ -14424,6 +14810,9 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14424 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) 14810 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14425 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; 14811 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14426 14812
14813 if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14814 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14815
14427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 14816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14428 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || 14817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) 14818 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
@@ -14462,7 +14851,29 @@ static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14462 tg3_flag_set(tp, 5705_PLUS); 14851 tg3_flag_set(tp, 5705_PLUS);
14463} 14852}
14464 14853
14465static int __devinit tg3_get_invariants(struct tg3 *tp) 14854static bool tg3_10_100_only_device(struct tg3 *tp,
14855 const struct pci_device_id *ent)
14856{
14857 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14858
14859 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14860 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14861 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14862 return true;
14863
14864 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14865 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14866 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14867 return true;
14868 } else {
14869 return true;
14870 }
14871 }
14872
14873 return false;
14874}
14875
14876static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14466{ 14877{
14467 u32 misc_ctrl_reg; 14878 u32 misc_ctrl_reg;
14468 u32 pci_state_reg, grc_misc_cfg; 14879 u32 pci_state_reg, grc_misc_cfg;
@@ -15141,22 +15552,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
15141 else 15552 else
15142 tp->mac_mode = 0; 15553 tp->mac_mode = 0;
15143 15554
15144 /* these are limited to 10/100 only */ 15555 if (tg3_10_100_only_device(tp, ent))
15145 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15146 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15147 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15148 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15149 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
15150 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
15151 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
15152 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
15153 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
15154 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
15155 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
15156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
15157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15159 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15160 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY; 15556 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15161 15557
15162 err = tg3_phy_probe(tp); 15558 err = tg3_phy_probe(tp);
@@ -15236,7 +15632,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
15236} 15632}
15237 15633
15238#ifdef CONFIG_SPARC 15634#ifdef CONFIG_SPARC
15239static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp) 15635static int tg3_get_macaddr_sparc(struct tg3 *tp)
15240{ 15636{
15241 struct net_device *dev = tp->dev; 15637 struct net_device *dev = tp->dev;
15242 struct pci_dev *pdev = tp->pdev; 15638 struct pci_dev *pdev = tp->pdev;
@@ -15253,7 +15649,7 @@ static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15253 return -ENODEV; 15649 return -ENODEV;
15254} 15650}
15255 15651
15256static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp) 15652static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15257{ 15653{
15258 struct net_device *dev = tp->dev; 15654 struct net_device *dev = tp->dev;
15259 15655
@@ -15263,7 +15659,7 @@ static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15263} 15659}
15264#endif 15660#endif
15265 15661
15266static int __devinit tg3_get_device_address(struct tg3 *tp) 15662static int tg3_get_device_address(struct tg3 *tp)
15267{ 15663{
15268 struct net_device *dev = tp->dev; 15664 struct net_device *dev = tp->dev;
15269 u32 hi, lo, mac_offset; 15665 u32 hi, lo, mac_offset;
@@ -15342,7 +15738,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
15342#define BOUNDARY_SINGLE_CACHELINE 1 15738#define BOUNDARY_SINGLE_CACHELINE 1
15343#define BOUNDARY_MULTI_CACHELINE 2 15739#define BOUNDARY_MULTI_CACHELINE 2
15344 15740
15345static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val) 15741static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15346{ 15742{
15347 int cacheline_size; 15743 int cacheline_size;
15348 u8 byte; 15744 u8 byte;
@@ -15483,7 +15879,8 @@ out:
15483 return val; 15879 return val;
15484} 15880}
15485 15881
15486static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device) 15882static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15883 int size, int to_device)
15487{ 15884{
15488 struct tg3_internal_buffer_desc test_desc; 15885 struct tg3_internal_buffer_desc test_desc;
15489 u32 sram_dma_descs; 15886 u32 sram_dma_descs;
@@ -15570,7 +15967,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15570 { }, 15967 { },
15571}; 15968};
15572 15969
15573static int __devinit tg3_test_dma(struct tg3 *tp) 15970static int tg3_test_dma(struct tg3 *tp)
15574{ 15971{
15575 dma_addr_t buf_dma; 15972 dma_addr_t buf_dma;
15576 u32 *buf, saved_dma_rwctrl; 15973 u32 *buf, saved_dma_rwctrl;
@@ -15760,7 +16157,7 @@ out_nofree:
15760 return ret; 16157 return ret;
15761} 16158}
15762 16159
15763static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) 16160static void tg3_init_bufmgr_config(struct tg3 *tp)
15764{ 16161{
15765 if (tg3_flag(tp, 57765_PLUS)) { 16162 if (tg3_flag(tp, 57765_PLUS)) {
15766 tp->bufmgr_config.mbuf_read_dma_low_water = 16163 tp->bufmgr_config.mbuf_read_dma_low_water =
@@ -15816,7 +16213,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15816 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER; 16213 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15817} 16214}
15818 16215
15819static char * __devinit tg3_phy_string(struct tg3 *tp) 16216static char *tg3_phy_string(struct tg3 *tp)
15820{ 16217{
15821 switch (tp->phy_id & TG3_PHY_ID_MASK) { 16218 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15822 case TG3_PHY_ID_BCM5400: return "5400"; 16219 case TG3_PHY_ID_BCM5400: return "5400";
@@ -15847,7 +16244,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
15847 } 16244 }
15848} 16245}
15849 16246
15850static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) 16247static char *tg3_bus_string(struct tg3 *tp, char *str)
15851{ 16248{
15852 if (tg3_flag(tp, PCI_EXPRESS)) { 16249 if (tg3_flag(tp, PCI_EXPRESS)) {
15853 strcpy(str, "PCI Express"); 16250 strcpy(str, "PCI Express");
@@ -15883,7 +16280,7 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15883 return str; 16280 return str;
15884} 16281}
15885 16282
15886static void __devinit tg3_init_coal(struct tg3 *tp) 16283static void tg3_init_coal(struct tg3 *tp)
15887{ 16284{
15888 struct ethtool_coalesce *ec = &tp->coal; 16285 struct ethtool_coalesce *ec = &tp->coal;
15889 16286
@@ -15914,7 +16311,7 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
15914 } 16311 }
15915} 16312}
15916 16313
15917static int __devinit tg3_init_one(struct pci_dev *pdev, 16314static int tg3_init_one(struct pci_dev *pdev,
15918 const struct pci_device_id *ent) 16315 const struct pci_device_id *ent)
15919{ 16316{
15920 struct net_device *dev; 16317 struct net_device *dev;
@@ -16013,6 +16410,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
16013 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S || 16410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16014 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE || 16411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16015 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || 16412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16016 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 16414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16017 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || 16415 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16018 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) { 16416 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
@@ -16034,7 +16432,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
16034 dev->netdev_ops = &tg3_netdev_ops; 16432 dev->netdev_ops = &tg3_netdev_ops;
16035 dev->irq = pdev->irq; 16433 dev->irq = pdev->irq;
16036 16434
16037 err = tg3_get_invariants(tp); 16435 err = tg3_get_invariants(tp, ent);
16038 if (err) { 16436 if (err) {
16039 dev_err(&pdev->dev, 16437 dev_err(&pdev->dev,
16040 "Problem fetching invariants of chip, aborting\n"); 16438 "Problem fetching invariants of chip, aborting\n");
@@ -16209,6 +16607,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
16209 16607
16210 pci_set_drvdata(pdev, dev); 16608 pci_set_drvdata(pdev, dev);
16211 16609
16610 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
16612 tg3_flag_set(tp, PTP_CAPABLE);
16613
16212 if (tg3_flag(tp, 5717_PLUS)) { 16614 if (tg3_flag(tp, 5717_PLUS)) {
16213 /* Resume a low-power mode */ 16615 /* Resume a low-power mode */
16214 tg3_frob_aux_power(tp, false); 16616 tg3_frob_aux_power(tp, false);
@@ -16293,7 +16695,7 @@ err_out_disable_pdev:
16293 return err; 16695 return err;
16294} 16696}
16295 16697
16296static void __devexit tg3_remove_one(struct pci_dev *pdev) 16698static void tg3_remove_one(struct pci_dev *pdev)
16297{ 16699{
16298 struct net_device *dev = pci_get_drvdata(pdev); 16700 struct net_device *dev = pci_get_drvdata(pdev);
16299 16701
@@ -16534,8 +16936,8 @@ static void tg3_io_resume(struct pci_dev *pdev)
16534 tg3_full_lock(tp, 0); 16936 tg3_full_lock(tp, 0);
16535 tg3_flag_set(tp, INIT_COMPLETE); 16937 tg3_flag_set(tp, INIT_COMPLETE);
16536 err = tg3_restart_hw(tp, 1); 16938 err = tg3_restart_hw(tp, 1);
16537 tg3_full_unlock(tp);
16538 if (err) { 16939 if (err) {
16940 tg3_full_unlock(tp);
16539 netdev_err(netdev, "Cannot restart hardware after reset.\n"); 16941 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16540 goto done; 16942 goto done;
16541 } 16943 }
@@ -16546,6 +16948,8 @@ static void tg3_io_resume(struct pci_dev *pdev)
16546 16948
16547 tg3_netif_start(tp); 16949 tg3_netif_start(tp);
16548 16950
16951 tg3_full_unlock(tp);
16952
16549 tg3_phy_start(tp); 16953 tg3_phy_start(tp);
16550 16954
16551done: 16955done:
@@ -16562,7 +16966,7 @@ static struct pci_driver tg3_driver = {
16562 .name = DRV_MODULE_NAME, 16966 .name = DRV_MODULE_NAME,
16563 .id_table = tg3_pci_tbl, 16967 .id_table = tg3_pci_tbl,
16564 .probe = tg3_init_one, 16968 .probe = tg3_init_one,
16565 .remove = __devexit_p(tg3_remove_one), 16969 .remove = tg3_remove_one,
16566 .err_handler = &tg3_err_handler, 16970 .err_handler = &tg3_err_handler,
16567 .driver.pm = TG3_PM_OPS, 16971 .driver.pm = TG3_PM_OPS,
16568}; 16972};
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d9308c32102e..d330e81f5793 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -44,12 +44,14 @@
44#define TG3PCI_DEVICE_TIGON3_5761S 0x1688 44#define TG3PCI_DEVICE_TIGON3_5761S 0x1688
45#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689 45#define TG3PCI_DEVICE_TIGON3_5761SE 0x1689
46#define TG3PCI_DEVICE_TIGON3_57780 0x1692 46#define TG3PCI_DEVICE_TIGON3_57780 0x1692
47#define TG3PCI_DEVICE_TIGON3_5787M 0x1693
47#define TG3PCI_DEVICE_TIGON3_57760 0x1690 48#define TG3PCI_DEVICE_TIGON3_57760 0x1690
48#define TG3PCI_DEVICE_TIGON3_57790 0x1694 49#define TG3PCI_DEVICE_TIGON3_57790 0x1694
49#define TG3PCI_DEVICE_TIGON3_57788 0x1691 50#define TG3PCI_DEVICE_TIGON3_57788 0x1691
50#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 51#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
51#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 52#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
52#define TG3PCI_DEVICE_TIGON3_5717 0x1655 53#define TG3PCI_DEVICE_TIGON3_5717 0x1655
54#define TG3PCI_DEVICE_TIGON3_5717_C 0x1665
53#define TG3PCI_DEVICE_TIGON3_5718 0x1656 55#define TG3PCI_DEVICE_TIGON3_5718 0x1656
54#define TG3PCI_DEVICE_TIGON3_57781 0x16b1 56#define TG3PCI_DEVICE_TIGON3_57781 0x16b1
55#define TG3PCI_DEVICE_TIGON3_57785 0x16b5 57#define TG3PCI_DEVICE_TIGON3_57785 0x16b5
@@ -95,6 +97,10 @@
95#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099 97#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
96#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM 98#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
97#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281 99#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
100#define TG3PCI_SUBDEVICE_ID_ACER_57780_A 0x0601
101#define TG3PCI_SUBDEVICE_ID_ACER_57780_B 0x0612
102#define TG3PCI_SUBDEVICE_ID_LENOVO_5787M 0x3056
103
98/* 0x30 --> 0x64 unused */ 104/* 0x30 --> 0x64 unused */
99#define TG3PCI_MSI_DATA 0x00000064 105#define TG3PCI_MSI_DATA 0x00000064
100/* 0x66 --> 0x68 unused */ 106/* 0x66 --> 0x68 unused */
@@ -149,6 +155,7 @@
149#define CHIPREV_ID_57780_A0 0x57780000 155#define CHIPREV_ID_57780_A0 0x57780000
150#define CHIPREV_ID_57780_A1 0x57780001 156#define CHIPREV_ID_57780_A1 0x57780001
151#define CHIPREV_ID_5717_A0 0x05717000 157#define CHIPREV_ID_5717_A0 0x05717000
158#define CHIPREV_ID_5717_C0 0x05717200
152#define CHIPREV_ID_57765_A0 0x57785000 159#define CHIPREV_ID_57765_A0 0x57785000
153#define CHIPREV_ID_5719_A0 0x05719000 160#define CHIPREV_ID_5719_A0 0x05719000
154#define CHIPREV_ID_5720_A0 0x05720000 161#define CHIPREV_ID_5720_A0 0x05720000
@@ -765,7 +772,10 @@
765#define SG_DIG_MAC_ACK_STATUS 0x00000004 772#define SG_DIG_MAC_ACK_STATUS 0x00000004
766#define SG_DIG_AUTONEG_COMPLETE 0x00000002 773#define SG_DIG_AUTONEG_COMPLETE 0x00000002
767#define SG_DIG_AUTONEG_ERROR 0x00000001 774#define SG_DIG_AUTONEG_ERROR 0x00000001
768/* 0x5b8 --> 0x600 unused */ 775#define TG3_TX_TSTAMP_LSB 0x000005c0
776#define TG3_TX_TSTAMP_MSB 0x000005c4
777#define TG3_TSTAMP_MASK 0x7fffffffffffffff
778/* 0x5c8 --> 0x600 unused */
769#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */ 779#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
770#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */ 780#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
771/* 0x624 --> 0x670 unused */ 781/* 0x624 --> 0x670 unused */
@@ -782,7 +792,36 @@
782#define MAC_RSS_HASH_KEY_7 0x0000068c 792#define MAC_RSS_HASH_KEY_7 0x0000068c
783#define MAC_RSS_HASH_KEY_8 0x00000690 793#define MAC_RSS_HASH_KEY_8 0x00000690
784#define MAC_RSS_HASH_KEY_9 0x00000694 794#define MAC_RSS_HASH_KEY_9 0x00000694
785/* 0x698 --> 0x800 unused */ 795/* 0x698 --> 0x6b0 unused */
796
797#define TG3_RX_TSTAMP_LSB 0x000006b0
798#define TG3_RX_TSTAMP_MSB 0x000006b4
799/* 0x6b8 --> 0x6c8 unused */
800
801#define TG3_RX_PTP_CTL 0x000006c8
802#define TG3_RX_PTP_CTL_SYNC_EVNT 0x00000001
803#define TG3_RX_PTP_CTL_DELAY_REQ 0x00000002
804#define TG3_RX_PTP_CTL_PDLAY_REQ 0x00000004
805#define TG3_RX_PTP_CTL_PDLAY_RES 0x00000008
806#define TG3_RX_PTP_CTL_ALL_V1_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \
807 TG3_RX_PTP_CTL_DELAY_REQ)
808#define TG3_RX_PTP_CTL_ALL_V2_EVENTS (TG3_RX_PTP_CTL_SYNC_EVNT | \
809 TG3_RX_PTP_CTL_DELAY_REQ | \
810 TG3_RX_PTP_CTL_PDLAY_REQ | \
811 TG3_RX_PTP_CTL_PDLAY_RES)
812#define TG3_RX_PTP_CTL_FOLLOW_UP 0x00000100
813#define TG3_RX_PTP_CTL_DELAY_RES 0x00000200
814#define TG3_RX_PTP_CTL_PDRES_FLW_UP 0x00000400
815#define TG3_RX_PTP_CTL_ANNOUNCE 0x00000800
816#define TG3_RX_PTP_CTL_SIGNALING 0x00001000
817#define TG3_RX_PTP_CTL_MANAGEMENT 0x00002000
818#define TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN 0x00800000
819#define TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN 0x01000000
820#define TG3_RX_PTP_CTL_RX_PTP_V2_EN (TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | \
821 TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN)
822#define TG3_RX_PTP_CTL_RX_PTP_V1_EN 0x02000000
823#define TG3_RX_PTP_CTL_HWTS_INTERLOCK 0x04000000
824/* 0x6cc --> 0x800 unused */
786 825
787#define MAC_TX_STATS_OCTETS 0x00000800 826#define MAC_TX_STATS_OCTETS 0x00000800
788#define MAC_TX_STATS_RESV1 0x00000804 827#define MAC_TX_STATS_RESV1 0x00000804
@@ -1662,6 +1701,7 @@
1662#define GRC_MODE_HOST_STACKUP 0x00010000 1701#define GRC_MODE_HOST_STACKUP 0x00010000
1663#define GRC_MODE_HOST_SENDBDS 0x00020000 1702#define GRC_MODE_HOST_SENDBDS 0x00020000
1664#define GRC_MODE_HTX2B_ENABLE 0x00040000 1703#define GRC_MODE_HTX2B_ENABLE 0x00040000
1704#define GRC_MODE_TIME_SYNC_ENABLE 0x00080000
1665#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000 1705#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
1666#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000 1706#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
1667#define GRC_MODE_PCIE_TL_SEL 0x00000000 1707#define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1764,7 +1804,17 @@
1764#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000 1804#define GRC_VCPU_EXT_CTRL_DISABLE_WOL 0x20000000
1765#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ 1805#define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */
1766 1806
1767/* 0x6c00 --> 0x7000 unused */ 1807#define TG3_EAV_REF_CLCK_LSB 0x00006900
1808#define TG3_EAV_REF_CLCK_MSB 0x00006904
1809#define TG3_EAV_REF_CLCK_CTL 0x00006908
1810#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
1811#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
1812#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
1813#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
1814#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
1815
1816#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
1817/* 0x690c --> 0x7000 unused */
1768 1818
1769/* NVRAM Control registers */ 1819/* NVRAM Control registers */
1770#define NVRAM_CMD 0x00007000 1820#define NVRAM_CMD 0x00007000
@@ -2432,6 +2482,7 @@ struct tg3_tx_buffer_desc {
2432#define TXD_FLAG_IP_FRAG 0x0008 2482#define TXD_FLAG_IP_FRAG 0x0008
2433#define TXD_FLAG_JMB_PKT 0x0008 2483#define TXD_FLAG_JMB_PKT 0x0008
2434#define TXD_FLAG_IP_FRAG_END 0x0010 2484#define TXD_FLAG_IP_FRAG_END 0x0010
2485#define TXD_FLAG_HWTSTAMP 0x0020
2435#define TXD_FLAG_VLAN 0x0040 2486#define TXD_FLAG_VLAN 0x0040
2436#define TXD_FLAG_COAL_NOW 0x0080 2487#define TXD_FLAG_COAL_NOW 0x0080
2437#define TXD_FLAG_CPU_PRE_DMA 0x0100 2488#define TXD_FLAG_CPU_PRE_DMA 0x0100
@@ -2473,6 +2524,9 @@ struct tg3_rx_buffer_desc {
2473#define RXD_FLAG_IP_CSUM 0x1000 2524#define RXD_FLAG_IP_CSUM 0x1000
2474#define RXD_FLAG_TCPUDP_CSUM 0x2000 2525#define RXD_FLAG_TCPUDP_CSUM 0x2000
2475#define RXD_FLAG_IS_TCP 0x4000 2526#define RXD_FLAG_IS_TCP 0x4000
2527#define RXD_FLAG_PTPSTAT_MASK 0x0210
2528#define RXD_FLAG_PTPSTAT_PTPV1 0x0010
2529#define RXD_FLAG_PTPSTAT_PTPV2 0x0200
2476 2530
2477 u32 ip_tcp_csum; 2531 u32 ip_tcp_csum;
2478#define RXD_IPCSUM_MASK 0xffff0000 2532#define RXD_IPCSUM_MASK 0xffff0000
@@ -2963,9 +3017,11 @@ enum TG3_FLAGS {
2963 TG3_FLAG_USE_JUMBO_BDFLAG, 3017 TG3_FLAG_USE_JUMBO_BDFLAG,
2964 TG3_FLAG_L1PLLPD_EN, 3018 TG3_FLAG_L1PLLPD_EN,
2965 TG3_FLAG_APE_HAS_NCSI, 3019 TG3_FLAG_APE_HAS_NCSI,
3020 TG3_FLAG_TX_TSTAMP_EN,
2966 TG3_FLAG_4K_FIFO_LIMIT, 3021 TG3_FLAG_4K_FIFO_LIMIT,
2967 TG3_FLAG_5719_RDMA_BUG, 3022 TG3_FLAG_5719_RDMA_BUG,
2968 TG3_FLAG_RESET_TASK_PENDING, 3023 TG3_FLAG_RESET_TASK_PENDING,
3024 TG3_FLAG_PTP_CAPABLE,
2969 TG3_FLAG_5705_PLUS, 3025 TG3_FLAG_5705_PLUS,
2970 TG3_FLAG_IS_5788, 3026 TG3_FLAG_IS_5788,
2971 TG3_FLAG_5750_PLUS, 3027 TG3_FLAG_5750_PLUS,
@@ -3034,6 +3090,10 @@ struct tg3 {
3034 u32 coal_now; 3090 u32 coal_now;
3035 u32 msg_enable; 3091 u32 msg_enable;
3036 3092
3093 struct ptp_clock_info ptp_info;
3094 struct ptp_clock *ptp_clock;
3095 s64 ptp_adjust;
3096
3037 /* begin "tx thread" cacheline section */ 3097 /* begin "tx thread" cacheline section */
3038 void (*write32_tx_mbox) (struct tg3 *, u32, 3098 void (*write32_tx_mbox) (struct tg3 *, u32,
3039 u32); 3099 u32);
@@ -3101,6 +3161,7 @@ struct tg3 {
3101 u32 dma_rwctrl; 3161 u32 dma_rwctrl;
3102 u32 coalesce_mode; 3162 u32 coalesce_mode;
3103 u32 pwrmgmt_thresh; 3163 u32 pwrmgmt_thresh;
3164 u32 rxptpctl;
3104 3165
3105 /* PCI block */ 3166 /* PCI block */
3106 u32 pci_chip_rev_id; 3167 u32 pci_chip_rev_id;
@@ -3262,6 +3323,7 @@ struct tg3 {
3262#if IS_ENABLED(CONFIG_HWMON) 3323#if IS_ENABLED(CONFIG_HWMON)
3263 struct device *hwmon_dev; 3324 struct device *hwmon_dev;
3264#endif 3325#endif
3326 bool link_up;
3265}; 3327};
3266 3328
3267#endif /* !(_T3_H) */ 3329#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 959c58ef972a..3227fdde521b 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2273,7 +2273,6 @@ bfa_ioc_get_type(struct bfa_ioc *ioc)
2273static void 2273static void
2274bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) 2274bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2275{ 2275{
2276 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2277 memcpy(serial_num, 2276 memcpy(serial_num,
2278 (void *)ioc->attr->brcd_serialnum, 2277 (void *)ioc->attr->brcd_serialnum,
2279 BFA_ADAPTER_SERIAL_NUM_LEN); 2278 BFA_ADAPTER_SERIAL_NUM_LEN);
@@ -2282,7 +2281,6 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2282static void 2281static void
2283bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) 2282bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2284{ 2283{
2285 memset(fw_ver, 0, BFA_VERSION_LEN);
2286 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 2284 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2287} 2285}
2288 2286
@@ -2304,7 +2302,6 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2304static void 2302static void
2305bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) 2303bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2306{ 2304{
2307 memset(optrom_ver, 0, BFA_VERSION_LEN);
2308 memcpy(optrom_ver, ioc->attr->optrom_version, 2305 memcpy(optrom_ver, ioc->attr->optrom_version,
2309 BFA_VERSION_LEN); 2306 BFA_VERSION_LEN);
2310} 2307}
@@ -2312,7 +2309,6 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2312static void 2309static void
2313bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) 2310bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2314{ 2311{
2315 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2316 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 2312 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2317} 2313}
2318 2314
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index eef6e1f8aecc..7d10e335c27d 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -787,6 +787,7 @@ struct bfi_enet_stats_bpc {
787 787
788/* MAC Rx Statistics */ 788/* MAC Rx Statistics */
789struct bfi_enet_stats_mac { 789struct bfi_enet_stats_mac {
790 u64 stats_clr_cnt; /* times this stats cleared */
790 u64 frame_64; /* both rx and tx counter */ 791 u64 frame_64; /* both rx and tx counter */
791 u64 frame_65_127; /* both rx and tx counter */ 792 u64 frame_65_127; /* both rx and tx counter */
792 u64 frame_128_255; /* both rx and tx counter */ 793 u64 frame_128_255; /* both rx and tx counter */
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index ede532b4e9db..25dae757e9c4 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -138,6 +138,8 @@ do { \
138#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \ 138#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \
139 ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1)) 139 ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))
140 140
141#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)
142
141#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \ 143#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \
142 (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1)) 144 (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))
143 145
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index b8c4e21fbf4c..af3f7bb0b3b8 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -46,7 +46,8 @@
46#define BFI_MAX_INTERPKT_COUNT 0xFF 46#define BFI_MAX_INTERPKT_COUNT 0xFF
47#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ 47#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */
48#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ 48#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */
49#define BFI_TX_INTERPKT_COUNT 32 49#define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */
50#define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */
50#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ 51#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */
51#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ 52#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */
52#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ 53#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 71144b396e02..ea6f4a036401 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1355,6 +1355,8 @@ bfa_fsm_state_decl(bna_rx, stopped,
1355 struct bna_rx, enum bna_rx_event); 1355 struct bna_rx, enum bna_rx_event);
1356bfa_fsm_state_decl(bna_rx, start_wait, 1356bfa_fsm_state_decl(bna_rx, start_wait,
1357 struct bna_rx, enum bna_rx_event); 1357 struct bna_rx, enum bna_rx_event);
1358bfa_fsm_state_decl(bna_rx, start_stop_wait,
1359 struct bna_rx, enum bna_rx_event);
1358bfa_fsm_state_decl(bna_rx, rxf_start_wait, 1360bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1359 struct bna_rx, enum bna_rx_event); 1361 struct bna_rx, enum bna_rx_event);
1360bfa_fsm_state_decl(bna_rx, started, 1362bfa_fsm_state_decl(bna_rx, started,
@@ -1432,7 +1434,7 @@ static void bna_rx_sm_start_wait(struct bna_rx *rx,
1432{ 1434{
1433 switch (event) { 1435 switch (event) {
1434 case RX_E_STOP: 1436 case RX_E_STOP:
1435 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); 1437 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1436 break; 1438 break;
1437 1439
1438 case RX_E_FAIL: 1440 case RX_E_FAIL:
@@ -1488,6 +1490,29 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1488 1490
1489} 1491}
1490 1492
1493static void
1494bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1495{
1496}
1497
1498static void
1499bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1500{
1501 switch (event) {
1502 case RX_E_FAIL:
1503 case RX_E_STOPPED:
1504 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1505 break;
1506
1507 case RX_E_STARTED:
1508 bna_rx_enet_stop(rx);
1509 break;
1510
1511 default:
1512 bfa_sm_fault(event);
1513 }
1514}
1515
1491void 1516void
1492bna_rx_sm_started_entry(struct bna_rx *rx) 1517bna_rx_sm_started_entry(struct bna_rx *rx)
1493{ 1518{
@@ -1908,6 +1933,9 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq,
1908 struct bna_mem_descr *swqpt_mem, 1933 struct bna_mem_descr *swqpt_mem,
1909 struct bna_mem_descr *page_mem) 1934 struct bna_mem_descr *page_mem)
1910{ 1935{
1936 u8 *kva;
1937 u64 dma;
1938 struct bna_dma_addr bna_dma;
1911 int i; 1939 int i;
1912 1940
1913 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 1941 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -1917,13 +1945,21 @@ bna_rxq_qpt_setup(struct bna_rxq *rxq,
1917 rxq->qpt.page_size = page_size; 1945 rxq->qpt.page_size = page_size;
1918 1946
1919 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; 1947 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1948 rxq->rcb->sw_q = page_mem->kva;
1949
1950 kva = page_mem->kva;
1951 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1920 1952
1921 for (i = 0; i < rxq->qpt.page_count; i++) { 1953 for (i = 0; i < rxq->qpt.page_count; i++) {
1922 rxq->rcb->sw_qpt[i] = page_mem[i].kva; 1954 rxq->rcb->sw_qpt[i] = kva;
1955 kva += PAGE_SIZE;
1956
1957 BNA_SET_DMA_ADDR(dma, &bna_dma);
1923 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = 1958 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1924 page_mem[i].dma.lsb; 1959 bna_dma.lsb;
1925 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = 1960 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1926 page_mem[i].dma.msb; 1961 bna_dma.msb;
1962 dma += PAGE_SIZE;
1927 } 1963 }
1928} 1964}
1929 1965
@@ -1935,6 +1971,9 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1935 struct bna_mem_descr *swqpt_mem, 1971 struct bna_mem_descr *swqpt_mem,
1936 struct bna_mem_descr *page_mem) 1972 struct bna_mem_descr *page_mem)
1937{ 1973{
1974 u8 *kva;
1975 u64 dma;
1976 struct bna_dma_addr bna_dma;
1938 int i; 1977 int i;
1939 1978
1940 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 1979 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -1944,14 +1983,21 @@ bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1944 rxp->cq.qpt.page_size = page_size; 1983 rxp->cq.qpt.page_size = page_size;
1945 1984
1946 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; 1985 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1986 rxp->cq.ccb->sw_q = page_mem->kva;
1987
1988 kva = page_mem->kva;
1989 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1947 1990
1948 for (i = 0; i < rxp->cq.qpt.page_count; i++) { 1991 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1949 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva; 1992 rxp->cq.ccb->sw_qpt[i] = kva;
1993 kva += PAGE_SIZE;
1950 1994
1995 BNA_SET_DMA_ADDR(dma, &bna_dma);
1951 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = 1996 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1952 page_mem[i].dma.lsb; 1997 bna_dma.lsb;
1953 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = 1998 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1954 page_mem[i].dma.msb; 1999 bna_dma.msb;
2000 dma += PAGE_SIZE;
1955 } 2001 }
1956} 2002}
1957 2003
@@ -2250,8 +2296,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2250 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; 2296 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2251 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; 2297 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2252 mem_info->mem_type = BNA_MEM_T_DMA; 2298 mem_info->mem_type = BNA_MEM_T_DMA;
2253 mem_info->len = PAGE_SIZE; 2299 mem_info->len = PAGE_SIZE * cpage_count;
2254 mem_info->num = cpage_count * q_cfg->num_paths; 2300 mem_info->num = q_cfg->num_paths;
2255 2301
2256 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; 2302 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2257 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; 2303 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
@@ -2268,8 +2314,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2268 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; 2314 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2269 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; 2315 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2270 mem_info->mem_type = BNA_MEM_T_DMA; 2316 mem_info->mem_type = BNA_MEM_T_DMA;
2271 mem_info->len = PAGE_SIZE; 2317 mem_info->len = PAGE_SIZE * dpage_count;
2272 mem_info->num = dpage_count * q_cfg->num_paths; 2318 mem_info->num = q_cfg->num_paths;
2273 2319
2274 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; 2320 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2275 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; 2321 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
@@ -2286,8 +2332,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2286 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; 2332 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2287 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; 2333 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2288 mem_info->mem_type = BNA_MEM_T_DMA; 2334 mem_info->mem_type = BNA_MEM_T_DMA;
2289 mem_info->len = (hpage_count ? PAGE_SIZE : 0); 2335 mem_info->len = PAGE_SIZE * hpage_count;
2290 mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0); 2336 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2291 2337
2292 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 2338 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2293 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; 2339 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
@@ -2332,7 +2378,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2332 struct bna_mem_descr *dsqpt_mem; 2378 struct bna_mem_descr *dsqpt_mem;
2333 struct bna_mem_descr *hpage_mem; 2379 struct bna_mem_descr *hpage_mem;
2334 struct bna_mem_descr *dpage_mem; 2380 struct bna_mem_descr *dpage_mem;
2335 int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0; 2381 int i;
2336 int dpage_count, hpage_count, rcb_idx; 2382 int dpage_count, hpage_count, rcb_idx;
2337 2383
2338 if (!bna_rx_res_check(rx_mod, rx_cfg)) 2384 if (!bna_rx_res_check(rx_mod, rx_cfg))
@@ -2352,14 +2398,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2352 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; 2398 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2353 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; 2399 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2354 2400
2355 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num / 2401 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2356 rx_cfg->num_paths; 2402 PAGE_SIZE;
2357 2403
2358 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num / 2404 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2359 rx_cfg->num_paths; 2405 PAGE_SIZE;
2360 2406
2361 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num / 2407 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2362 rx_cfg->num_paths; 2408 PAGE_SIZE;
2363 2409
2364 rx = bna_rx_get(rx_mod, rx_cfg->rx_type); 2410 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2365 rx->bna = bna; 2411 rx->bna = bna;
@@ -2446,10 +2492,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2446 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; 2492 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2447 2493
2448 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, 2494 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2449 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]); 2495 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2450 q0->rcb->page_idx = dpage_idx;
2451 q0->rcb->page_count = dpage_count;
2452 dpage_idx += dpage_count;
2453 2496
2454 if (rx->rcb_setup_cbfn) 2497 if (rx->rcb_setup_cbfn)
2455 rx->rcb_setup_cbfn(bnad, q0->rcb); 2498 rx->rcb_setup_cbfn(bnad, q0->rcb);
@@ -2475,10 +2518,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2475 2518
2476 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, 2519 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2477 &hqpt_mem[i], &hsqpt_mem[i], 2520 &hqpt_mem[i], &hsqpt_mem[i],
2478 &hpage_mem[hpage_idx]); 2521 &hpage_mem[i]);
2479 q1->rcb->page_idx = hpage_idx;
2480 q1->rcb->page_count = hpage_count;
2481 hpage_idx += hpage_count;
2482 2522
2483 if (rx->rcb_setup_cbfn) 2523 if (rx->rcb_setup_cbfn)
2484 rx->rcb_setup_cbfn(bnad, q1->rcb); 2524 rx->rcb_setup_cbfn(bnad, q1->rcb);
@@ -2510,10 +2550,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
2510 rxp->cq.ccb->id = i; 2550 rxp->cq.ccb->id = i;
2511 2551
2512 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, 2552 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2513 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]); 2553 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2514 rxp->cq.ccb->page_idx = cpage_idx;
2515 rxp->cq.ccb->page_count = page_count;
2516 cpage_idx += page_count;
2517 2554
2518 if (rx->ccb_setup_cbfn) 2555 if (rx->ccb_setup_cbfn)
2519 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); 2556 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
@@ -3230,6 +3267,9 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3230 struct bna_mem_descr *swqpt_mem, 3267 struct bna_mem_descr *swqpt_mem,
3231 struct bna_mem_descr *page_mem) 3268 struct bna_mem_descr *page_mem)
3232{ 3269{
3270 u8 *kva;
3271 u64 dma;
3272 struct bna_dma_addr bna_dma;
3233 int i; 3273 int i;
3234 3274
3235 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; 3275 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
@@ -3239,14 +3279,21 @@ bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3239 txq->qpt.page_size = page_size; 3279 txq->qpt.page_size = page_size;
3240 3280
3241 txq->tcb->sw_qpt = (void **) swqpt_mem->kva; 3281 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3282 txq->tcb->sw_q = page_mem->kva;
3283
3284 kva = page_mem->kva;
3285 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3242 3286
3243 for (i = 0; i < page_count; i++) { 3287 for (i = 0; i < page_count; i++) {
3244 txq->tcb->sw_qpt[i] = page_mem[i].kva; 3288 txq->tcb->sw_qpt[i] = kva;
3289 kva += PAGE_SIZE;
3245 3290
3291 BNA_SET_DMA_ADDR(dma, &bna_dma);
3246 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = 3292 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3247 page_mem[i].dma.lsb; 3293 bna_dma.lsb;
3248 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = 3294 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3249 page_mem[i].dma.msb; 3295 bna_dma.msb;
3296 dma += PAGE_SIZE;
3250 } 3297 }
3251} 3298}
3252 3299
@@ -3430,8 +3477,8 @@ bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3430 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; 3477 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3431 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; 3478 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3432 mem_info->mem_type = BNA_MEM_T_DMA; 3479 mem_info->mem_type = BNA_MEM_T_DMA;
3433 mem_info->len = PAGE_SIZE; 3480 mem_info->len = PAGE_SIZE * page_count;
3434 mem_info->num = num_txq * page_count; 3481 mem_info->num = num_txq;
3435 3482
3436 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 3483 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3437 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; 3484 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
@@ -3457,14 +3504,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
3457 struct bna_txq *txq; 3504 struct bna_txq *txq;
3458 struct list_head *qe; 3505 struct list_head *qe;
3459 int page_count; 3506 int page_count;
3460 int page_size;
3461 int page_idx;
3462 int i; 3507 int i;
3463 3508
3464 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; 3509 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3465 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) / 3510 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3466 tx_cfg->num_txq; 3511 PAGE_SIZE;
3467 page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3468 3512
3469 /** 3513 /**
3470 * Get resources 3514 * Get resources
@@ -3529,7 +3573,6 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
3529 /* TxQ */ 3573 /* TxQ */
3530 3574
3531 i = 0; 3575 i = 0;
3532 page_idx = 0;
3533 list_for_each(qe, &tx->txq_q) { 3576 list_for_each(qe, &tx->txq_q) {
3534 txq = (struct bna_txq *)qe; 3577 txq = (struct bna_txq *)qe;
3535 txq->tcb = (struct bna_tcb *) 3578 txq->tcb = (struct bna_tcb *)
@@ -3551,7 +3594,7 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
3551 if (intr_info->intr_type == BNA_INTR_T_INTX) 3594 if (intr_info->intr_type == BNA_INTR_T_INTX)
3552 txq->ib.intr_vector = (1 << txq->ib.intr_vector); 3595 txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3553 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; 3596 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3554 txq->ib.interpkt_timeo = 0; /* Not used */ 3597 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3555 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; 3598 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3556 3599
3557 /* TCB */ 3600 /* TCB */
@@ -3569,14 +3612,11 @@ bna_tx_create(struct bna *bna, struct bnad *bnad,
3569 txq->tcb->id = i; 3612 txq->tcb->id = i;
3570 3613
3571 /* QPT, SWQPT, Pages */ 3614 /* QPT, SWQPT, Pages */
3572 bna_txq_qpt_setup(txq, page_count, page_size, 3615 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3573 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], 3616 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3574 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], 3617 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3575 &res_info[BNA_TX_RES_MEM_T_PAGE]. 3618 &res_info[BNA_TX_RES_MEM_T_PAGE].
3576 res_u.mem_info.mdl[page_idx]); 3619 res_u.mem_info.mdl[i]);
3577 txq->tcb->page_idx = page_idx;
3578 txq->tcb->page_count = page_count;
3579 page_idx += page_count;
3580 3620
3581 /* Callback to bnad for setting up TCB */ 3621 /* Callback to bnad for setting up TCB */
3582 if (tx->tcb_setup_cbfn) 3622 if (tx->tcb_setup_cbfn)
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d3eb8bddfb2a..dc50f7836b6d 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -430,6 +430,7 @@ struct bna_ib {
430struct bna_tcb { 430struct bna_tcb {
431 /* Fast path */ 431 /* Fast path */
432 void **sw_qpt; 432 void **sw_qpt;
433 void *sw_q;
433 void *unmap_q; 434 void *unmap_q;
434 u32 producer_index; 435 u32 producer_index;
435 u32 consumer_index; 436 u32 consumer_index;
@@ -437,8 +438,6 @@ struct bna_tcb {
437 u32 q_depth; 438 u32 q_depth;
438 void __iomem *q_dbell; 439 void __iomem *q_dbell;
439 struct bna_ib_dbell *i_dbell; 440 struct bna_ib_dbell *i_dbell;
440 int page_idx;
441 int page_count;
442 /* Control path */ 441 /* Control path */
443 struct bna_txq *txq; 442 struct bna_txq *txq;
444 struct bnad *bnad; 443 struct bnad *bnad;
@@ -563,13 +562,12 @@ struct bna_tx_mod {
563struct bna_rcb { 562struct bna_rcb {
564 /* Fast path */ 563 /* Fast path */
565 void **sw_qpt; 564 void **sw_qpt;
565 void *sw_q;
566 void *unmap_q; 566 void *unmap_q;
567 u32 producer_index; 567 u32 producer_index;
568 u32 consumer_index; 568 u32 consumer_index;
569 u32 q_depth; 569 u32 q_depth;
570 void __iomem *q_dbell; 570 void __iomem *q_dbell;
571 int page_idx;
572 int page_count;
573 /* Control path */ 571 /* Control path */
574 struct bna_rxq *rxq; 572 struct bna_rxq *rxq;
575 struct bna_ccb *ccb; 573 struct bna_ccb *ccb;
@@ -626,6 +624,7 @@ struct bna_pkt_rate {
626struct bna_ccb { 624struct bna_ccb {
627 /* Fast path */ 625 /* Fast path */
628 void **sw_qpt; 626 void **sw_qpt;
627 void *sw_q;
629 u32 producer_index; 628 u32 producer_index;
630 volatile u32 *hw_producer_index; 629 volatile u32 *hw_producer_index;
631 u32 q_depth; 630 u32 q_depth;
@@ -633,8 +632,6 @@ struct bna_ccb {
633 struct bna_rcb *rcb[2]; 632 struct bna_rcb *rcb[2];
634 void *ctrl; /* For bnad */ 633 void *ctrl; /* For bnad */
635 struct bna_pkt_rate pkt_rate; 634 struct bna_pkt_rate pkt_rate;
636 int page_idx;
637 int page_count;
638 635
639 /* Control path */ 636 /* Control path */
640 struct bna_cq *cq; 637 struct bna_cq *cq;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ce1eac529470..7cce42dc2f20 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -61,23 +61,17 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
61/* 61/*
62 * Local MACROS 62 * Local MACROS
63 */ 63 */
64#define BNAD_TX_UNMAPQ_DEPTH (bnad->txq_depth * 2)
65
66#define BNAD_RX_UNMAPQ_DEPTH (bnad->rxq_depth)
67
68#define BNAD_GET_MBOX_IRQ(_bnad) \ 64#define BNAD_GET_MBOX_IRQ(_bnad) \
69 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ 65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
70 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ 66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
71 ((_bnad)->pcidev->irq)) 67 ((_bnad)->pcidev->irq))
72 68
73#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \ 69#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
74do { \ 70do { \
75 (_res_info)->res_type = BNA_RES_T_MEM; \ 71 (_res_info)->res_type = BNA_RES_T_MEM; \
76 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ 72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
77 (_res_info)->res_u.mem_info.num = (_num); \ 73 (_res_info)->res_u.mem_info.num = (_num); \
78 (_res_info)->res_u.mem_info.len = \ 74 (_res_info)->res_u.mem_info.len = (_size); \
79 sizeof(struct bnad_unmap_q) + \
80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81} while (0) 75} while (0)
82 76
83static void 77static void
@@ -103,48 +97,58 @@ bnad_remove_from_list(struct bnad *bnad)
103static void 97static void
104bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) 98bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
105{ 99{
106 struct bna_cq_entry *cmpl, *next_cmpl; 100 struct bna_cq_entry *cmpl;
107 unsigned int wi_range, wis = 0, ccb_prod = 0;
108 int i; 101 int i;
109 102
110 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl,
111 wi_range);
112
113 for (i = 0; i < ccb->q_depth; i++) { 103 for (i = 0; i < ccb->q_depth; i++) {
114 wis++; 104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
115 if (likely(--wi_range))
116 next_cmpl = cmpl + 1;
117 else {
118 BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
119 wis = 0;
120 BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
121 next_cmpl, wi_range);
122 }
123 cmpl->valid = 0; 105 cmpl->valid = 0;
124 cmpl = next_cmpl;
125 } 106 }
126} 107}
127 108
109/* Tx Datapath functions */
110
111
112/* Caller should ensure that the entry at unmap_q[index] is valid */
128static u32 113static u32
129bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array, 114bnad_tx_buff_unmap(struct bnad *bnad,
130 u32 index, u32 depth, struct sk_buff *skb, u32 frag) 115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
131{ 117{
132 int j; 118 struct bnad_tx_unmap *unmap;
133 array[index].skb = NULL; 119 struct sk_buff *skb;
134 120 int vector, nvecs;
135 dma_unmap_single(pdev, dma_unmap_addr(&array[index], dma_addr), 121
136 skb_headlen(skb), DMA_TO_DEVICE); 122 unmap = &unmap_q[index];
137 dma_unmap_addr_set(&array[index], dma_addr, 0); 123 nvecs = unmap->nvecs;
138 BNA_QE_INDX_ADD(index, 1, depth); 124
125 skb = unmap->skb;
126 unmap->skb = NULL;
127 unmap->nvecs = 0;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132 nvecs--;
133
134 vector = 0;
135 while (nvecs) {
136 vector++;
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138 vector = 0;
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
141 }
139 142
140 for (j = 0; j < frag; j++) { 143 dma_unmap_page(&bnad->pcidev->dev,
141 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), 144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
142 skb_frag_size(&skb_shinfo(skb)->frags[j]), 145 skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
143 DMA_TO_DEVICE); 146 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
144 dma_unmap_addr_set(&array[index], dma_addr, 0); 147 nvecs--;
145 BNA_QE_INDX_ADD(index, 1, depth);
146 } 148 }
147 149
150 BNA_QE_INDX_INC(index, q_depth);
151
148 return index; 152 return index;
149} 153}
150 154
@@ -154,80 +158,64 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
154 * so DMA unmap & freeing is fine. 158 * so DMA unmap & freeing is fine.
155 */ 159 */
156static void 160static void
157bnad_txq_cleanup(struct bnad *bnad, 161bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
158 struct bna_tcb *tcb)
159{ 162{
160 u32 unmap_cons; 163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
161 struct bnad_unmap_q *unmap_q = tcb->unmap_q; 164 struct sk_buff *skb;
162 struct bnad_skb_unmap *unmap_array; 165 int i;
163 struct sk_buff *skb = NULL;
164 int q;
165
166 unmap_array = unmap_q->unmap_array;
167 166
168 for (q = 0; q < unmap_q->q_depth; q++) { 167 for (i = 0; i < tcb->q_depth; i++) {
169 skb = unmap_array[q].skb; 168 skb = unmap_q[i].skb;
170 if (!skb) 169 if (!skb)
171 continue; 170 continue;
172 171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
173 unmap_cons = q;
174 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array,
175 unmap_cons, unmap_q->q_depth, skb,
176 skb_shinfo(skb)->nr_frags);
177 172
178 dev_kfree_skb_any(skb); 173 dev_kfree_skb_any(skb);
179 } 174 }
180} 175}
181 176
182/* Data Path Handlers */
183
184/* 177/*
185 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion 178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
186 * Can be called in a) Interrupt context 179 * Can be called in a) Interrupt context
187 * b) Sending context 180 * b) Sending context
188 */ 181 */
189static u32 182static u32
190bnad_txcmpl_process(struct bnad *bnad, 183bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
191 struct bna_tcb *tcb)
192{ 184{
193 u32 unmap_cons, sent_packets = 0, sent_bytes = 0; 185 u32 sent_packets = 0, sent_bytes = 0;
194 u16 wis, updated_hw_cons; 186 u32 wis, unmap_wis, hw_cons, cons, q_depth;
195 struct bnad_unmap_q *unmap_q = tcb->unmap_q; 187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
196 struct bnad_skb_unmap *unmap_array; 188 struct bnad_tx_unmap *unmap;
197 struct sk_buff *skb; 189 struct sk_buff *skb;
198 190
199 /* Just return if TX is stopped */ 191 /* Just return if TX is stopped */
200 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
201 return 0; 193 return 0;
202 194
203 updated_hw_cons = *(tcb->hw_consumer_index); 195 hw_cons = *(tcb->hw_consumer_index);
204 196 cons = tcb->consumer_index;
205 wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index, 197 q_depth = tcb->q_depth;
206 updated_hw_cons, tcb->q_depth);
207 198
199 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
208 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); 200 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
209 201
210 unmap_array = unmap_q->unmap_array;
211 unmap_cons = unmap_q->consumer_index;
212
213 prefetch(&unmap_array[unmap_cons + 1]);
214 while (wis) { 202 while (wis) {
215 skb = unmap_array[unmap_cons].skb; 203 unmap = &unmap_q[cons];
204
205 skb = unmap->skb;
216 206
217 sent_packets++; 207 sent_packets++;
218 sent_bytes += skb->len; 208 sent_bytes += skb->len;
219 wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
220 209
221 unmap_cons = bnad_pci_unmap_skb(&bnad->pcidev->dev, unmap_array, 210 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
222 unmap_cons, unmap_q->q_depth, skb, 211 wis -= unmap_wis;
223 skb_shinfo(skb)->nr_frags);
224 212
213 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
225 dev_kfree_skb_any(skb); 214 dev_kfree_skb_any(skb);
226 } 215 }
227 216
228 /* Update consumer pointers. */ 217 /* Update consumer pointers. */
229 tcb->consumer_index = updated_hw_cons; 218 tcb->consumer_index = hw_cons;
230 unmap_q->consumer_index = unmap_cons;
231 219
232 tcb->txq->tx_packets += sent_packets; 220 tcb->txq->tx_packets += sent_packets;
233 tcb->txq->tx_bytes += sent_bytes; 221 tcb->txq->tx_bytes += sent_bytes;
@@ -278,133 +266,306 @@ bnad_msix_tx(int irq, void *data)
278 return IRQ_HANDLED; 266 return IRQ_HANDLED;
279} 267}
280 268
281static void 269static inline void
282bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb) 270bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
283{ 271{
284 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 272 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273
274 unmap_q->reuse_pi = -1;
275 unmap_q->alloc_order = -1;
276 unmap_q->map_size = 0;
277 unmap_q->type = BNAD_RXBUF_NONE;
278}
279
280/* Default is page-based allocation. Multi-buffer support - TBD */
281static int
282bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283{
284 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
285 int mtu, order;
286
287 bnad_rxq_alloc_uninit(bnad, rcb);
288
289 mtu = bna_enet_mtu_get(&bnad->bna.enet);
290 order = get_order(mtu);
291
292 if (bna_is_small_rxq(rcb->id)) {
293 unmap_q->alloc_order = 0;
294 unmap_q->map_size = rcb->rxq->buffer_size;
295 } else {
296 unmap_q->alloc_order = order;
297 unmap_q->map_size =
298 (rcb->rxq->buffer_size > 2048) ?
299 PAGE_SIZE << order : 2048;
300 }
301
302 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
303
304 unmap_q->type = BNAD_RXBUF_PAGE;
305
306 return 0;
307}
308
309static inline void
310bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
311{
312 if (!unmap->page)
313 return;
314
315 dma_unmap_page(&bnad->pcidev->dev,
316 dma_unmap_addr(&unmap->vector, dma_addr),
317 unmap->vector.len, DMA_FROM_DEVICE);
318 put_page(unmap->page);
319 unmap->page = NULL;
320 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
321 unmap->vector.len = 0;
322}
285 323
286 rcb->producer_index = 0; 324static inline void
287 rcb->consumer_index = 0; 325bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
326{
327 if (!unmap->skb)
328 return;
288 329
289 unmap_q->producer_index = 0; 330 dma_unmap_single(&bnad->pcidev->dev,
290 unmap_q->consumer_index = 0; 331 dma_unmap_addr(&unmap->vector, dma_addr),
332 unmap->vector.len, DMA_FROM_DEVICE);
333 dev_kfree_skb_any(unmap->skb);
334 unmap->skb = NULL;
335 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
336 unmap->vector.len = 0;
291} 337}
292 338
293static void 339static void
294bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) 340bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
295{ 341{
296 struct bnad_unmap_q *unmap_q; 342 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
297 struct bnad_skb_unmap *unmap_array; 343 int i;
298 struct sk_buff *skb;
299 int unmap_cons;
300 344
301 unmap_q = rcb->unmap_q; 345 for (i = 0; i < rcb->q_depth; i++) {
302 unmap_array = unmap_q->unmap_array; 346 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
303 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { 347
304 skb = unmap_array[unmap_cons].skb; 348 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
305 if (!skb) 349 bnad_rxq_cleanup_page(bnad, unmap);
306 continue; 350 else
307 unmap_array[unmap_cons].skb = NULL; 351 bnad_rxq_cleanup_skb(bnad, unmap);
308 dma_unmap_single(&bnad->pcidev->dev,
309 dma_unmap_addr(&unmap_array[unmap_cons],
310 dma_addr),
311 rcb->rxq->buffer_size,
312 DMA_FROM_DEVICE);
313 dev_kfree_skb(skb);
314 } 352 }
315 bnad_rcb_cleanup(bnad, rcb); 353 bnad_rxq_alloc_uninit(bnad, rcb);
316} 354}
317 355
318static void 356static u32
319bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) 357bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
320{ 358{
321 u16 to_alloc, alloced, unmap_prod, wi_range; 359 u32 alloced, prod, q_depth;
322 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 360 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
323 struct bnad_skb_unmap *unmap_array; 361 struct bnad_rx_unmap *unmap, *prev;
324 struct bna_rxq_entry *rxent; 362 struct bna_rxq_entry *rxent;
325 struct sk_buff *skb; 363 struct page *page;
364 u32 page_offset, alloc_size;
326 dma_addr_t dma_addr; 365 dma_addr_t dma_addr;
327 366
367 prod = rcb->producer_index;
368 q_depth = rcb->q_depth;
369
370 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
328 alloced = 0; 371 alloced = 0;
329 to_alloc =
330 BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);
331 372
332 unmap_array = unmap_q->unmap_array; 373 while (nalloc--) {
333 unmap_prod = unmap_q->producer_index; 374 unmap = &unmap_q->unmap[prod];
334 375
335 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range); 376 if (unmap_q->reuse_pi < 0) {
377 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
378 unmap_q->alloc_order);
379 page_offset = 0;
380 } else {
381 prev = &unmap_q->unmap[unmap_q->reuse_pi];
382 page = prev->page;
383 page_offset = prev->page_offset + unmap_q->map_size;
384 get_page(page);
385 }
386
387 if (unlikely(!page)) {
388 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
389 rcb->rxq->rxbuf_alloc_failed++;
390 goto finishing;
391 }
392
393 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
394 unmap_q->map_size, DMA_FROM_DEVICE);
395
396 unmap->page = page;
397 unmap->page_offset = page_offset;
398 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
399 unmap->vector.len = unmap_q->map_size;
400 page_offset += unmap_q->map_size;
401
402 if (page_offset < alloc_size)
403 unmap_q->reuse_pi = prod;
404 else
405 unmap_q->reuse_pi = -1;
406
407 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
408 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
409 BNA_QE_INDX_INC(prod, q_depth);
410 alloced++;
411 }
412
413finishing:
414 if (likely(alloced)) {
415 rcb->producer_index = prod;
416 smp_mb();
417 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
418 bna_rxq_prod_indx_doorbell(rcb);
419 }
420
421 return alloced;
422}
423
424static u32
425bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
426{
427 u32 alloced, prod, q_depth, buff_sz;
428 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
429 struct bnad_rx_unmap *unmap;
430 struct bna_rxq_entry *rxent;
431 struct sk_buff *skb;
432 dma_addr_t dma_addr;
433
434 buff_sz = rcb->rxq->buffer_size;
435 prod = rcb->producer_index;
436 q_depth = rcb->q_depth;
437
438 alloced = 0;
439 while (nalloc--) {
440 unmap = &unmap_q->unmap[prod];
441
442 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
336 443
337 while (to_alloc--) {
338 if (!wi_range)
339 BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
340 wi_range);
341 skb = netdev_alloc_skb_ip_align(bnad->netdev,
342 rcb->rxq->buffer_size);
343 if (unlikely(!skb)) { 444 if (unlikely(!skb)) {
344 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); 445 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
345 rcb->rxq->rxbuf_alloc_failed++; 446 rcb->rxq->rxbuf_alloc_failed++;
346 goto finishing; 447 goto finishing;
347 } 448 }
348 unmap_array[unmap_prod].skb = skb;
349 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 449 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
350 rcb->rxq->buffer_size, 450 buff_sz, DMA_FROM_DEVICE);
351 DMA_FROM_DEVICE); 451
352 dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, 452 unmap->skb = skb;
353 dma_addr); 453 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
354 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); 454 unmap->vector.len = buff_sz;
355 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
356 455
357 rxent++; 456 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
358 wi_range--; 457 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
458 BNA_QE_INDX_INC(prod, q_depth);
359 alloced++; 459 alloced++;
360 } 460 }
361 461
362finishing: 462finishing:
363 if (likely(alloced)) { 463 if (likely(alloced)) {
364 unmap_q->producer_index = unmap_prod; 464 rcb->producer_index = prod;
365 rcb->producer_index = unmap_prod;
366 smp_mb(); 465 smp_mb();
367 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) 466 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
368 bna_rxq_prod_indx_doorbell(rcb); 467 bna_rxq_prod_indx_doorbell(rcb);
369 } 468 }
469
470 return alloced;
370} 471}
371 472
372static inline void 473static inline void
373bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) 474bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
374{ 475{
375 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 476 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
477 u32 to_alloc;
376 478
377 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 479 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
378 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 480 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
379 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 481 return;
380 bnad_rxq_post(bnad, rcb); 482
381 smp_mb__before_clear_bit(); 483 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
382 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 484 bnad_rxq_refill_page(bnad, rcb, to_alloc);
485 else
486 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
487}
488
489#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
490 BNA_CQ_EF_IPV6 | \
491 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
492 BNA_CQ_EF_L4_CKSUM_OK)
493
494#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
495 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
496#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
497 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
498#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
499 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
500#define flags_udp6 (BNA_CQ_EF_IPV6 | \
501 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
502
503static inline struct sk_buff *
504bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
505 struct bnad_rx_unmap_q *unmap_q,
506 struct bnad_rx_unmap *unmap,
507 u32 length, u32 flags)
508{
509 struct bnad *bnad = rx_ctrl->bnad;
510 struct sk_buff *skb;
511
512 if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
513 skb = napi_get_frags(&rx_ctrl->napi);
514 if (unlikely(!skb))
515 return NULL;
516
517 dma_unmap_page(&bnad->pcidev->dev,
518 dma_unmap_addr(&unmap->vector, dma_addr),
519 unmap->vector.len, DMA_FROM_DEVICE);
520 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
521 unmap->page, unmap->page_offset, length);
522 skb->len += length;
523 skb->data_len += length;
524 skb->truesize += length;
525
526 unmap->page = NULL;
527 unmap->vector.len = 0;
528
529 return skb;
383 } 530 }
531
532 skb = unmap->skb;
533 BUG_ON(!skb);
534
535 dma_unmap_single(&bnad->pcidev->dev,
536 dma_unmap_addr(&unmap->vector, dma_addr),
537 unmap->vector.len, DMA_FROM_DEVICE);
538
539 skb_put(skb, length);
540
541 skb->protocol = eth_type_trans(skb, bnad->netdev);
542
543 unmap->skb = NULL;
544 unmap->vector.len = 0;
545 return skb;
384} 546}
385 547
386static u32 548static u32
387bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) 549bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
388{ 550{
389 struct bna_cq_entry *cmpl, *next_cmpl; 551 struct bna_cq_entry *cq, *cmpl;
390 struct bna_rcb *rcb = NULL; 552 struct bna_rcb *rcb = NULL;
391 unsigned int wi_range, packets = 0, wis = 0; 553 struct bnad_rx_unmap_q *unmap_q;
392 struct bnad_unmap_q *unmap_q; 554 struct bnad_rx_unmap *unmap;
393 struct bnad_skb_unmap *unmap_array;
394 struct sk_buff *skb; 555 struct sk_buff *skb;
395 u32 flags, unmap_cons;
396 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 556 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
397 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 557 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
398 558 u32 packets = 0, length = 0, flags, masked_flags;
399 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
400 return 0;
401 559
402 prefetch(bnad->netdev); 560 prefetch(bnad->netdev);
403 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, 561
404 wi_range); 562 cq = ccb->sw_q;
405 BUG_ON(!(wi_range <= ccb->q_depth)); 563 cmpl = &cq[ccb->producer_index];
406 while (cmpl->valid && packets < budget) { 564
565 while (cmpl->valid && (packets < budget)) {
407 packets++; 566 packets++;
567 flags = ntohl(cmpl->flags);
568 length = ntohs(cmpl->length);
408 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); 569 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
409 570
410 if (bna_is_small_rxq(cmpl->rxq_id)) 571 if (bna_is_small_rxq(cmpl->rxq_id))
@@ -413,83 +574,63 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
413 rcb = ccb->rcb[0]; 574 rcb = ccb->rcb[0];
414 575
415 unmap_q = rcb->unmap_q; 576 unmap_q = rcb->unmap_q;
416 unmap_array = unmap_q->unmap_array; 577 unmap = &unmap_q->unmap[rcb->consumer_index];
417 unmap_cons = unmap_q->consumer_index; 578
418 579 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
419 skb = unmap_array[unmap_cons].skb; 580 BNA_CQ_EF_FCS_ERROR |
420 BUG_ON(!(skb)); 581 BNA_CQ_EF_TOO_LONG))) {
421 unmap_array[unmap_cons].skb = NULL; 582 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
422 dma_unmap_single(&bnad->pcidev->dev, 583 bnad_rxq_cleanup_page(bnad, unmap);
423 dma_unmap_addr(&unmap_array[unmap_cons], 584 else
424 dma_addr), 585 bnad_rxq_cleanup_skb(bnad, unmap);
425 rcb->rxq->buffer_size,
426 DMA_FROM_DEVICE);
427 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
428
429 /* Should be more efficient ? Performance ? */
430 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
431
432 wis++;
433 if (likely(--wi_range))
434 next_cmpl = cmpl + 1;
435 else {
436 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
437 wis = 0;
438 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
439 next_cmpl, wi_range);
440 BUG_ON(!(wi_range <= ccb->q_depth));
441 }
442 prefetch(next_cmpl);
443 586
444 flags = ntohl(cmpl->flags);
445 if (unlikely
446 (flags &
447 (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
448 BNA_CQ_EF_TOO_LONG))) {
449 dev_kfree_skb_any(skb);
450 rcb->rxq->rx_packets_with_error++; 587 rcb->rxq->rx_packets_with_error++;
451 goto next; 588 goto next;
452 } 589 }
453 590
454 skb_put(skb, ntohs(cmpl->length)); 591 skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
592 length, flags);
593
594 if (unlikely(!skb))
595 break;
596
597 masked_flags = flags & flags_cksum_prot_mask;
598
455 if (likely 599 if (likely
456 ((bnad->netdev->features & NETIF_F_RXCSUM) && 600 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
457 (((flags & BNA_CQ_EF_IPV4) && 601 ((masked_flags == flags_tcp4) ||
458 (flags & BNA_CQ_EF_L3_CKSUM_OK)) || 602 (masked_flags == flags_udp4) ||
459 (flags & BNA_CQ_EF_IPV6)) && 603 (masked_flags == flags_tcp6) ||
460 (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) && 604 (masked_flags == flags_udp6))))
461 (flags & BNA_CQ_EF_L4_CKSUM_OK)))
462 skb->ip_summed = CHECKSUM_UNNECESSARY; 605 skb->ip_summed = CHECKSUM_UNNECESSARY;
463 else 606 else
464 skb_checksum_none_assert(skb); 607 skb_checksum_none_assert(skb);
465 608
466 rcb->rxq->rx_packets++; 609 rcb->rxq->rx_packets++;
467 rcb->rxq->rx_bytes += skb->len; 610 rcb->rxq->rx_bytes += length;
468 skb->protocol = eth_type_trans(skb, bnad->netdev);
469 611
470 if (flags & BNA_CQ_EF_VLAN) 612 if (flags & BNA_CQ_EF_VLAN)
471 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag)); 613 __vlan_hwaccel_put_tag(skb, ntohs(cmpl->vlan_tag));
472 614
473 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 615 if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
474 napi_gro_receive(&rx_ctrl->napi, skb); 616 napi_gro_frags(&rx_ctrl->napi);
475 else 617 else
476 netif_receive_skb(skb); 618 netif_receive_skb(skb);
477 619
478next: 620next:
479 cmpl->valid = 0; 621 cmpl->valid = 0;
480 cmpl = next_cmpl; 622 BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
623 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
624 cmpl = &cq[ccb->producer_index];
481 } 625 }
482 626
483 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); 627 napi_gro_flush(&rx_ctrl->napi, false);
484
485 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) 628 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
486 bna_ib_ack_disable_irq(ccb->i_dbell, packets); 629 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
487 630
488 bnad_refill_rxq(bnad, ccb->rcb[0]); 631 bnad_rxq_post(bnad, ccb->rcb[0]);
489 if (ccb->rcb[1]) 632 if (ccb->rcb[1])
490 bnad_refill_rxq(bnad, ccb->rcb[1]); 633 bnad_rxq_post(bnad, ccb->rcb[1]);
491
492 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
493 634
494 return packets; 635 return packets;
495} 636}
@@ -764,12 +905,9 @@ bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
764{ 905{
765 struct bnad_tx_info *tx_info = 906 struct bnad_tx_info *tx_info =
766 (struct bnad_tx_info *)tcb->txq->tx->priv; 907 (struct bnad_tx_info *)tcb->txq->tx->priv;
767 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
768 908
909 tcb->priv = tcb;
769 tx_info->tcb[tcb->id] = tcb; 910 tx_info->tcb[tcb->id] = tcb;
770 unmap_q->producer_index = 0;
771 unmap_q->consumer_index = 0;
772 unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
773} 911}
774 912
775static void 913static void
@@ -783,16 +921,6 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
783} 921}
784 922
785static void 923static void
786bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
787{
788 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
789
790 unmap_q->producer_index = 0;
791 unmap_q->consumer_index = 0;
792 unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
793}
794
795static void
796bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 924bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
797{ 925{
798 struct bnad_rx_info *rx_info = 926 struct bnad_rx_info *rx_info =
@@ -878,10 +1006,9 @@ bnad_tx_cleanup(struct delayed_work *work)
878 struct bnad_tx_info *tx_info = 1006 struct bnad_tx_info *tx_info =
879 container_of(work, struct bnad_tx_info, tx_cleanup_work); 1007 container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 struct bnad *bnad = NULL; 1008 struct bnad *bnad = NULL;
881 struct bnad_unmap_q *unmap_q;
882 struct bna_tcb *tcb; 1009 struct bna_tcb *tcb;
883 unsigned long flags; 1010 unsigned long flags;
884 uint32_t i, pending = 0; 1011 u32 i, pending = 0;
885 1012
886 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { 1013 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 tcb = tx_info->tcb[i]; 1014 tcb = tx_info->tcb[i];
@@ -897,10 +1024,6 @@ bnad_tx_cleanup(struct delayed_work *work)
897 1024
898 bnad_txq_cleanup(bnad, tcb); 1025 bnad_txq_cleanup(bnad, tcb);
899 1026
900 unmap_q = tcb->unmap_q;
901 unmap_q->producer_index = 0;
902 unmap_q->consumer_index = 0;
903
904 smp_mb__before_clear_bit(); 1027 smp_mb__before_clear_bit();
905 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 1028 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 } 1029 }
@@ -916,7 +1039,6 @@ bnad_tx_cleanup(struct delayed_work *work)
916 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1039 spin_unlock_irqrestore(&bnad->bna_lock, flags);
917} 1040}
918 1041
919
920static void 1042static void
921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) 1043bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
922{ 1044{
@@ -965,7 +1087,7 @@ bnad_rx_cleanup(void *work)
965 struct bnad_rx_ctrl *rx_ctrl; 1087 struct bnad_rx_ctrl *rx_ctrl;
966 struct bnad *bnad = NULL; 1088 struct bnad *bnad = NULL;
967 unsigned long flags; 1089 unsigned long flags;
968 uint32_t i; 1090 u32 i;
969 1091
970 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1092 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 rx_ctrl = &rx_info->rx_ctrl[i]; 1093 rx_ctrl = &rx_info->rx_ctrl[i];
@@ -1022,9 +1144,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1022 struct bna_ccb *ccb; 1144 struct bna_ccb *ccb;
1023 struct bna_rcb *rcb; 1145 struct bna_rcb *rcb;
1024 struct bnad_rx_ctrl *rx_ctrl; 1146 struct bnad_rx_ctrl *rx_ctrl;
1025 struct bnad_unmap_q *unmap_q; 1147 int i, j;
1026 int i;
1027 int j;
1028 1148
1029 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1149 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1030 rx_ctrl = &rx_info->rx_ctrl[i]; 1150 rx_ctrl = &rx_info->rx_ctrl[i];
@@ -1039,19 +1159,10 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1039 if (!rcb) 1159 if (!rcb)
1040 continue; 1160 continue;
1041 1161
1162 bnad_rxq_alloc_init(bnad, rcb);
1042 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1163 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1043 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); 1164 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1044 unmap_q = rcb->unmap_q; 1165 bnad_rxq_post(bnad, rcb);
1045
1046 /* Now allocate & post buffers for this RCB */
1047 /* !!Allocation in callback context */
1048 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1049 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1050 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1051 bnad_rxq_post(bnad, rcb);
1052 smp_mb__before_clear_bit();
1053 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1054 }
1055 } 1166 }
1056 } 1167 }
1057} 1168}
@@ -1775,10 +1886,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1775 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1886 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1776 1887
1777 /* Fill Unmap Q memory requirements */ 1888 /* Fill Unmap Q memory requirements */
1778 BNAD_FILL_UNMAPQ_MEM_REQ( 1889 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1779 &res_info[BNA_TX_RES_MEM_T_UNMAPQ], 1890 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1780 bnad->num_txq_per_tx, 1891 bnad->txq_depth));
1781 BNAD_TX_UNMAPQ_DEPTH);
1782 1892
1783 /* Allocate resources */ 1893 /* Allocate resources */
1784 err = bnad_tx_res_alloc(bnad, res_info, tx_id); 1894 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
@@ -1916,7 +2026,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1916 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; 2026 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
1917 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 2027 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1918 static const struct bna_rx_event_cbfn rx_cbfn = { 2028 static const struct bna_rx_event_cbfn rx_cbfn = {
1919 .rcb_setup_cbfn = bnad_cb_rcb_setup, 2029 .rcb_setup_cbfn = NULL,
1920 .rcb_destroy_cbfn = NULL, 2030 .rcb_destroy_cbfn = NULL,
1921 .ccb_setup_cbfn = bnad_cb_ccb_setup, 2031 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1922 .ccb_destroy_cbfn = bnad_cb_ccb_destroy, 2032 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
@@ -1938,11 +2048,12 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1938 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2048 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1939 2049
1940 /* Fill Unmap Q memory requirements */ 2050 /* Fill Unmap Q memory requirements */
1941 BNAD_FILL_UNMAPQ_MEM_REQ( 2051 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1942 &res_info[BNA_RX_RES_MEM_T_UNMAPQ],
1943 rx_config->num_paths + 2052 rx_config->num_paths +
1944 ((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 : 2053 ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
1945 rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH); 2054 0 : rx_config->num_paths),
2055 ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
2056 sizeof(struct bnad_rx_unmap_q)));
1946 2057
1947 /* Allocate resource */ 2058 /* Allocate resource */
1948 err = bnad_rx_res_alloc(bnad, res_info, rx_id); 2059 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
@@ -2523,125 +2634,34 @@ bnad_stop(struct net_device *netdev)
2523} 2634}
2524 2635
2525/* TX */ 2636/* TX */
2526/* 2637/* Returns 0 for success */
2527 * bnad_start_xmit : Netdev entry point for Transmit 2638static int
2528 * Called under lock held by net_device 2639bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2529 */ 2640 struct sk_buff *skb, struct bna_txq_entry *txqent)
2530static netdev_tx_t
2531bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2532{ 2641{
2533 struct bnad *bnad = netdev_priv(netdev); 2642 u16 flags = 0;
2534 u32 txq_id = 0; 2643 u32 gso_size;
2535 struct bna_tcb *tcb = bnad->tx_info[0].tcb[txq_id]; 2644 u16 vlan_tag = 0;
2536
2537 u16 txq_prod, vlan_tag = 0;
2538 u32 unmap_prod, wis, wis_used, wi_range;
2539 u32 vectors, vect_id, i, acked;
2540 int err;
2541 unsigned int len;
2542 u32 gso_size;
2543
2544 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
2545 dma_addr_t dma_addr;
2546 struct bna_txq_entry *txqent;
2547 u16 flags;
2548
2549 if (unlikely(skb->len <= ETH_HLEN)) {
2550 dev_kfree_skb(skb);
2551 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2552 return NETDEV_TX_OK;
2553 }
2554 if (unlikely(skb_headlen(skb) > BFI_TX_MAX_DATA_PER_VECTOR)) {
2555 dev_kfree_skb(skb);
2556 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_too_long);
2557 return NETDEV_TX_OK;
2558 }
2559 if (unlikely(skb_headlen(skb) == 0)) {
2560 dev_kfree_skb(skb);
2561 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2562 return NETDEV_TX_OK;
2563 }
2564
2565 /*
2566 * Takes care of the Tx that is scheduled between clearing the flag
2567 * and the netif_tx_stop_all_queues() call.
2568 */
2569 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2570 dev_kfree_skb(skb);
2571 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2572 return NETDEV_TX_OK;
2573 }
2574
2575 vectors = 1 + skb_shinfo(skb)->nr_frags;
2576 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2577 dev_kfree_skb(skb);
2578 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2579 return NETDEV_TX_OK;
2580 }
2581 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2582 acked = 0;
2583 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2584 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2585 if ((u16) (*tcb->hw_consumer_index) !=
2586 tcb->consumer_index &&
2587 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2588 acked = bnad_txcmpl_process(bnad, tcb);
2589 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2590 bna_ib_ack(tcb->i_dbell, acked);
2591 smp_mb__before_clear_bit();
2592 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2593 } else {
2594 netif_stop_queue(netdev);
2595 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2596 }
2597
2598 smp_mb();
2599 /*
2600 * Check again to deal with race condition between
2601 * netif_stop_queue here, and netif_wake_queue in
2602 * interrupt handler which is not inside netif tx lock.
2603 */
2604 if (likely
2605 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) ||
2606 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) {
2607 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2608 return NETDEV_TX_BUSY;
2609 } else {
2610 netif_wake_queue(netdev);
2611 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2612 }
2613 }
2614
2615 unmap_prod = unmap_q->producer_index;
2616 flags = 0;
2617
2618 txq_prod = tcb->producer_index;
2619 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range);
2620 txqent->hdr.wi.reserved = 0;
2621 txqent->hdr.wi.num_vectors = vectors;
2622 2645
2623 if (vlan_tx_tag_present(skb)) { 2646 if (vlan_tx_tag_present(skb)) {
2624 vlan_tag = (u16) vlan_tx_tag_get(skb); 2647 vlan_tag = (u16)vlan_tx_tag_get(skb);
2625 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2648 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2626 } 2649 }
2627 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { 2650 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2628 vlan_tag = 2651 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2629 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); 2652 | (vlan_tag & 0x1fff);
2630 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2653 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2631 } 2654 }
2632
2633 txqent->hdr.wi.vlan_tag = htons(vlan_tag); 2655 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2634 2656
2635 if (skb_is_gso(skb)) { 2657 if (skb_is_gso(skb)) {
2636 gso_size = skb_shinfo(skb)->gso_size; 2658 gso_size = skb_shinfo(skb)->gso_size;
2637 2659 if (unlikely(gso_size > bnad->netdev->mtu)) {
2638 if (unlikely(gso_size > netdev->mtu)) {
2639 dev_kfree_skb(skb);
2640 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); 2660 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2641 return NETDEV_TX_OK; 2661 return -EINVAL;
2642 } 2662 }
2643 if (unlikely((gso_size + skb_transport_offset(skb) + 2663 if (unlikely((gso_size + skb_transport_offset(skb) +
2644 tcp_hdrlen(skb)) >= skb->len)) { 2664 tcp_hdrlen(skb)) >= skb->len)) {
2645 txqent->hdr.wi.opcode = 2665 txqent->hdr.wi.opcode =
2646 __constant_htons(BNA_TXQ_WI_SEND); 2666 __constant_htons(BNA_TXQ_WI_SEND);
2647 txqent->hdr.wi.lso_mss = 0; 2667 txqent->hdr.wi.lso_mss = 0;
@@ -2652,25 +2672,22 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2652 txqent->hdr.wi.lso_mss = htons(gso_size); 2672 txqent->hdr.wi.lso_mss = htons(gso_size);
2653 } 2673 }
2654 2674
2655 err = bnad_tso_prepare(bnad, skb); 2675 if (bnad_tso_prepare(bnad, skb)) {
2656 if (unlikely(err)) {
2657 dev_kfree_skb(skb);
2658 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); 2676 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2659 return NETDEV_TX_OK; 2677 return -EINVAL;
2660 } 2678 }
2679
2661 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); 2680 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2662 txqent->hdr.wi.l4_hdr_size_n_offset = 2681 txqent->hdr.wi.l4_hdr_size_n_offset =
2663 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2682 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2664 (tcp_hdrlen(skb) >> 2, 2683 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2665 skb_transport_offset(skb))); 2684 } else {
2666 } else {
2667 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND); 2685 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
2668 txqent->hdr.wi.lso_mss = 0; 2686 txqent->hdr.wi.lso_mss = 0;
2669 2687
2670 if (unlikely(skb->len > (netdev->mtu + ETH_HLEN))) { 2688 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
2671 dev_kfree_skb(skb);
2672 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); 2689 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2673 return NETDEV_TX_OK; 2690 return -EINVAL;
2674 } 2691 }
2675 2692
2676 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2693 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2678,11 +2695,13 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2678 2695
2679 if (skb->protocol == __constant_htons(ETH_P_IP)) 2696 if (skb->protocol == __constant_htons(ETH_P_IP))
2680 proto = ip_hdr(skb)->protocol; 2697 proto = ip_hdr(skb)->protocol;
2698#ifdef NETIF_F_IPV6_CSUM
2681 else if (skb->protocol == 2699 else if (skb->protocol ==
2682 __constant_htons(ETH_P_IPV6)) { 2700 __constant_htons(ETH_P_IPV6)) {
2683 /* nexthdr may not be TCP immediately. */ 2701 /* nexthdr may not be TCP immediately. */
2684 proto = ipv6_hdr(skb)->nexthdr; 2702 proto = ipv6_hdr(skb)->nexthdr;
2685 } 2703 }
2704#endif
2686 if (proto == IPPROTO_TCP) { 2705 if (proto == IPPROTO_TCP) {
2687 flags |= BNA_TXQ_WI_CF_TCP_CKSUM; 2706 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2688 txqent->hdr.wi.l4_hdr_size_n_offset = 2707 txqent->hdr.wi.l4_hdr_size_n_offset =
@@ -2692,12 +2711,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2692 BNAD_UPDATE_CTR(bnad, tcpcsum_offload); 2711 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2693 2712
2694 if (unlikely(skb_headlen(skb) < 2713 if (unlikely(skb_headlen(skb) <
2695 skb_transport_offset(skb) + tcp_hdrlen(skb))) { 2714 skb_transport_offset(skb) +
2696 dev_kfree_skb(skb); 2715 tcp_hdrlen(skb))) {
2697 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); 2716 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2698 return NETDEV_TX_OK; 2717 return -EINVAL;
2699 } 2718 }
2700
2701 } else if (proto == IPPROTO_UDP) { 2719 } else if (proto == IPPROTO_UDP) {
2702 flags |= BNA_TXQ_WI_CF_UDP_CKSUM; 2720 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2703 txqent->hdr.wi.l4_hdr_size_n_offset = 2721 txqent->hdr.wi.l4_hdr_size_n_offset =
@@ -2706,51 +2724,149 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2706 2724
2707 BNAD_UPDATE_CTR(bnad, udpcsum_offload); 2725 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2708 if (unlikely(skb_headlen(skb) < 2726 if (unlikely(skb_headlen(skb) <
2709 skb_transport_offset(skb) + 2727 skb_transport_offset(skb) +
2710 sizeof(struct udphdr))) { 2728 sizeof(struct udphdr))) {
2711 dev_kfree_skb(skb);
2712 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); 2729 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2713 return NETDEV_TX_OK; 2730 return -EINVAL;
2714 } 2731 }
2715 } else { 2732 } else {
2716 dev_kfree_skb(skb); 2733
2717 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); 2734 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2718 return NETDEV_TX_OK; 2735 return -EINVAL;
2719 } 2736 }
2720 } else { 2737 } else
2721 txqent->hdr.wi.l4_hdr_size_n_offset = 0; 2738 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2722 }
2723 } 2739 }
2724 2740
2725 txqent->hdr.wi.flags = htons(flags); 2741 txqent->hdr.wi.flags = htons(flags);
2726
2727 txqent->hdr.wi.frame_length = htonl(skb->len); 2742 txqent->hdr.wi.frame_length = htonl(skb->len);
2728 2743
2729 unmap_q->unmap_array[unmap_prod].skb = skb; 2744 return 0;
2745}
2746
2747/*
2748 * bnad_start_xmit : Netdev entry point for Transmit
2749 * Called under lock held by net_device
2750 */
2751static netdev_tx_t
2752bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2753{
2754 struct bnad *bnad = netdev_priv(netdev);
2755 u32 txq_id = 0;
2756 struct bna_tcb *tcb = NULL;
2757 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2758 u32 prod, q_depth, vect_id;
2759 u32 wis, vectors, len;
2760 int i;
2761 dma_addr_t dma_addr;
2762 struct bna_txq_entry *txqent;
2763
2730 len = skb_headlen(skb); 2764 len = skb_headlen(skb);
2731 txqent->vector[0].length = htons(len);
2732 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2733 skb_headlen(skb), DMA_TO_DEVICE);
2734 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2735 dma_addr);
2736 2765
2737 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); 2766 /* Sanity checks for the skb */
2738 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
2739 2767
2740 vect_id = 0; 2768 if (unlikely(skb->len <= ETH_HLEN)) {
2741 wis_used = 1; 2769 dev_kfree_skb(skb);
2770 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2771 return NETDEV_TX_OK;
2772 }
2773 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2774 dev_kfree_skb(skb);
2775 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2776 return NETDEV_TX_OK;
2777 }
2778 if (unlikely(len == 0)) {
2779 dev_kfree_skb(skb);
2780 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2781 return NETDEV_TX_OK;
2782 }
2783
2784 tcb = bnad->tx_info[0].tcb[txq_id];
2785 q_depth = tcb->q_depth;
2786 prod = tcb->producer_index;
2787
2788 unmap_q = tcb->unmap_q;
2742 2789
2743 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2790 /*
2791 * Takes care of the Tx that is scheduled between clearing the flag
2792 * and the netif_tx_stop_all_queues() call.
2793 */
2794 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2795 dev_kfree_skb(skb);
2796 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2797 return NETDEV_TX_OK;
2798 }
2799
2800 vectors = 1 + skb_shinfo(skb)->nr_frags;
2801 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2802
2803 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2804 dev_kfree_skb(skb);
2805 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2806 return NETDEV_TX_OK;
2807 }
2808
2809 /* Check for available TxQ resources */
2810 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2811 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2812 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2813 u32 sent;
2814 sent = bnad_txcmpl_process(bnad, tcb);
2815 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2816 bna_ib_ack(tcb->i_dbell, sent);
2817 smp_mb__before_clear_bit();
2818 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2819 } else {
2820 netif_stop_queue(netdev);
2821 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2822 }
2823
2824 smp_mb();
2825 /*
2826 * Check again to deal with race condition between
2827 * netif_stop_queue here, and netif_wake_queue in
2828 * interrupt handler which is not inside netif tx lock.
2829 */
2830 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2831 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2832 return NETDEV_TX_BUSY;
2833 } else {
2834 netif_wake_queue(netdev);
2835 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2836 }
2837 }
2838
2839 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2840 head_unmap = &unmap_q[prod];
2841
2842 /* Program the opcode, flags, frame_len, num_vectors in WI */
2843 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2844 dev_kfree_skb(skb);
2845 return NETDEV_TX_OK;
2846 }
2847 txqent->hdr.wi.reserved = 0;
2848 txqent->hdr.wi.num_vectors = vectors;
2849
2850 head_unmap->skb = skb;
2851 head_unmap->nvecs = 0;
2852
2853 /* Program the vectors */
2854 unmap = head_unmap;
2855 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
2856 len, DMA_TO_DEVICE);
2857 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
2858 txqent->vector[0].length = htons(len);
2859 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
2860 head_unmap->nvecs++;
2861
2862 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
2744 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 2863 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
2745 u16 size = skb_frag_size(frag); 2864 u16 size = skb_frag_size(frag);
2746 2865
2747 if (unlikely(size == 0)) { 2866 if (unlikely(size == 0)) {
2748 unmap_prod = unmap_q->producer_index; 2867 /* Undo the changes starting at tcb->producer_index */
2749 2868 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
2750 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev, 2869 tcb->producer_index);
2751 unmap_q->unmap_array,
2752 unmap_prod, unmap_q->q_depth, skb,
2753 i);
2754 dev_kfree_skb(skb); 2870 dev_kfree_skb(skb);
2755 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); 2871 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
2756 return NETDEV_TX_OK; 2872 return NETDEV_TX_OK;
@@ -2758,47 +2874,35 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2758 2874
2759 len += size; 2875 len += size;
2760 2876
2761 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { 2877 vect_id++;
2878 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
2762 vect_id = 0; 2879 vect_id = 0;
2763 if (--wi_range) 2880 BNA_QE_INDX_INC(prod, q_depth);
2764 txqent++; 2881 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2765 else {
2766 BNA_QE_INDX_ADD(txq_prod, wis_used,
2767 tcb->q_depth);
2768 wis_used = 0;
2769 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt,
2770 txqent, wi_range);
2771 }
2772 wis_used++;
2773 txqent->hdr.wi_ext.opcode = 2882 txqent->hdr.wi_ext.opcode =
2774 __constant_htons(BNA_TXQ_WI_EXTENSION); 2883 __constant_htons(BNA_TXQ_WI_EXTENSION);
2884 unmap = &unmap_q[prod];
2775 } 2885 }
2776 2886
2777 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
2778 txqent->vector[vect_id].length = htons(size);
2779 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, 2887 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
2780 0, size, DMA_TO_DEVICE); 2888 0, size, DMA_TO_DEVICE);
2781 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
2782 dma_addr);
2783 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2889 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
2784 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2890 txqent->vector[vect_id].length = htons(size);
2891 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
2892 dma_addr);
2893 head_unmap->nvecs++;
2785 } 2894 }
2786 2895
2787 if (unlikely(len != skb->len)) { 2896 if (unlikely(len != skb->len)) {
2788 unmap_prod = unmap_q->producer_index; 2897 /* Undo the changes starting at tcb->producer_index */
2789 2898 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
2790 unmap_prod = bnad_pci_unmap_skb(&bnad->pcidev->dev,
2791 unmap_q->unmap_array, unmap_prod,
2792 unmap_q->q_depth, skb,
2793 skb_shinfo(skb)->nr_frags);
2794 dev_kfree_skb(skb); 2899 dev_kfree_skb(skb);
2795 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); 2900 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
2796 return NETDEV_TX_OK; 2901 return NETDEV_TX_OK;
2797 } 2902 }
2798 2903
2799 unmap_q->producer_index = unmap_prod; 2904 BNA_QE_INDX_INC(prod, q_depth);
2800 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); 2905 tcb->producer_index = prod;
2801 tcb->producer_index = txq_prod;
2802 2906
2803 smp_mb(); 2907 smp_mb();
2804 2908
@@ -3226,7 +3330,7 @@ bnad_pci_uninit(struct pci_dev *pdev)
3226 pci_disable_device(pdev); 3330 pci_disable_device(pdev);
3227} 3331}
3228 3332
3229static int __devinit 3333static int
3230bnad_pci_probe(struct pci_dev *pdev, 3334bnad_pci_probe(struct pci_dev *pdev,
3231 const struct pci_device_id *pcidev_id) 3335 const struct pci_device_id *pcidev_id)
3232{ 3336{
@@ -3320,7 +3424,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3320 if (err) 3424 if (err)
3321 goto res_free; 3425 goto res_free;
3322 3426
3323
3324 /* Set up timers */ 3427 /* Set up timers */
3325 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 3428 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
3326 ((unsigned long)bnad)); 3429 ((unsigned long)bnad));
@@ -3426,7 +3529,7 @@ unlock_mutex:
3426 return err; 3529 return err;
3427} 3530}
3428 3531
3429static void __devexit 3532static void
3430bnad_pci_remove(struct pci_dev *pdev) 3533bnad_pci_remove(struct pci_dev *pdev)
3431{ 3534{
3432 struct net_device *netdev = pci_get_drvdata(pdev); 3535 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3490,7 +3593,7 @@ static struct pci_driver bnad_pci_driver = {
3490 .name = BNAD_NAME, 3593 .name = BNAD_NAME,
3491 .id_table = bnad_pci_id_table, 3594 .id_table = bnad_pci_id_table,
3492 .probe = bnad_pci_probe, 3595 .probe = bnad_pci_probe,
3493 .remove = __devexit_p(bnad_pci_remove), 3596 .remove = bnad_pci_remove,
3494}; 3597};
3495 3598
3496static int __init 3599static int __init
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index d78339224751..c1d0bc059bfd 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.0.23.0" 74#define BNAD_VERSION "3.1.2.1"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -83,12 +83,9 @@ struct bnad_rx_ctrl {
83 83
84#define BNAD_IOCETH_TIMEOUT 10000 84#define BNAD_IOCETH_TIMEOUT 10000
85 85
86#define BNAD_MAX_Q_DEPTH 0x10000 86#define BNAD_MIN_Q_DEPTH 512
87#define BNAD_MIN_Q_DEPTH 0x200 87#define BNAD_MAX_RXQ_DEPTH 2048
88 88#define BNAD_MAX_TXQ_DEPTH 2048
89#define BNAD_MAX_RXQ_DEPTH (BNAD_MAX_Q_DEPTH / bnad_rxqs_per_cq)
90/* keeping MAX TX and RX Q depth equal */
91#define BNAD_MAX_TXQ_DEPTH BNAD_MAX_RXQ_DEPTH
92 89
93#define BNAD_JUMBO_MTU 9000 90#define BNAD_JUMBO_MTU 9000
94 91
@@ -101,9 +98,8 @@ struct bnad_rx_ctrl {
101#define BNAD_TXQ_TX_STARTED 1 98#define BNAD_TXQ_TX_STARTED 1
102 99
103/* Bit positions for rcb->flags */ 100/* Bit positions for rcb->flags */
104#define BNAD_RXQ_REFILL 0 101#define BNAD_RXQ_STARTED 0
105#define BNAD_RXQ_STARTED 1 102#define BNAD_RXQ_POST_OK 1
106#define BNAD_RXQ_POST_OK 2
107 103
108/* Resource limits */ 104/* Resource limits */
109#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) 105#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
@@ -221,18 +217,43 @@ struct bnad_rx_info {
221 struct work_struct rx_cleanup_work; 217 struct work_struct rx_cleanup_work;
222} ____cacheline_aligned; 218} ____cacheline_aligned;
223 219
224/* Unmap queues for Tx / Rx cleanup */ 220struct bnad_tx_vector {
225struct bnad_skb_unmap { 221 DEFINE_DMA_UNMAP_ADDR(dma_addr);
222};
223
224struct bnad_tx_unmap {
226 struct sk_buff *skb; 225 struct sk_buff *skb;
226 u32 nvecs;
227 struct bnad_tx_vector vectors[BFI_TX_MAX_VECTORS_PER_WI];
228};
229
230struct bnad_rx_vector {
227 DEFINE_DMA_UNMAP_ADDR(dma_addr); 231 DEFINE_DMA_UNMAP_ADDR(dma_addr);
232 u32 len;
233};
234
235struct bnad_rx_unmap {
236 struct page *page;
237 u32 page_offset;
238 struct sk_buff *skb;
239 struct bnad_rx_vector vector;
228}; 240};
229 241
230struct bnad_unmap_q { 242enum bnad_rxbuf_type {
231 u32 producer_index; 243 BNAD_RXBUF_NONE = 0,
232 u32 consumer_index; 244 BNAD_RXBUF_SKB = 1,
233 u32 q_depth; 245 BNAD_RXBUF_PAGE = 2,
234 /* This should be the last one */ 246 BNAD_RXBUF_MULTI = 3
235 struct bnad_skb_unmap unmap_array[1]; 247};
248
249#define BNAD_RXBUF_IS_PAGE(_type) ((_type) == BNAD_RXBUF_PAGE)
250
251struct bnad_rx_unmap_q {
252 int reuse_pi;
253 int alloc_order;
254 u32 map_size;
255 enum bnad_rxbuf_type type;
256 struct bnad_rx_unmap unmap[0];
236}; 257};
237 258
238/* Bit mask values for bnad->cfg_flags */ 259/* Bit mask values for bnad->cfg_flags */
@@ -252,11 +273,6 @@ struct bnad_unmap_q {
252#define BNAD_RF_STATS_TIMER_RUNNING 5 273#define BNAD_RF_STATS_TIMER_RUNNING 5
253#define BNAD_RF_TX_PRIO_SET 6 274#define BNAD_RF_TX_PRIO_SET 6
254 275
255
256/* Define for Fast Path flags */
257/* Defined as bit positions */
258#define BNAD_FP_IN_RX_PATH 0
259
260struct bnad { 276struct bnad {
261 struct net_device *netdev; 277 struct net_device *netdev;
262 u32 id; 278 u32 id;
@@ -284,8 +300,8 @@ struct bnad {
284 u8 tx_coalescing_timeo; 300 u8 tx_coalescing_timeo;
285 u8 rx_coalescing_timeo; 301 u8 rx_coalescing_timeo;
286 302
287 struct bna_rx_config rx_config[BNAD_MAX_RX]; 303 struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned;
288 struct bna_tx_config tx_config[BNAD_MAX_TX]; 304 struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned;
289 305
290 void __iomem *bar0; /* BAR0 address */ 306 void __iomem *bar0; /* BAR0 address */
291 307
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 40e1e84f4984..455b5a2e59d4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -102,6 +102,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
102 "rx_unmap_q_alloc_failed", 102 "rx_unmap_q_alloc_failed",
103 "rxbuf_alloc_failed", 103 "rxbuf_alloc_failed",
104 104
105 "mac_stats_clr_cnt",
105 "mac_frame_64", 106 "mac_frame_64",
106 "mac_frame_65_127", 107 "mac_frame_65_127",
107 "mac_frame_128_255", 108 "mac_frame_128_255",
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 32e8f178ab76..14ca9317c915 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
37 37
38extern char bfa_version[]; 38extern char bfa_version[];
39 39
40#define CNA_FW_FILE_CT "ctfw.bin" 40#define CNA_FW_FILE_CT "ctfw-3.1.0.0.bin"
41#define CNA_FW_FILE_CT2 "ct2fw.bin" 41#define CNA_FW_FILE_CT2 "ct2fw-3.1.0.0.bin"
42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ 42#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
43 43
44#pragma pack(1) 44#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index db931916da08..ceb0de0cf62c 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -2,13 +2,10 @@
2# Atmel device configuration 2# Atmel device configuration
3# 3#
4 4
5config HAVE_NET_MACB
6 bool
7
8config NET_CADENCE 5config NET_CADENCE
9 bool "Cadence devices" 6 bool "Cadence devices"
7 depends on HAS_IOMEM
10 default y 8 default y
11 depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
12 ---help--- 9 ---help---
13 If you have a network (Ethernet) card belonging to this class, say Y. 10 If you have a network (Ethernet) card belonging to this class, say Y.
14 Make sure you know the name of your card. Read the Ethernet-HOWTO, 11 Make sure you know the name of your card. Read the Ethernet-HOWTO,
@@ -25,16 +22,14 @@ if NET_CADENCE
25 22
26config ARM_AT91_ETHER 23config ARM_AT91_ETHER
27 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
28 depends on ARM && ARCH_AT91RM9200
29 select NET_CORE 25 select NET_CORE
30 select MII 26 select MACB
31 ---help--- 27 ---help---
32 If you wish to compile a kernel for the AT91RM9200 and enable 28 If you wish to compile a kernel for the AT91RM9200 and enable
33 ethernet support, then you should always answer Y to this. 29 ethernet support, then you should always answer Y to this.
34 30
35config MACB 31config MACB
36 tristate "Cadence MACB/GEM support" 32 tristate "Cadence MACB/GEM support"
37 depends on HAVE_NET_MACB
38 select PHYLIB 33 select PHYLIB
39 ---help--- 34 ---help---
40 The Cadence MACB ethernet interface is found on many Atmel AT32 and 35 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 35fc6edbacf8..3becdb2deb46 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -6,11 +6,6 @@
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc. 6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson 01/11/2003 7 * Initial version by Rick Bronson 01/11/2003
8 * 8 *
9 * Intel LXT971A PHY support by Christopher Bahns & David Knickerbocker
10 * (Polaroid Corporation)
11 *
12 * Realtek RTL8201(B)L PHY support by Roman Avramenko <roman@imsystems.ru>
13 *
14 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 11 * as published by the Free Software Foundation; either version
@@ -20,7 +15,6 @@
20#include <linux/module.h> 15#include <linux/module.h>
21#include <linux/init.h> 16#include <linux/init.h>
22#include <linux/interrupt.h> 17#include <linux/interrupt.h>
23#include <linux/mii.h>
24#include <linux/netdevice.h> 18#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
26#include <linux/skbuff.h> 20#include <linux/skbuff.h>
@@ -31,956 +25,248 @@
31#include <linux/clk.h> 25#include <linux/clk.h>
32#include <linux/gfp.h> 26#include <linux/gfp.h>
33#include <linux/phy.h> 27#include <linux/phy.h>
34#include <linux/platform_data/atmel.h> 28#include <linux/io.h>
35 29#include <linux/of.h>
36#include <asm/io.h> 30#include <linux/of_device.h>
37#include <asm/uaccess.h> 31#include <linux/of_net.h>
38#include <asm/mach-types.h> 32#include <linux/pinctrl/consumer.h>
39
40#include <mach/at91rm9200_emac.h>
41#include <asm/gpio.h>
42
43#include "at91_ether.h"
44
45#define DRV_NAME "at91_ether"
46#define DRV_VERSION "1.0"
47
48#define LINK_POLL_INTERVAL (HZ)
49
50/* ..................................................................... */
51
52/*
53 * Read from a EMAC register.
54 */
55static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
56{
57 return __raw_readl(lp->emac_base + reg);
58}
59
60/*
61 * Write to a EMAC register.
62 */
63static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
64{
65 __raw_writel(value, lp->emac_base + reg);
66}
67
68/* ........................... PHY INTERFACE ........................... */
69
70/*
71 * Enable the MDIO bit in MAC control register
72 * When not called from an interrupt-handler, access to the PHY must be
73 * protected by a spinlock.
74 */
75static void enable_mdi(struct at91_private *lp)
76{
77 unsigned long ctl;
78
79 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
80 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
81}
82
83/*
84 * Disable the MDIO bit in the MAC control register
85 */
86static void disable_mdi(struct at91_private *lp)
87{
88 unsigned long ctl;
89
90 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
91 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
92}
93
94/*
95 * Wait until the PHY operation is complete.
96 */
97static inline void at91_phy_wait(struct at91_private *lp)
98{
99 unsigned long timeout = jiffies + 2;
100
101 while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
102 if (time_after(jiffies, timeout)) {
103 printk("at91_ether: MIO timeout\n");
104 break;
105 }
106 cpu_relax();
107 }
108}
109
110/*
111 * Write value to the a PHY register
112 * Note: MDI interface is assumed to already have been enabled.
113 */
114static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
115{
116 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
117 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
118
119 /* Wait until IDLE bit in Network Status register is cleared */
120 at91_phy_wait(lp);
121}
122
123/*
124 * Read value stored in a PHY register.
125 * Note: MDI interface is assumed to already have been enabled.
126 */
127static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
128{
129 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
130 | ((phy_addr & 0x1f) << 23) | (address << 18));
131
132 /* Wait until IDLE bit in Network Status register is cleared */
133 at91_phy_wait(lp);
134
135 *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
136}
137
138/* ........................... PHY MANAGEMENT .......................... */
139
140/*
141 * Access the PHY to determine the current link speed and mode, and update the
142 * MAC accordingly.
143 * If no link or auto-negotiation is busy, then no changes are made.
144 */
145static void update_linkspeed(struct net_device *dev, int silent)
146{
147 struct at91_private *lp = netdev_priv(dev);
148 unsigned int bmsr, bmcr, lpa, mac_cfg;
149 unsigned int speed, duplex;
150
151 if (!mii_link_ok(&lp->mii)) { /* no link */
152 netif_carrier_off(dev);
153 if (!silent)
154 printk(KERN_INFO "%s: Link down.\n", dev->name);
155 return;
156 }
157
158 /* Link up, or auto-negotiation still in progress */
159 read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
160 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
161 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
162 if (!(bmsr & BMSR_ANEGCOMPLETE))
163 return; /* Do nothing - another interrupt generated when negotiation complete */
164
165 read_phy(lp, lp->phy_address, MII_LPA, &lpa);
166 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
167 else speed = SPEED_10;
168 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
169 else duplex = DUPLEX_HALF;
170 } else {
171 speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
172 duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
173 }
174
175 /* Update the MAC */
176 mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
177 if (speed == SPEED_100) {
178 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
179 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
180 else /* 100 Half Duplex */
181 mac_cfg |= AT91_EMAC_SPD;
182 } else {
183 if (duplex == DUPLEX_FULL) /* 10 Full Duplex */
184 mac_cfg |= AT91_EMAC_FD;
185 else {} /* 10 Half Duplex */
186 }
187 at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
188
189 if (!silent)
190 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
191 netif_carrier_on(dev);
192}
193
194/*
195 * Handle interrupts from the PHY
196 */
197static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
198{
199 struct net_device *dev = (struct net_device *) dev_id;
200 struct at91_private *lp = netdev_priv(dev);
201 unsigned int phy;
202
203 /*
204 * This hander is triggered on both edges, but the PHY chips expect
205 * level-triggering. We therefore have to check if the PHY actually has
206 * an IRQ pending.
207 */
208 enable_mdi(lp);
209 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
210 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
211 if (!(phy & (1 << 0)))
212 goto done;
213 }
214 else if (lp->phy_type == MII_LXT971A_ID) {
215 read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
216 if (!(phy & (1 << 2)))
217 goto done;
218 }
219 else if (lp->phy_type == MII_BCM5221_ID) {
220 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
221 if (!(phy & (1 << 0)))
222 goto done;
223 }
224 else if (lp->phy_type == MII_KS8721_ID) {
225 read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
226 if (!(phy & ((1 << 2) | 1)))
227 goto done;
228 }
229 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
230 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
231 if (!(phy & ((1 << 2) | 1)))
232 goto done;
233 }
234 else if (lp->phy_type == MII_DP83848_ID) {
235 read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
236 if (!(phy & (1 << 7)))
237 goto done;
238 }
239
240 update_linkspeed(dev, 0);
241
242done:
243 disable_mdi(lp);
244
245 return IRQ_HANDLED;
246}
247
248/*
249 * Initialize and enable the PHY interrupt for link-state changes
250 */
251static void enable_phyirq(struct net_device *dev)
252{
253 struct at91_private *lp = netdev_priv(dev);
254 unsigned int dsintr, irq_number;
255 int status;
256
257 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
258 /*
259 * PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
260 * or board does not have it connected.
261 */
262 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
263 return;
264 }
265
266 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
267 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
268 if (status) {
269 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
270 return;
271 }
272
273 spin_lock_irq(&lp->lock);
274 enable_mdi(lp);
275
276 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
277 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
278 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
279 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
280 }
281 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
282 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
283 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
284 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
285 }
286 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
287 dsintr = (1 << 15) | ( 1 << 14);
288 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
289 }
290 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
291 dsintr = (1 << 10) | ( 1 << 8);
292 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
293 }
294 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
295 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
296 dsintr = dsintr | 0x500; /* set bits 8, 10 */
297 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
298 }
299 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
300 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
301 dsintr = dsintr | 0x3c; /* set bits 2..5 */
302 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
303 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
304 dsintr = dsintr | 0x3; /* set bits 0,1 */
305 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
306 }
307
308 disable_mdi(lp);
309 spin_unlock_irq(&lp->lock);
310}
311
312/*
313 * Disable the PHY interrupt
314 */
315static void disable_phyirq(struct net_device *dev)
316{
317 struct at91_private *lp = netdev_priv(dev);
318 unsigned int dsintr;
319 unsigned int irq_number;
320
321 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
322 del_timer_sync(&lp->check_timer);
323 return;
324 }
325
326 spin_lock_irq(&lp->lock);
327 enable_mdi(lp);
328
329 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
330 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
331 dsintr = dsintr | 0xf00; /* set bits 8..11 */
332 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
333 }
334 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
335 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
336 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
337 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
338 }
339 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
340 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
341 dsintr = ~(1 << 14);
342 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
343 }
344 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
345 read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
346 dsintr = ~((1 << 10) | (1 << 8));
347 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
348 }
349 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
350 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
351 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
352 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
353 }
354 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
355 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
356 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
357 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
358 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
359 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
360 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
361 }
362
363 disable_mdi(lp);
364 spin_unlock_irq(&lp->lock);
365
366 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
367 free_irq(irq_number, dev); /* Free interrupt handler */
368}
369
370/*
371 * Perform a software reset of the PHY.
372 */
373#if 0
374static void reset_phy(struct net_device *dev)
375{
376 struct at91_private *lp = netdev_priv(dev);
377 unsigned int bmcr;
378
379 spin_lock_irq(&lp->lock);
380 enable_mdi(lp);
381
382 /* Perform PHY reset */
383 write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
384
385 /* Wait until PHY reset is complete */
386 do {
387 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
388 } while (!(bmcr & BMCR_RESET));
389
390 disable_mdi(lp);
391 spin_unlock_irq(&lp->lock);
392}
393#endif
394
395static void at91ether_check_link(unsigned long dev_id)
396{
397 struct net_device *dev = (struct net_device *) dev_id;
398 struct at91_private *lp = netdev_priv(dev);
399
400 enable_mdi(lp);
401 update_linkspeed(dev, 1);
402 disable_mdi(lp);
403
404 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
405}
406
407/*
408 * Perform any PHY-specific initialization.
409 */
410static void __init initialize_phy(struct at91_private *lp)
411{
412 unsigned int val;
413
414 spin_lock_irq(&lp->lock);
415 enable_mdi(lp);
416
417 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
418 read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
419 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
420 lp->phy_media = PORT_FIBRE;
421 } else if (machine_is_csb337()) {
422 /* mix link activity status into LED2 link state */
423 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
424 } else if (machine_is_ecbat91())
425 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
426
427 disable_mdi(lp);
428 spin_unlock_irq(&lp->lock);
429}
430
431/* ......................... ADDRESS MANAGEMENT ........................ */
432
433/*
434 * NOTE: Your bootloader must always set the MAC address correctly before
435 * booting into Linux.
436 *
437 * - It must always set the MAC address after reset, even if it doesn't
438 * happen to access the Ethernet while it's booting. Some versions of
439 * U-Boot on the AT91RM9200-DK do not do this.
440 *
441 * - Likewise it must store the addresses in the correct byte order.
442 * MicroMonitor (uMon) on the CSB337 does this incorrectly (and
443 * continues to do so, for bug-compatibility).
444 */
445
446static short __init unpack_mac_address(struct net_device *dev, unsigned int hi, unsigned int lo)
447{
448 char addr[6];
449
450 if (machine_is_csb337()) {
451 addr[5] = (lo & 0xff); /* The CSB337 bootloader stores the MAC the wrong-way around */
452 addr[4] = (lo & 0xff00) >> 8;
453 addr[3] = (lo & 0xff0000) >> 16;
454 addr[2] = (lo & 0xff000000) >> 24;
455 addr[1] = (hi & 0xff);
456 addr[0] = (hi & 0xff00) >> 8;
457 }
458 else {
459 addr[0] = (lo & 0xff);
460 addr[1] = (lo & 0xff00) >> 8;
461 addr[2] = (lo & 0xff0000) >> 16;
462 addr[3] = (lo & 0xff000000) >> 24;
463 addr[4] = (hi & 0xff);
464 addr[5] = (hi & 0xff00) >> 8;
465 }
466
467 if (is_valid_ether_addr(addr)) {
468 memcpy(dev->dev_addr, &addr, 6);
469 return 1;
470 }
471 return 0;
472}
473
474/*
475 * Set the ethernet MAC address in dev->dev_addr
476 */
477static void __init get_mac_address(struct net_device *dev)
478{
479 struct at91_private *lp = netdev_priv(dev);
480
481 /* Check Specific-Address 1 */
482 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
483 return;
484 /* Check Specific-Address 2 */
485 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
486 return;
487 /* Check Specific-Address 3 */
488 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
489 return;
490 /* Check Specific-Address 4 */
491 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
492 return;
493
494 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
495}
496
497/*
498 * Program the hardware MAC address from dev->dev_addr.
499 */
500static void update_mac_address(struct net_device *dev)
501{
502 struct at91_private *lp = netdev_priv(dev);
503
504 at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
505 at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
506
507 at91_emac_write(lp, AT91_EMAC_SA2L, 0);
508 at91_emac_write(lp, AT91_EMAC_SA2H, 0);
509}
510
511/*
512 * Store the new hardware address in dev->dev_addr, and update the MAC.
513 */
514static int set_mac_address(struct net_device *dev, void* addr)
515{
516 struct sockaddr *address = addr;
517
518 if (!is_valid_ether_addr(address->sa_data))
519 return -EADDRNOTAVAIL;
520
521 memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
522 update_mac_address(dev);
523 33
524 printk("%s: Setting MAC address to %pM\n", dev->name, 34#include "macb.h"
525 dev->dev_addr);
526 35
527 return 0; 36/* 1518 rounded up */
528} 37#define MAX_RBUFF_SZ 0x600
529 38/* max number of receive buffers */
530static int inline hash_bit_value(int bitnr, __u8 *addr) 39#define MAX_RX_DESCR 9
531{
532 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
533 return 1;
534 return 0;
535}
536 40
537/* 41/* Initialize and start the Receiver and Transmit subsystems */
538 * The hash address register is 64 bits long and takes up two locations in the memory map. 42static int at91ether_start(struct net_device *dev)
539 * The least significant bits are stored in EMAC_HSL and the most significant
540 * bits in EMAC_HSH.
541 *
542 * The unicast hash enable and the multicast hash enable bits in the network configuration
543 * register enable the reception of hash matched frames. The destination address is
544 * reduced to a 6 bit index into the 64 bit hash register using the following hash function.
545 * The hash function is an exclusive or of every sixth bit of the destination address.
546 * hash_index[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
547 * hash_index[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
548 * hash_index[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
549 * hash_index[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
550 * hash_index[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
551 * hash_index[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
552 * da[0] represents the least significant bit of the first byte received, that is, the multicast/
553 * unicast indicator, and da[47] represents the most significant bit of the last byte
554 * received.
555 * If the hash index points to a bit that is set in the hash register then the frame will be
556 * matched according to whether the frame is multicast or unicast.
557 * A multicast match will be signalled if the multicast hash enable bit is set, da[0] is 1 and
558 * the hash index points to a bit set in the hash register.
559 * A unicast match will be signalled if the unicast hash enable bit is set, da[0] is 0 and the
560 * hash index points to a bit set in the hash register.
561 * To receive all multicast frames, the hash register should be set with all ones and the
562 * multicast hash enable bit should be set in the network configuration register.
563 */
564
565/*
566 * Return the hash index value for the specified address.
567 */
568static int hash_get_index(__u8 *addr)
569{
570 int i, j, bitval;
571 int hash_index = 0;
572
573 for (j = 0; j < 6; j++) {
574 for (i = 0, bitval = 0; i < 8; i++)
575 bitval ^= hash_bit_value(i*6 + j, addr);
576
577 hash_index |= (bitval << j);
578 }
579
580 return hash_index;
581}
582
583/*
584 * Add multicast addresses to the internal multicast-hash table.
585 */
586static void at91ether_sethashtable(struct net_device *dev)
587{ 43{
588 struct at91_private *lp = netdev_priv(dev); 44 struct macb *lp = netdev_priv(dev);
589 struct netdev_hw_addr *ha; 45 dma_addr_t addr;
590 unsigned long mc_filter[2]; 46 u32 ctl;
591 unsigned int bitnr; 47 int i;
592
593 mc_filter[0] = mc_filter[1] = 0;
594
595 netdev_for_each_mc_addr(ha, dev) {
596 bitnr = hash_get_index(ha->addr);
597 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
598 }
599
600 at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
601 at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
602}
603 48
604/* 49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
605 * Enable/Disable promiscuous and multicast modes. 50 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
606 */ 51 &lp->rx_ring_dma, GFP_KERNEL);
607static void at91ether_set_multicast_list(struct net_device *dev) 52 if (!lp->rx_ring) {
608{ 53 netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
609 struct at91_private *lp = netdev_priv(dev); 54 return -ENOMEM;
610 unsigned long cfg;
611
612 cfg = at91_emac_read(lp, AT91_EMAC_CFG);
613
614 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
615 cfg |= AT91_EMAC_CAF;
616 else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */
617 cfg &= ~AT91_EMAC_CAF;
618
619 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
620 at91_emac_write(lp, AT91_EMAC_HSH, -1);
621 at91_emac_write(lp, AT91_EMAC_HSL, -1);
622 cfg |= AT91_EMAC_MTI;
623 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
624 at91ether_sethashtable(dev);
625 cfg |= AT91_EMAC_MTI;
626 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
627 at91_emac_write(lp, AT91_EMAC_HSH, 0);
628 at91_emac_write(lp, AT91_EMAC_HSL, 0);
629 cfg &= ~AT91_EMAC_MTI;
630 } 55 }
631 56
632 at91_emac_write(lp, AT91_EMAC_CFG, cfg); 57 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
633} 58 MAX_RX_DESCR * MAX_RBUFF_SZ,
634 59 &lp->rx_buffers_dma, GFP_KERNEL);
635/* ......................... ETHTOOL SUPPORT ........................... */ 60 if (!lp->rx_buffers) {
636 61 netdev_err(dev, "unable to alloc rx data DMA buffer\n");
637static int mdio_read(struct net_device *dev, int phy_id, int location)
638{
639 struct at91_private *lp = netdev_priv(dev);
640 unsigned int value;
641
642 read_phy(lp, phy_id, location, &value);
643 return value;
644}
645
646static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
647{
648 struct at91_private *lp = netdev_priv(dev);
649
650 write_phy(lp, phy_id, location, value);
651}
652
653static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
654{
655 struct at91_private *lp = netdev_priv(dev);
656 int ret;
657
658 spin_lock_irq(&lp->lock);
659 enable_mdi(lp);
660 62
661 ret = mii_ethtool_gset(&lp->mii, cmd); 63 dma_free_coherent(&lp->pdev->dev,
662 64 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
663 disable_mdi(lp); 65 lp->rx_ring, lp->rx_ring_dma);
664 spin_unlock_irq(&lp->lock); 66 lp->rx_ring = NULL;
665 67 return -ENOMEM;
666 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
667 cmd->supported = SUPPORTED_FIBRE;
668 cmd->port = PORT_FIBRE;
669 } 68 }
670 69
671 return ret; 70 addr = lp->rx_buffers_dma;
672}
673
674static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
675{
676 struct at91_private *lp = netdev_priv(dev);
677 int ret;
678
679 spin_lock_irq(&lp->lock);
680 enable_mdi(lp);
681
682 ret = mii_ethtool_sset(&lp->mii, cmd);
683
684 disable_mdi(lp);
685 spin_unlock_irq(&lp->lock);
686
687 return ret;
688}
689
690static int at91ether_nwayreset(struct net_device *dev)
691{
692 struct at91_private *lp = netdev_priv(dev);
693 int ret;
694
695 spin_lock_irq(&lp->lock);
696 enable_mdi(lp);
697
698 ret = mii_nway_restart(&lp->mii);
699
700 disable_mdi(lp);
701 spin_unlock_irq(&lp->lock);
702
703 return ret;
704}
705
706static void at91ether_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
707{
708 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
709 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
710 strlcpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
711}
712
713static const struct ethtool_ops at91ether_ethtool_ops = {
714 .get_settings = at91ether_get_settings,
715 .set_settings = at91ether_set_settings,
716 .get_drvinfo = at91ether_get_drvinfo,
717 .nway_reset = at91ether_nwayreset,
718 .get_link = ethtool_op_get_link,
719};
720
721static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
722{
723 struct at91_private *lp = netdev_priv(dev);
724 int res;
725
726 if (!netif_running(dev))
727 return -EINVAL;
728
729 spin_lock_irq(&lp->lock);
730 enable_mdi(lp);
731 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
732 disable_mdi(lp);
733 spin_unlock_irq(&lp->lock);
734
735 return res;
736}
737
738/* ................................ MAC ................................ */
739
740/*
741 * Initialize and start the Receiver and Transmit subsystems
742 */
743static void at91ether_start(struct net_device *dev)
744{
745 struct at91_private *lp = netdev_priv(dev);
746 struct recv_desc_bufs *dlist, *dlist_phys;
747 int i;
748 unsigned long ctl;
749
750 dlist = lp->dlist;
751 dlist_phys = lp->dlist_phys;
752
753 for (i = 0; i < MAX_RX_DESCR; i++) { 71 for (i = 0; i < MAX_RX_DESCR; i++) {
754 dlist->descriptors[i].addr = (unsigned int) &dlist_phys->recv_buf[i][0]; 72 lp->rx_ring[i].addr = addr;
755 dlist->descriptors[i].size = 0; 73 lp->rx_ring[i].ctrl = 0;
74 addr += MAX_RBUFF_SZ;
756 } 75 }
757 76
758 /* Set the Wrap bit on the last descriptor */ 77 /* Set the Wrap bit on the last descriptor */
759 dlist->descriptors[i-1].addr |= EMAC_DESC_WRAP; 78 lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
760 79
761 /* Reset buffer index */ 80 /* Reset buffer index */
762 lp->rxBuffIndex = 0; 81 lp->rx_tail = 0;
763 82
764 /* Program address of descriptor list in Rx Buffer Queue register */ 83 /* Program address of descriptor list in Rx Buffer Queue register */
765 at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys); 84 macb_writel(lp, RBQP, lp->rx_ring_dma);
766 85
767 /* Enable Receive and Transmit */ 86 /* Enable Receive and Transmit */
768 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 87 ctl = macb_readl(lp, NCR);
769 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); 88 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
89
90 return 0;
770} 91}
771 92
772/* 93/* Open the ethernet interface */
773 * Open the ethernet interface
774 */
775static int at91ether_open(struct net_device *dev) 94static int at91ether_open(struct net_device *dev)
776{ 95{
777 struct at91_private *lp = netdev_priv(dev); 96 struct macb *lp = netdev_priv(dev);
778 unsigned long ctl; 97 u32 ctl;
779 98 int ret;
780 if (!is_valid_ether_addr(dev->dev_addr))
781 return -EADDRNOTAVAIL;
782
783 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
784 99
785 /* Clear internal statistics */ 100 /* Clear internal statistics */
786 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 101 ctl = macb_readl(lp, NCR);
787 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); 102 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
788 103
789 /* Update the MAC address (incase user has changed it) */ 104 macb_set_hwaddr(lp);
790 update_mac_address(dev);
791 105
792 /* Enable PHY interrupt */ 106 ret = at91ether_start(dev);
793 enable_phyirq(dev); 107 if (ret)
108 return ret;
794 109
795 /* Enable MAC interrupts */ 110 /* Enable MAC interrupts */
796 at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA 111 macb_writel(lp, IER, MACB_BIT(RCOMP) |
797 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 112 MACB_BIT(RXUBR) |
798 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 113 MACB_BIT(ISR_TUND) |
799 114 MACB_BIT(ISR_RLE) |
800 /* Determine current link speed */ 115 MACB_BIT(TCOMP) |
801 spin_lock_irq(&lp->lock); 116 MACB_BIT(ISR_ROVR) |
802 enable_mdi(lp); 117 MACB_BIT(HRESP));
803 update_linkspeed(dev, 0); 118
804 disable_mdi(lp); 119 /* schedule a link state check */
805 spin_unlock_irq(&lp->lock); 120 phy_start(lp->phy_dev);
806 121
807 at91ether_start(dev);
808 netif_start_queue(dev); 122 netif_start_queue(dev);
123
809 return 0; 124 return 0;
810} 125}
811 126
812/* 127/* Close the interface */
813 * Close the interface
814 */
815static int at91ether_close(struct net_device *dev) 128static int at91ether_close(struct net_device *dev)
816{ 129{
817 struct at91_private *lp = netdev_priv(dev); 130 struct macb *lp = netdev_priv(dev);
818 unsigned long ctl; 131 u32 ctl;
819 132
820 /* Disable Receiver and Transmitter */ 133 /* Disable Receiver and Transmitter */
821 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 134 ctl = macb_readl(lp, NCR);
822 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); 135 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
823
824 /* Disable PHY interrupt */
825 disable_phyirq(dev);
826 136
827 /* Disable MAC interrupts */ 137 /* Disable MAC interrupts */
828 at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA 138 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
829 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 139 MACB_BIT(RXUBR) |
830 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 140 MACB_BIT(ISR_TUND) |
141 MACB_BIT(ISR_RLE) |
142 MACB_BIT(TCOMP) |
143 MACB_BIT(ISR_ROVR) |
144 MACB_BIT(HRESP));
831 145
832 netif_stop_queue(dev); 146 netif_stop_queue(dev);
833 147
834 clk_disable(lp->ether_clk); /* Disable Peripheral clock */ 148 dma_free_coherent(&lp->pdev->dev,
149 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
150 lp->rx_ring, lp->rx_ring_dma);
151 lp->rx_ring = NULL;
152
153 dma_free_coherent(&lp->pdev->dev,
154 MAX_RX_DESCR * MAX_RBUFF_SZ,
155 lp->rx_buffers, lp->rx_buffers_dma);
156 lp->rx_buffers = NULL;
835 157
836 return 0; 158 return 0;
837} 159}
838 160
839/* 161/* Transmit packet */
840 * Transmit packet.
841 */
842static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) 162static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
843{ 163{
844 struct at91_private *lp = netdev_priv(dev); 164 struct macb *lp = netdev_priv(dev);
845 165
846 if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { 166 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
847 netif_stop_queue(dev); 167 netif_stop_queue(dev);
848 168
849 /* Store packet information (to free when Tx completed) */ 169 /* Store packet information (to free when Tx completed) */
850 lp->skb = skb; 170 lp->skb = skb;
851 lp->skb_length = skb->len; 171 lp->skb_length = skb->len;
852 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); 172 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
853 dev->stats.tx_bytes += skb->len; 173 DMA_TO_DEVICE);
854 174
855 /* Set address of the data in the Transmit Address register */ 175 /* Set address of the data in the Transmit Address register */
856 at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr); 176 macb_writel(lp, TAR, lp->skb_physaddr);
857 /* Set length of the packet in the Transmit Control register */ 177 /* Set length of the packet in the Transmit Control register */
858 at91_emac_write(lp, AT91_EMAC_TCR, skb->len); 178 macb_writel(lp, TCR, skb->len);
859 179
860 } else { 180 } else {
861 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); 181 netdev_err(dev, "%s called, but device is busy!\n", __func__);
862 return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) 182 return NETDEV_TX_BUSY;
863 on this skb, he also reports -ENETDOWN and printk's, so either
864 we free and return(0) or don't free and return 1 */
865 } 183 }
866 184
867 return NETDEV_TX_OK; 185 return NETDEV_TX_OK;
868} 186}
869 187
870/* 188/* Extract received frame from buffer descriptors and sent to upper layers.
871 * Update the current statistics from the internal statistics registers.
872 */
873static struct net_device_stats *at91ether_stats(struct net_device *dev)
874{
875 struct at91_private *lp = netdev_priv(dev);
876 int ale, lenerr, seqe, lcol, ecol;
877
878 if (netif_running(dev)) {
879 dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
880 ale = at91_emac_read(lp, AT91_EMAC_ALE);
881 dev->stats.rx_frame_errors += ale; /* Alignment errors */
882 lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
883 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
884 seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
885 dev->stats.rx_crc_errors += seqe; /* CRC error */
886 dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
887 dev->stats.rx_errors += (ale + lenerr + seqe
888 + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
889
890 dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
891 dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
892 dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
893 dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
894
895 lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
896 ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
897 dev->stats.tx_window_errors += lcol; /* Late collisions */
898 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
899
900 dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
901 }
902 return &dev->stats;
903}
904
905/*
906 * Extract received frame from buffer descriptors and sent to upper layers.
907 * (Called from interrupt context) 189 * (Called from interrupt context)
908 */ 190 */
909static void at91ether_rx(struct net_device *dev) 191static void at91ether_rx(struct net_device *dev)
910{ 192{
911 struct at91_private *lp = netdev_priv(dev); 193 struct macb *lp = netdev_priv(dev);
912 struct recv_desc_bufs *dlist;
913 unsigned char *p_recv; 194 unsigned char *p_recv;
914 struct sk_buff *skb; 195 struct sk_buff *skb;
915 unsigned int pktlen; 196 unsigned int pktlen;
916 197
917 dlist = lp->dlist; 198 while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
918 while (dlist->descriptors[lp->rxBuffIndex].addr & EMAC_DESC_DONE) { 199 p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
919 p_recv = dlist->recv_buf[lp->rxBuffIndex]; 200 pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
920 pktlen = dlist->descriptors[lp->rxBuffIndex].size & 0x7ff; /* Length of frame including FCS */
921 skb = netdev_alloc_skb(dev, pktlen + 2); 201 skb = netdev_alloc_skb(dev, pktlen + 2);
922 if (skb != NULL) { 202 if (skb) {
923 skb_reserve(skb, 2); 203 skb_reserve(skb, 2);
924 memcpy(skb_put(skb, pktlen), p_recv, pktlen); 204 memcpy(skb_put(skb, pktlen), p_recv, pktlen);
925 205
926 skb->protocol = eth_type_trans(skb, dev); 206 skb->protocol = eth_type_trans(skb, dev);
927 dev->stats.rx_bytes += pktlen; 207 lp->stats.rx_packets++;
208 lp->stats.rx_bytes += pktlen;
928 netif_rx(skb); 209 netif_rx(skb);
210 } else {
211 lp->stats.rx_dropped++;
212 netdev_notice(dev, "Memory squeeze, dropping packet.\n");
929 } 213 }
930 else {
931 dev->stats.rx_dropped += 1;
932 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
933 }
934 214
935 if (dlist->descriptors[lp->rxBuffIndex].size & EMAC_MULTICAST) 215 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
936 dev->stats.multicast++; 216 lp->stats.multicast++;
217
218 /* reset ownership bit */
219 lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
937 220
938 dlist->descriptors[lp->rxBuffIndex].addr &= ~EMAC_DESC_DONE; /* reset ownership bit */ 221 /* wrap after last buffer */
939 if (lp->rxBuffIndex == MAX_RX_DESCR-1) /* wrap after last buffer */ 222 if (lp->rx_tail == MAX_RX_DESCR - 1)
940 lp->rxBuffIndex = 0; 223 lp->rx_tail = 0;
941 else 224 else
942 lp->rxBuffIndex++; 225 lp->rx_tail++;
943 } 226 }
944} 227}
945 228
946/* 229/* MAC interrupt handler */
947 * MAC interrupt handler
948 */
949static irqreturn_t at91ether_interrupt(int irq, void *dev_id) 230static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
950{ 231{
951 struct net_device *dev = (struct net_device *) dev_id; 232 struct net_device *dev = dev_id;
952 struct at91_private *lp = netdev_priv(dev); 233 struct macb *lp = netdev_priv(dev);
953 unsigned long intstatus, ctl; 234 u32 intstatus, ctl;
954 235
955 /* MAC Interrupt Status register indicates what interrupts are pending. 236 /* MAC Interrupt Status register indicates what interrupts are pending.
956 It is automatically cleared once read. */ 237 * It is automatically cleared once read.
957 intstatus = at91_emac_read(lp, AT91_EMAC_ISR); 238 */
239 intstatus = macb_readl(lp, ISR);
958 240
959 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ 241 /* Receive complete */
242 if (intstatus & MACB_BIT(RCOMP))
960 at91ether_rx(dev); 243 at91ether_rx(dev);
961 244
962 if (intstatus & AT91_EMAC_TCOM) { /* Transmit complete */ 245 /* Transmit complete */
963 /* The TCOM bit is set even if the transmission failed. */ 246 if (intstatus & MACB_BIT(TCOMP)) {
964 if (intstatus & (AT91_EMAC_TUND | AT91_EMAC_RTRY)) 247 /* The TCOM bit is set even if the transmission failed */
965 dev->stats.tx_errors += 1; 248 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
249 lp->stats.tx_errors++;
966 250
967 if (lp->skb) { 251 if (lp->skb) {
968 dev_kfree_skb_irq(lp->skb); 252 dev_kfree_skb_irq(lp->skb);
969 lp->skb = NULL; 253 lp->skb = NULL;
970 dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE); 254 dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
255 lp->stats.tx_packets++;
256 lp->stats.tx_bytes += lp->skb_length;
971 } 257 }
972 netif_wake_queue(dev); 258 netif_wake_queue(dev);
973 } 259 }
974 260
975 /* Work-around for Errata #11 */ 261 /* Work-around for EMAC Errata section 41.3.1 */
976 if (intstatus & AT91_EMAC_RBNA) { 262 if (intstatus & MACB_BIT(RXUBR)) {
977 ctl = at91_emac_read(lp, AT91_EMAC_CTL); 263 ctl = macb_readl(lp, NCR);
978 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); 264 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
979 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE); 265 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
980 } 266 }
981 267
982 if (intstatus & AT91_EMAC_ROVR) 268 if (intstatus & MACB_BIT(ISR_ROVR))
983 printk("%s: ROVR error\n", dev->name); 269 netdev_err(dev, "ROVR error\n");
984 270
985 return IRQ_HANDLED; 271 return IRQ_HANDLED;
986} 272}
@@ -1000,10 +286,10 @@ static const struct net_device_ops at91ether_netdev_ops = {
1000 .ndo_open = at91ether_open, 286 .ndo_open = at91ether_open,
1001 .ndo_stop = at91ether_close, 287 .ndo_stop = at91ether_close,
1002 .ndo_start_xmit = at91ether_start_xmit, 288 .ndo_start_xmit = at91ether_start_xmit,
1003 .ndo_get_stats = at91ether_stats, 289 .ndo_get_stats = macb_get_stats,
1004 .ndo_set_rx_mode = at91ether_set_multicast_list, 290 .ndo_set_rx_mode = macb_set_rx_mode,
1005 .ndo_set_mac_address = set_mac_address, 291 .ndo_set_mac_address = eth_mac_addr,
1006 .ndo_do_ioctl = at91ether_ioctl, 292 .ndo_do_ioctl = macb_ioctl,
1007 .ndo_validate_addr = eth_validate_addr, 293 .ndo_validate_addr = eth_validate_addr,
1008 .ndo_change_mtu = eth_change_mtu, 294 .ndo_change_mtu = eth_change_mtu,
1009#ifdef CONFIG_NET_POLL_CONTROLLER 295#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1011,237 +297,195 @@ static const struct net_device_ops at91ether_netdev_ops = {
1011#endif 297#endif
1012}; 298};
1013 299
1014/* 300#if defined(CONFIG_OF)
1015 * Detect the PHY type, and its address. 301static const struct of_device_id at91ether_dt_ids[] = {
1016 */ 302 { .compatible = "cdns,at91rm9200-emac" },
1017static int __init at91ether_phy_detect(struct at91_private *lp) 303 { .compatible = "cdns,emac" },
304 { /* sentinel */ }
305};
306
307MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
308
309static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
1018{ 310{
1019 unsigned int phyid1, phyid2; 311 struct device_node *np = pdev->dev.of_node;
1020 unsigned long phy_id;
1021 unsigned short phy_address = 0;
1022
1023 while (phy_address < PHY_MAX_ADDR) {
1024 /* Read the PHY ID registers */
1025 enable_mdi(lp);
1026 read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
1027 read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
1028 disable_mdi(lp);
1029
1030 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1031 switch (phy_id) {
1032 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1033 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1034 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1035 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1036 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1037 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1038 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1039 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1040 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1041 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1042 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1043 /* store detected values */
1044 lp->phy_type = phy_id; /* Type of PHY connected */
1045 lp->phy_address = phy_address; /* MDI address of PHY */
1046 return 1;
1047 }
1048 312
1049 phy_address++; 313 if (np)
1050 } 314 return of_get_phy_mode(np);
1051 315
1052 return 0; /* not detected */ 316 return -ENODEV;
1053} 317}
1054 318
319static int at91ether_get_hwaddr_dt(struct macb *bp)
320{
321 struct device_node *np = bp->pdev->dev.of_node;
1055 322
1056/* 323 if (np) {
1057 * Detect MAC & PHY and perform ethernet interface initialization 324 const char *mac = of_get_mac_address(np);
1058 */ 325 if (mac) {
326 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
327 return 0;
328 }
329 }
330
331 return -ENODEV;
332}
333#else
334static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
335{
336 return -ENODEV;
337}
338static int at91ether_get_hwaddr_dt(struct macb *bp)
339{
340 return -ENODEV;
341}
342#endif
343
344/* Detect MAC & PHY and perform ethernet interface initialization */
1059static int __init at91ether_probe(struct platform_device *pdev) 345static int __init at91ether_probe(struct platform_device *pdev)
1060{ 346{
1061 struct macb_platform_data *board_data = pdev->dev.platform_data; 347 struct macb_platform_data *board_data = pdev->dev.platform_data;
1062 struct resource *regs; 348 struct resource *regs;
1063 struct net_device *dev; 349 struct net_device *dev;
1064 struct at91_private *lp; 350 struct phy_device *phydev;
351 struct pinctrl *pinctrl;
352 struct macb *lp;
1065 int res; 353 int res;
354 u32 reg;
1066 355
1067 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 356 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1068 if (!regs) 357 if (!regs)
1069 return -ENOENT; 358 return -ENOENT;
1070 359
1071 dev = alloc_etherdev(sizeof(struct at91_private)); 360 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
361 if (IS_ERR(pinctrl)) {
362 res = PTR_ERR(pinctrl);
363 if (res == -EPROBE_DEFER)
364 return res;
365
366 dev_warn(&pdev->dev, "No pinctrl provided\n");
367 }
368
369 dev = alloc_etherdev(sizeof(struct macb));
1072 if (!dev) 370 if (!dev)
1073 return -ENOMEM; 371 return -ENOMEM;
1074 372
1075 lp = netdev_priv(dev); 373 lp = netdev_priv(dev);
1076 lp->board_data = *board_data; 374 lp->pdev = pdev;
375 lp->dev = dev;
1077 spin_lock_init(&lp->lock); 376 spin_lock_init(&lp->lock);
1078 377
1079 dev->base_addr = regs->start; /* physical base address */ 378 /* physical base address */
1080 lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1); 379 dev->base_addr = regs->start;
1081 if (!lp->emac_base) { 380 lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
381 if (!lp->regs) {
1082 res = -ENOMEM; 382 res = -ENOMEM;
1083 goto err_free_dev; 383 goto err_free_dev;
1084 } 384 }
1085 385
1086 /* Clock */ 386 /* Clock */
1087 lp->ether_clk = clk_get(&pdev->dev, "ether_clk"); 387 lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
1088 if (IS_ERR(lp->ether_clk)) { 388 if (IS_ERR(lp->pclk)) {
1089 res = PTR_ERR(lp->ether_clk); 389 res = PTR_ERR(lp->pclk);
1090 goto err_ioumap; 390 goto err_free_dev;
1091 } 391 }
1092 clk_enable(lp->ether_clk); 392 clk_enable(lp->pclk);
1093 393
1094 /* Install the interrupt handler */ 394 /* Install the interrupt handler */
1095 dev->irq = platform_get_irq(pdev, 0); 395 dev->irq = platform_get_irq(pdev, 0);
1096 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { 396 res = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt, 0, dev->name, dev);
1097 res = -EBUSY; 397 if (res)
1098 goto err_disable_clock; 398 goto err_disable_clock;
1099 }
1100
1101 /* Allocate memory for DMA Receive descriptors */
1102 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1103 if (lp->dlist == NULL) {
1104 res = -ENOMEM;
1105 goto err_free_irq;
1106 }
1107 399
1108 ether_setup(dev); 400 ether_setup(dev);
1109 dev->netdev_ops = &at91ether_netdev_ops; 401 dev->netdev_ops = &at91ether_netdev_ops;
1110 dev->ethtool_ops = &at91ether_ethtool_ops; 402 dev->ethtool_ops = &macb_ethtool_ops;
1111 platform_set_drvdata(pdev, dev); 403 platform_set_drvdata(pdev, dev);
1112 SET_NETDEV_DEV(dev, &pdev->dev); 404 SET_NETDEV_DEV(dev, &pdev->dev);
1113 405
1114 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ 406 res = at91ether_get_hwaddr_dt(lp);
1115 update_mac_address(dev); /* Program ethernet address into MAC */ 407 if (res < 0)
1116 408 macb_get_hwaddr(lp);
1117 at91_emac_write(lp, AT91_EMAC_CTL, 0);
1118 409
1119 if (board_data->is_rmii) 410 res = at91ether_get_phy_mode_dt(pdev);
1120 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); 411 if (res < 0) {
1121 else 412 if (board_data && board_data->is_rmii)
1122 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); 413 lp->phy_interface = PHY_INTERFACE_MODE_RMII;
1123 414 else
1124 /* Detect PHY */ 415 lp->phy_interface = PHY_INTERFACE_MODE_MII;
1125 if (!at91ether_phy_detect(lp)) { 416 } else {
1126 printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n"); 417 lp->phy_interface = res;
1127 res = -ENODEV;
1128 goto err_free_dmamem;
1129 } 418 }
1130 419
1131 initialize_phy(lp); 420 macb_writel(lp, NCR, 0);
421
422 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
423 if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
424 reg |= MACB_BIT(RM9200_RMII);
1132 425
1133 lp->mii.dev = dev; /* Support for ethtool */ 426 macb_writel(lp, NCFGR, reg);
1134 lp->mii.mdio_read = mdio_read;
1135 lp->mii.mdio_write = mdio_write;
1136 lp->mii.phy_id = lp->phy_address;
1137 lp->mii.phy_id_mask = 0x1f;
1138 lp->mii.reg_num_mask = 0x1f;
1139 427
1140 /* Register the network interface */ 428 /* Register the network interface */
1141 res = register_netdev(dev); 429 res = register_netdev(dev);
1142 if (res) 430 if (res)
1143 goto err_free_dmamem; 431 goto err_disable_clock;
1144 432
1145 /* Determine current link speed */ 433 if (macb_mii_init(lp) != 0)
1146 spin_lock_irq(&lp->lock); 434 goto err_out_unregister_netdev;
1147 enable_mdi(lp); 435
1148 update_linkspeed(dev, 0); 436 /* will be enabled in open() */
1149 disable_mdi(lp); 437 netif_carrier_off(dev);
1150 spin_unlock_irq(&lp->lock); 438
1151 netif_carrier_off(dev); /* will be enabled in open() */ 439 phydev = lp->phy_dev;
1152 440 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1153 /* If board has no PHY IRQ, use a timer to poll the PHY */ 441 phydev->drv->name, dev_name(&phydev->dev),
1154 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 442 phydev->irq);
1155 gpio_request(board_data->phy_irq_pin, "ethernet_phy");
1156 } else {
1157 /* If board has no PHY IRQ, use a timer to poll the PHY */
1158 init_timer(&lp->check_timer);
1159 lp->check_timer.data = (unsigned long)dev;
1160 lp->check_timer.function = at91ether_check_link;
1161 }
1162 443
1163 /* Display ethernet banner */ 444 /* Display ethernet banner */
1164 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", 445 netdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
1165 dev->name, (uint) dev->base_addr, dev->irq, 446 dev->base_addr, dev->irq, dev->dev_addr);
1166 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1167 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1168 dev->dev_addr);
1169 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1170 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1171 else if (lp->phy_type == MII_LXT971A_ID)
1172 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1173 else if (lp->phy_type == MII_RTL8201_ID)
1174 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1175 else if (lp->phy_type == MII_BCM5221_ID)
1176 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1177 else if (lp->phy_type == MII_DP83847_ID)
1178 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1179 else if (lp->phy_type == MII_DP83848_ID)
1180 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1181 else if (lp->phy_type == MII_AC101L_ID)
1182 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1183 else if (lp->phy_type == MII_KS8721_ID)
1184 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1185 else if (lp->phy_type == MII_T78Q21x3_ID)
1186 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1187 else if (lp->phy_type == MII_LAN83C185_ID)
1188 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1189
1190 clk_disable(lp->ether_clk); /* Disable Peripheral clock */
1191 447
1192 return 0; 448 return 0;
1193 449
1194 450err_out_unregister_netdev:
1195err_free_dmamem: 451 unregister_netdev(dev);
1196 platform_set_drvdata(pdev, NULL);
1197 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1198err_free_irq:
1199 free_irq(dev->irq, dev);
1200err_disable_clock: 452err_disable_clock:
1201 clk_disable(lp->ether_clk); 453 clk_disable(lp->pclk);
1202 clk_put(lp->ether_clk);
1203err_ioumap:
1204 iounmap(lp->emac_base);
1205err_free_dev: 454err_free_dev:
1206 free_netdev(dev); 455 free_netdev(dev);
1207 return res; 456 return res;
1208} 457}
1209 458
1210static int __devexit at91ether_remove(struct platform_device *pdev) 459static int at91ether_remove(struct platform_device *pdev)
1211{ 460{
1212 struct net_device *dev = platform_get_drvdata(pdev); 461 struct net_device *dev = platform_get_drvdata(pdev);
1213 struct at91_private *lp = netdev_priv(dev); 462 struct macb *lp = netdev_priv(dev);
1214 463
1215 if (gpio_is_valid(lp->board_data.phy_irq_pin)) 464 if (lp->phy_dev)
1216 gpio_free(lp->board_data.phy_irq_pin); 465 phy_disconnect(lp->phy_dev);
1217 466
467 mdiobus_unregister(lp->mii_bus);
468 kfree(lp->mii_bus->irq);
469 mdiobus_free(lp->mii_bus);
1218 unregister_netdev(dev); 470 unregister_netdev(dev);
1219 free_irq(dev->irq, dev); 471 clk_disable(lp->pclk);
1220 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1221 clk_put(lp->ether_clk);
1222
1223 platform_set_drvdata(pdev, NULL);
1224 free_netdev(dev); 472 free_netdev(dev);
473 platform_set_drvdata(pdev, NULL);
474
1225 return 0; 475 return 0;
1226} 476}
1227 477
1228#ifdef CONFIG_PM 478#ifdef CONFIG_PM
1229
1230static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) 479static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1231{ 480{
1232 struct net_device *net_dev = platform_get_drvdata(pdev); 481 struct net_device *net_dev = platform_get_drvdata(pdev);
1233 struct at91_private *lp = netdev_priv(net_dev); 482 struct macb *lp = netdev_priv(net_dev);
1234 483
1235 if (netif_running(net_dev)) { 484 if (netif_running(net_dev)) {
1236 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1237 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1238 disable_irq(phy_irq);
1239 }
1240
1241 netif_stop_queue(net_dev); 485 netif_stop_queue(net_dev);
1242 netif_device_detach(net_dev); 486 netif_device_detach(net_dev);
1243 487
1244 clk_disable(lp->ether_clk); 488 clk_disable(lp->pclk);
1245 } 489 }
1246 return 0; 490 return 0;
1247} 491}
@@ -1249,34 +493,29 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1249static int at91ether_resume(struct platform_device *pdev) 493static int at91ether_resume(struct platform_device *pdev)
1250{ 494{
1251 struct net_device *net_dev = platform_get_drvdata(pdev); 495 struct net_device *net_dev = platform_get_drvdata(pdev);
1252 struct at91_private *lp = netdev_priv(net_dev); 496 struct macb *lp = netdev_priv(net_dev);
1253 497
1254 if (netif_running(net_dev)) { 498 if (netif_running(net_dev)) {
1255 clk_enable(lp->ether_clk); 499 clk_enable(lp->pclk);
1256 500
1257 netif_device_attach(net_dev); 501 netif_device_attach(net_dev);
1258 netif_start_queue(net_dev); 502 netif_start_queue(net_dev);
1259
1260 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1261 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1262 enable_irq(phy_irq);
1263 }
1264 } 503 }
1265 return 0; 504 return 0;
1266} 505}
1267
1268#else 506#else
1269#define at91ether_suspend NULL 507#define at91ether_suspend NULL
1270#define at91ether_resume NULL 508#define at91ether_resume NULL
1271#endif 509#endif
1272 510
1273static struct platform_driver at91ether_driver = { 511static struct platform_driver at91ether_driver = {
1274 .remove = __devexit_p(at91ether_remove), 512 .remove = at91ether_remove,
1275 .suspend = at91ether_suspend, 513 .suspend = at91ether_suspend,
1276 .resume = at91ether_resume, 514 .resume = at91ether_resume,
1277 .driver = { 515 .driver = {
1278 .name = DRV_NAME, 516 .name = "at91_ether",
1279 .owner = THIS_MODULE, 517 .owner = THIS_MODULE,
518 .of_match_table = of_match_ptr(at91ether_dt_ids),
1280 }, 519 },
1281}; 520};
1282 521
@@ -1296,4 +535,4 @@ module_exit(at91ether_exit)
1296MODULE_LICENSE("GPL"); 535MODULE_LICENSE("GPL");
1297MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); 536MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
1298MODULE_AUTHOR("Andrew Victor"); 537MODULE_AUTHOR("Andrew Victor");
1299MODULE_ALIAS("platform:" DRV_NAME); 538MODULE_ALIAS("platform:at91_ether");
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
deleted file mode 100644
index 0ef6328fa7f8..000000000000
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Ethernet driver for the Atmel AT91RM9200 (Thunder)
3 *
4 * Copyright (C) SAN People (Pty) Ltd
5 *
6 * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
7 * Initial version by Rick Bronson.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef AT91_ETHERNET
16#define AT91_ETHERNET
17
18
19/* Davicom 9161 PHY */
20#define MII_DM9161_ID 0x0181b880
21#define MII_DM9161A_ID 0x0181b8a0
22#define MII_DSCR_REG 16
23#define MII_DSCSR_REG 17
24#define MII_DSINTR_REG 21
25
26/* Intel LXT971A PHY */
27#define MII_LXT971A_ID 0x001378E0
28#define MII_ISINTE_REG 18
29#define MII_ISINTS_REG 19
30#define MII_LEDCTRL_REG 20
31
32/* Realtek RTL8201 PHY */
33#define MII_RTL8201_ID 0x00008200
34
35/* Broadcom BCM5221 PHY */
36#define MII_BCM5221_ID 0x004061e0
37#define MII_BCMINTR_REG 26
38
39/* National Semiconductor DP83847 */
40#define MII_DP83847_ID 0x20005c30
41
42/* National Semiconductor DP83848 */
43#define MII_DP83848_ID 0x20005c90
44#define MII_DPPHYSTS_REG 16
45#define MII_DPMICR_REG 17
46#define MII_DPMISR_REG 18
47
48/* Altima AC101L PHY */
49#define MII_AC101L_ID 0x00225520
50
51/* Micrel KS8721 PHY */
52#define MII_KS8721_ID 0x00221610
53
54/* Teridian 78Q2123/78Q2133 */
55#define MII_T78Q21x3_ID 0x000e7230
56#define MII_T78Q21INT_REG 17
57
58/* SMSC LAN83C185 */
59#define MII_LAN83C185_ID 0x0007C0A0
60
61/* ........................................................................ */
62
63#define MAX_RBUFF_SZ 0x600 /* 1518 rounded up */
64#define MAX_RX_DESCR 9 /* max number of receive buffers */
65
66#define EMAC_DESC_DONE 0x00000001 /* bit for if DMA is done */
67#define EMAC_DESC_WRAP 0x00000002 /* bit for wrap */
68
69#define EMAC_BROADCAST 0x80000000 /* broadcast address */
70#define EMAC_MULTICAST 0x40000000 /* multicast address */
71#define EMAC_UNICAST 0x20000000 /* unicast address */
72
73struct rbf_t
74{
75 unsigned int addr;
76 unsigned long size;
77};
78
79struct recv_desc_bufs
80{
81 struct rbf_t descriptors[MAX_RX_DESCR]; /* must be on sizeof (rbf_t) boundary */
82 char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ]; /* must be on long boundary */
83};
84
85struct at91_private
86{
87 struct mii_if_info mii; /* ethtool support */
88 struct macb_platform_data board_data; /* board-specific
89 * configuration (shared with
90 * macb for common data */
91 void __iomem *emac_base; /* base register address */
92 struct clk *ether_clk; /* clock */
93
94 /* PHY */
95 unsigned long phy_type; /* type of PHY (PHY_ID) */
96 spinlock_t lock; /* lock for MDI interface */
97 short phy_media; /* media interface type */
98 unsigned short phy_address; /* 5-bit MDI address of PHY (0..31) */
99 struct timer_list check_timer; /* Poll link status */
100
101 /* Transmit */
102 struct sk_buff *skb; /* holds skb until xmit interrupt completes */
103 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
104 int skb_length; /* saved skb length for pci_unmap_single */
105
106 /* Receive */
107 int rxBuffIndex; /* index into receive descriptor list */
108 struct recv_desc_bufs *dlist; /* descriptor list address */
109 struct recv_desc_bufs *dlist_phys; /* descriptor list physical address */
110};
111
112#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 033064b7b576..a9b0830fb39d 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -14,8 +14,10 @@
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/circ_buf.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/gpio.h>
19#include <linux/interrupt.h> 21#include <linux/interrupt.h>
20#include <linux/netdevice.h> 22#include <linux/netdevice.h>
21#include <linux/etherdevice.h> 23#include <linux/etherdevice.h>
@@ -26,37 +28,74 @@
26#include <linux/of.h> 28#include <linux/of.h>
27#include <linux/of_device.h> 29#include <linux/of_device.h>
28#include <linux/of_net.h> 30#include <linux/of_net.h>
31#include <linux/pinctrl/consumer.h>
29 32
30#include "macb.h" 33#include "macb.h"
31 34
32#define RX_BUFFER_SIZE 128 35#define RX_BUFFER_SIZE 128
33#define RX_RING_SIZE 512 36#define RX_RING_SIZE 512 /* must be power of 2 */
34#define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) 37#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
35 38
36/* Make the IP header word-aligned (the ethernet header is 14 bytes) */ 39#define TX_RING_SIZE 128 /* must be power of 2 */
37#define RX_OFFSET 2 40#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
38 41
39#define TX_RING_SIZE 128 42/* level of occupied TX descriptors under which we wake up TX process */
40#define DEF_TX_RING_PENDING (TX_RING_SIZE - 1) 43#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
41#define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE)
42 44
43#define TX_RING_GAP(bp) \ 45#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
44 (TX_RING_SIZE - (bp)->tx_pending) 46 | MACB_BIT(ISR_ROVR))
45#define TX_BUFFS_AVAIL(bp) \ 47#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
46 (((bp)->tx_tail <= (bp)->tx_head) ? \ 48 | MACB_BIT(ISR_RLE) \
47 (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \ 49 | MACB_BIT(TXERR))
48 (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) 50#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
49#define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1))
50 51
51#define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1)) 52/*
53 * Graceful stop timeouts in us. We should allow up to
54 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
55 */
56#define MACB_HALT_TIMEOUT 1230
52 57
53/* minimum number of free TX descriptors before waking up TX process */ 58/* Ring buffer accessors */
54#define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) 59static unsigned int macb_tx_ring_wrap(unsigned int index)
60{
61 return index & (TX_RING_SIZE - 1);
62}
55 63
56#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ 64static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
57 | MACB_BIT(ISR_ROVR)) 65{
66 return &bp->tx_ring[macb_tx_ring_wrap(index)];
67}
58 68
59static void __macb_set_hwaddr(struct macb *bp) 69static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
70{
71 return &bp->tx_skb[macb_tx_ring_wrap(index)];
72}
73
74static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
75{
76 dma_addr_t offset;
77
78 offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
79
80 return bp->tx_ring_dma + offset;
81}
82
83static unsigned int macb_rx_ring_wrap(unsigned int index)
84{
85 return index & (RX_RING_SIZE - 1);
86}
87
88static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
89{
90 return &bp->rx_ring[macb_rx_ring_wrap(index)];
91}
92
93static void *macb_rx_buffer(struct macb *bp, unsigned int index)
94{
95 return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
96}
97
98void macb_set_hwaddr(struct macb *bp)
60{ 99{
61 u32 bottom; 100 u32 bottom;
62 u16 top; 101 u16 top;
@@ -65,31 +104,58 @@ static void __macb_set_hwaddr(struct macb *bp)
65 macb_or_gem_writel(bp, SA1B, bottom); 104 macb_or_gem_writel(bp, SA1B, bottom);
66 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); 105 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
67 macb_or_gem_writel(bp, SA1T, top); 106 macb_or_gem_writel(bp, SA1T, top);
107
108 /* Clear unused address register sets */
109 macb_or_gem_writel(bp, SA2B, 0);
110 macb_or_gem_writel(bp, SA2T, 0);
111 macb_or_gem_writel(bp, SA3B, 0);
112 macb_or_gem_writel(bp, SA3T, 0);
113 macb_or_gem_writel(bp, SA4B, 0);
114 macb_or_gem_writel(bp, SA4T, 0);
68} 115}
116EXPORT_SYMBOL_GPL(macb_set_hwaddr);
69 117
70static void __init macb_get_hwaddr(struct macb *bp) 118void macb_get_hwaddr(struct macb *bp)
71{ 119{
120 struct macb_platform_data *pdata;
72 u32 bottom; 121 u32 bottom;
73 u16 top; 122 u16 top;
74 u8 addr[6]; 123 u8 addr[6];
124 int i;
75 125
76 bottom = macb_or_gem_readl(bp, SA1B); 126 pdata = bp->pdev->dev.platform_data;
77 top = macb_or_gem_readl(bp, SA1T);
78 127
79 addr[0] = bottom & 0xff; 128 /* Check all 4 address register for vaild address */
80 addr[1] = (bottom >> 8) & 0xff; 129 for (i = 0; i < 4; i++) {
81 addr[2] = (bottom >> 16) & 0xff; 130 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
82 addr[3] = (bottom >> 24) & 0xff; 131 top = macb_or_gem_readl(bp, SA1T + i * 8);
83 addr[4] = top & 0xff; 132
84 addr[5] = (top >> 8) & 0xff; 133 if (pdata && pdata->rev_eth_addr) {
134 addr[5] = bottom & 0xff;
135 addr[4] = (bottom >> 8) & 0xff;
136 addr[3] = (bottom >> 16) & 0xff;
137 addr[2] = (bottom >> 24) & 0xff;
138 addr[1] = top & 0xff;
139 addr[0] = (top & 0xff00) >> 8;
140 } else {
141 addr[0] = bottom & 0xff;
142 addr[1] = (bottom >> 8) & 0xff;
143 addr[2] = (bottom >> 16) & 0xff;
144 addr[3] = (bottom >> 24) & 0xff;
145 addr[4] = top & 0xff;
146 addr[5] = (top >> 8) & 0xff;
147 }
85 148
86 if (is_valid_ether_addr(addr)) { 149 if (is_valid_ether_addr(addr)) {
87 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 150 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
88 } else { 151 return;
89 netdev_info(bp->dev, "invalid hw address, using random\n"); 152 }
90 eth_hw_addr_random(bp->dev);
91 } 153 }
154
155 netdev_info(bp->dev, "invalid hw address, using random\n");
156 eth_hw_addr_random(bp->dev);
92} 157}
158EXPORT_SYMBOL_GPL(macb_get_hwaddr);
93 159
94static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 160static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
95{ 161{
@@ -152,13 +218,17 @@ static void macb_handle_link_change(struct net_device *dev)
152 218
153 reg = macb_readl(bp, NCFGR); 219 reg = macb_readl(bp, NCFGR);
154 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 220 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
221 if (macb_is_gem(bp))
222 reg &= ~GEM_BIT(GBE);
155 223
156 if (phydev->duplex) 224 if (phydev->duplex)
157 reg |= MACB_BIT(FD); 225 reg |= MACB_BIT(FD);
158 if (phydev->speed == SPEED_100) 226 if (phydev->speed == SPEED_100)
159 reg |= MACB_BIT(SPD); 227 reg |= MACB_BIT(SPD);
228 if (phydev->speed == SPEED_1000)
229 reg |= GEM_BIT(GBE);
160 230
161 macb_writel(bp, NCFGR, reg); 231 macb_or_gem_writel(bp, NCFGR, reg);
162 232
163 bp->speed = phydev->speed; 233 bp->speed = phydev->speed;
164 bp->duplex = phydev->duplex; 234 bp->duplex = phydev->duplex;
@@ -196,7 +266,9 @@ static void macb_handle_link_change(struct net_device *dev)
196static int macb_mii_probe(struct net_device *dev) 266static int macb_mii_probe(struct net_device *dev)
197{ 267{
198 struct macb *bp = netdev_priv(dev); 268 struct macb *bp = netdev_priv(dev);
269 struct macb_platform_data *pdata;
199 struct phy_device *phydev; 270 struct phy_device *phydev;
271 int phy_irq;
200 int ret; 272 int ret;
201 273
202 phydev = phy_find_first(bp->mii_bus); 274 phydev = phy_find_first(bp->mii_bus);
@@ -205,7 +277,14 @@ static int macb_mii_probe(struct net_device *dev)
205 return -1; 277 return -1;
206 } 278 }
207 279
208 /* TODO : add pin_irq */ 280 pdata = dev_get_platdata(&bp->pdev->dev);
281 if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
282 ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
283 if (!ret) {
284 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
285 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
286 }
287 }
209 288
210 /* attach the mac to the phy */ 289 /* attach the mac to the phy */
211 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0, 290 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
@@ -216,7 +295,10 @@ static int macb_mii_probe(struct net_device *dev)
216 } 295 }
217 296
218 /* mask with MAC supported features */ 297 /* mask with MAC supported features */
219 phydev->supported &= PHY_BASIC_FEATURES; 298 if (macb_is_gem(bp))
299 phydev->supported &= PHY_GBIT_FEATURES;
300 else
301 phydev->supported &= PHY_BASIC_FEATURES;
220 302
221 phydev->advertising = phydev->supported; 303 phydev->advertising = phydev->supported;
222 304
@@ -228,7 +310,7 @@ static int macb_mii_probe(struct net_device *dev)
228 return 0; 310 return 0;
229} 311}
230 312
231static int macb_mii_init(struct macb *bp) 313int macb_mii_init(struct macb *bp)
232{ 314{
233 struct macb_platform_data *pdata; 315 struct macb_platform_data *pdata;
234 int err = -ENXIO, i; 316 int err = -ENXIO, i;
@@ -284,6 +366,7 @@ err_out_free_mdiobus:
284err_out: 366err_out:
285 return err; 367 return err;
286} 368}
369EXPORT_SYMBOL_GPL(macb_mii_init);
287 370
288static void macb_update_stats(struct macb *bp) 371static void macb_update_stats(struct macb *bp)
289{ 372{
@@ -297,93 +380,148 @@ static void macb_update_stats(struct macb *bp)
297 *p += __raw_readl(reg); 380 *p += __raw_readl(reg);
298} 381}
299 382
300static void macb_tx(struct macb *bp) 383static int macb_halt_tx(struct macb *bp)
301{ 384{
302 unsigned int tail; 385 unsigned long halt_time, timeout;
303 unsigned int head; 386 u32 status;
304 u32 status;
305 387
306 status = macb_readl(bp, TSR); 388 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
307 macb_writel(bp, TSR, status);
308 389
309 netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status); 390 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
391 do {
392 halt_time = jiffies;
393 status = macb_readl(bp, TSR);
394 if (!(status & MACB_BIT(TGO)))
395 return 0;
310 396
311 if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { 397 usleep_range(10, 250);
312 int i; 398 } while (time_before(halt_time, timeout));
313 netdev_err(bp->dev, "TX %s, resetting buffers\n",
314 status & MACB_BIT(UND) ?
315 "underrun" : "retry limit exceeded");
316 399
317 /* Transfer ongoing, disable transmitter, to avoid confusion */ 400 return -ETIMEDOUT;
318 if (status & MACB_BIT(TGO)) 401}
319 macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
320 402
321 head = bp->tx_head; 403static void macb_tx_error_task(struct work_struct *work)
404{
405 struct macb *bp = container_of(work, struct macb, tx_error_task);
406 struct macb_tx_skb *tx_skb;
407 struct sk_buff *skb;
408 unsigned int tail;
322 409
323 /*Mark all the buffer as used to avoid sending a lost buffer*/ 410 netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
324 for (i = 0; i < TX_RING_SIZE; i++) 411 bp->tx_tail, bp->tx_head);
325 bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
326 412
327 /* Add wrap bit */ 413 /* Make sure nobody is trying to queue up new packets */
328 bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); 414 netif_stop_queue(bp->dev);
329 415
330 /* free transmit buffer in upper layer*/ 416 /*
331 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 417 * Stop transmission now
332 struct ring_info *rp = &bp->tx_skb[tail]; 418 * (in case we have just queued new packets)
333 struct sk_buff *skb = rp->skb; 419 */
420 if (macb_halt_tx(bp))
421 /* Just complain for now, reinitializing TX path can be good */
422 netdev_err(bp->dev, "BUG: halt tx timed out\n");
334 423
335 BUG_ON(skb == NULL); 424 /* No need for the lock here as nobody will interrupt us anymore */
336 425
337 rmb(); 426 /*
427 * Treat frames in TX queue including the ones that caused the error.
428 * Free transmit buffers in upper layer.
429 */
430 for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
431 struct macb_dma_desc *desc;
432 u32 ctrl;
433
434 desc = macb_tx_desc(bp, tail);
435 ctrl = desc->ctrl;
436 tx_skb = macb_tx_skb(bp, tail);
437 skb = tx_skb->skb;
438
439 if (ctrl & MACB_BIT(TX_USED)) {
440 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
441 macb_tx_ring_wrap(tail), skb->data);
442 bp->stats.tx_packets++;
443 bp->stats.tx_bytes += skb->len;
444 } else {
445 /*
446 * "Buffers exhausted mid-frame" errors may only happen
447 * if the driver is buggy, so complain loudly about those.
448 * Statistics are updated by hardware.
449 */
450 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
451 netdev_err(bp->dev,
452 "BUG: TX buffers exhausted mid-frame\n");
338 453
339 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 454 desc->ctrl = ctrl | MACB_BIT(TX_USED);
340 DMA_TO_DEVICE);
341 rp->skb = NULL;
342 dev_kfree_skb_irq(skb);
343 } 455 }
344 456
345 bp->tx_head = bp->tx_tail = 0; 457 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
346 458 DMA_TO_DEVICE);
347 /* Enable the transmitter again */ 459 tx_skb->skb = NULL;
348 if (status & MACB_BIT(TGO)) 460 dev_kfree_skb(skb);
349 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
350 } 461 }
351 462
352 if (!(status & MACB_BIT(COMP))) 463 /* Make descriptor updates visible to hardware */
353 /* 464 wmb();
354 * This may happen when a buffer becomes complete 465
355 * between reading the ISR and scanning the 466 /* Reinitialize the TX desc queue */
356 * descriptors. Nothing to worry about. 467 macb_writel(bp, TBQP, bp->tx_ring_dma);
357 */ 468 /* Make TX ring reflect state of hardware */
358 return; 469 bp->tx_head = bp->tx_tail = 0;
470
471 /* Now we are ready to start transmission again */
472 netif_wake_queue(bp->dev);
473
474 /* Housework before enabling TX IRQ */
475 macb_writel(bp, TSR, macb_readl(bp, TSR));
476 macb_writel(bp, IER, MACB_TX_INT_FLAGS);
477}
478
479static void macb_tx_interrupt(struct macb *bp)
480{
481 unsigned int tail;
482 unsigned int head;
483 u32 status;
484
485 status = macb_readl(bp, TSR);
486 macb_writel(bp, TSR, status);
487
488 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
489 (unsigned long)status);
359 490
360 head = bp->tx_head; 491 head = bp->tx_head;
361 for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { 492 for (tail = bp->tx_tail; tail != head; tail++) {
362 struct ring_info *rp = &bp->tx_skb[tail]; 493 struct macb_tx_skb *tx_skb;
363 struct sk_buff *skb = rp->skb; 494 struct sk_buff *skb;
364 u32 bufstat; 495 struct macb_dma_desc *desc;
496 u32 ctrl;
365 497
366 BUG_ON(skb == NULL); 498 desc = macb_tx_desc(bp, tail);
367 499
500 /* Make hw descriptor updates visible to CPU */
368 rmb(); 501 rmb();
369 bufstat = bp->tx_ring[tail].ctrl;
370 502
371 if (!(bufstat & MACB_BIT(TX_USED))) 503 ctrl = desc->ctrl;
504
505 if (!(ctrl & MACB_BIT(TX_USED)))
372 break; 506 break;
373 507
374 netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n", 508 tx_skb = macb_tx_skb(bp, tail);
375 tail, skb->data); 509 skb = tx_skb->skb;
376 dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, 510
511 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
512 macb_tx_ring_wrap(tail), skb->data);
513 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
377 DMA_TO_DEVICE); 514 DMA_TO_DEVICE);
378 bp->stats.tx_packets++; 515 bp->stats.tx_packets++;
379 bp->stats.tx_bytes += skb->len; 516 bp->stats.tx_bytes += skb->len;
380 rp->skb = NULL; 517 tx_skb->skb = NULL;
381 dev_kfree_skb_irq(skb); 518 dev_kfree_skb_irq(skb);
382 } 519 }
383 520
384 bp->tx_tail = tail; 521 bp->tx_tail = tail;
385 if (netif_queue_stopped(bp->dev) && 522 if (netif_queue_stopped(bp->dev)
386 TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) 523 && CIRC_CNT(bp->tx_head, bp->tx_tail,
524 TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
387 netif_wake_queue(bp->dev); 525 netif_wake_queue(bp->dev);
388} 526}
389 527
@@ -392,31 +530,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
392{ 530{
393 unsigned int len; 531 unsigned int len;
394 unsigned int frag; 532 unsigned int frag;
395 unsigned int offset = 0; 533 unsigned int offset;
396 struct sk_buff *skb; 534 struct sk_buff *skb;
535 struct macb_dma_desc *desc;
397 536
398 len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); 537 desc = macb_rx_desc(bp, last_frag);
538 len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
399 539
400 netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", 540 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
401 first_frag, last_frag, len); 541 macb_rx_ring_wrap(first_frag),
542 macb_rx_ring_wrap(last_frag), len);
402 543
403 skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET); 544 /*
545 * The ethernet header starts NET_IP_ALIGN bytes into the
546 * first buffer. Since the header is 14 bytes, this makes the
547 * payload word-aligned.
548 *
549 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
550 * the two padding bytes into the skb so that we avoid hitting
551 * the slowpath in memcpy(), and pull them off afterwards.
552 */
553 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
404 if (!skb) { 554 if (!skb) {
405 bp->stats.rx_dropped++; 555 bp->stats.rx_dropped++;
406 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 556 for (frag = first_frag; ; frag++) {
407 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 557 desc = macb_rx_desc(bp, frag);
558 desc->addr &= ~MACB_BIT(RX_USED);
408 if (frag == last_frag) 559 if (frag == last_frag)
409 break; 560 break;
410 } 561 }
562
563 /* Make descriptor updates visible to hardware */
411 wmb(); 564 wmb();
565
412 return 1; 566 return 1;
413 } 567 }
414 568
415 skb_reserve(skb, RX_OFFSET); 569 offset = 0;
570 len += NET_IP_ALIGN;
416 skb_checksum_none_assert(skb); 571 skb_checksum_none_assert(skb);
417 skb_put(skb, len); 572 skb_put(skb, len);
418 573
419 for (frag = first_frag; ; frag = NEXT_RX(frag)) { 574 for (frag = first_frag; ; frag++) {
420 unsigned int frag_len = RX_BUFFER_SIZE; 575 unsigned int frag_len = RX_BUFFER_SIZE;
421 576
422 if (offset + frag_len > len) { 577 if (offset + frag_len > len) {
@@ -424,22 +579,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
424 frag_len = len - offset; 579 frag_len = len - offset;
425 } 580 }
426 skb_copy_to_linear_data_offset(skb, offset, 581 skb_copy_to_linear_data_offset(skb, offset,
427 (bp->rx_buffers + 582 macb_rx_buffer(bp, frag), frag_len);
428 (RX_BUFFER_SIZE * frag)),
429 frag_len);
430 offset += RX_BUFFER_SIZE; 583 offset += RX_BUFFER_SIZE;
431 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 584 desc = macb_rx_desc(bp, frag);
432 wmb(); 585 desc->addr &= ~MACB_BIT(RX_USED);
433 586
434 if (frag == last_frag) 587 if (frag == last_frag)
435 break; 588 break;
436 } 589 }
437 590
591 /* Make descriptor updates visible to hardware */
592 wmb();
593
594 __skb_pull(skb, NET_IP_ALIGN);
438 skb->protocol = eth_type_trans(skb, bp->dev); 595 skb->protocol = eth_type_trans(skb, bp->dev);
439 596
440 bp->stats.rx_packets++; 597 bp->stats.rx_packets++;
441 bp->stats.rx_bytes += len; 598 bp->stats.rx_bytes += skb->len;
442 netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n", 599 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
443 skb->len, skb->csum); 600 skb->len, skb->csum);
444 netif_receive_skb(skb); 601 netif_receive_skb(skb);
445 602
@@ -452,8 +609,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
452{ 609{
453 unsigned int frag; 610 unsigned int frag;
454 611
455 for (frag = begin; frag != end; frag = NEXT_RX(frag)) 612 for (frag = begin; frag != end; frag++) {
456 bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); 613 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
614 desc->addr &= ~MACB_BIT(RX_USED);
615 }
616
617 /* Make descriptor updates visible to hardware */
457 wmb(); 618 wmb();
458 619
459 /* 620 /*
@@ -466,15 +627,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
466static int macb_rx(struct macb *bp, int budget) 627static int macb_rx(struct macb *bp, int budget)
467{ 628{
468 int received = 0; 629 int received = 0;
469 unsigned int tail = bp->rx_tail; 630 unsigned int tail;
470 int first_frag = -1; 631 int first_frag = -1;
471 632
472 for (; budget > 0; tail = NEXT_RX(tail)) { 633 for (tail = bp->rx_tail; budget > 0; tail++) {
634 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
473 u32 addr, ctrl; 635 u32 addr, ctrl;
474 636
637 /* Make hw descriptor updates visible to CPU */
475 rmb(); 638 rmb();
476 addr = bp->rx_ring[tail].addr; 639
477 ctrl = bp->rx_ring[tail].ctrl; 640 addr = desc->addr;
641 ctrl = desc->ctrl;
478 642
479 if (!(addr & MACB_BIT(RX_USED))) 643 if (!(addr & MACB_BIT(RX_USED)))
480 break; 644 break;
@@ -517,7 +681,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
517 681
518 work_done = 0; 682 work_done = 0;
519 683
520 netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n", 684 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
521 (unsigned long)status, budget); 685 (unsigned long)status, budget);
522 686
523 work_done = macb_rx(bp, budget); 687 work_done = macb_rx(bp, budget);
@@ -552,10 +716,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
552 while (status) { 716 while (status) {
553 /* close possible race with dev_close */ 717 /* close possible race with dev_close */
554 if (unlikely(!netif_running(dev))) { 718 if (unlikely(!netif_running(dev))) {
555 macb_writel(bp, IDR, ~0UL); 719 macb_writel(bp, IDR, -1);
556 break; 720 break;
557 } 721 }
558 722
723 netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
724
559 if (status & MACB_RX_INT_FLAGS) { 725 if (status & MACB_RX_INT_FLAGS) {
560 /* 726 /*
561 * There's no point taking any more interrupts 727 * There's no point taking any more interrupts
@@ -567,14 +733,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
567 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 733 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
568 734
569 if (napi_schedule_prep(&bp->napi)) { 735 if (napi_schedule_prep(&bp->napi)) {
570 netdev_dbg(bp->dev, "scheduling RX softirq\n"); 736 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
571 __napi_schedule(&bp->napi); 737 __napi_schedule(&bp->napi);
572 } 738 }
573 } 739 }
574 740
575 if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | 741 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
576 MACB_BIT(ISR_RLE))) 742 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
577 macb_tx(bp); 743 schedule_work(&bp->tx_error_task);
744 break;
745 }
746
747 if (status & MACB_BIT(TCOMP))
748 macb_tx_interrupt(bp);
578 749
579 /* 750 /*
580 * Link change detection isn't possible with RMII, so we'll 751 * Link change detection isn't possible with RMII, so we'll
@@ -626,11 +797,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
626 struct macb *bp = netdev_priv(dev); 797 struct macb *bp = netdev_priv(dev);
627 dma_addr_t mapping; 798 dma_addr_t mapping;
628 unsigned int len, entry; 799 unsigned int len, entry;
800 struct macb_dma_desc *desc;
801 struct macb_tx_skb *tx_skb;
629 u32 ctrl; 802 u32 ctrl;
630 unsigned long flags; 803 unsigned long flags;
631 804
632#ifdef DEBUG 805#if defined(DEBUG) && defined(VERBOSE_DEBUG)
633 netdev_dbg(bp->dev, 806 netdev_vdbg(bp->dev,
634 "start_xmit: len %u head %p data %p tail %p end %p\n", 807 "start_xmit: len %u head %p data %p tail %p end %p\n",
635 skb->len, skb->head, skb->data, 808 skb->len, skb->head, skb->data,
636 skb_tail_pointer(skb), skb_end_pointer(skb)); 809 skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -642,7 +815,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
642 spin_lock_irqsave(&bp->lock, flags); 815 spin_lock_irqsave(&bp->lock, flags);
643 816
644 /* This is a hard error, log it. */ 817 /* This is a hard error, log it. */
645 if (TX_BUFFS_AVAIL(bp) < 1) { 818 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
646 netif_stop_queue(dev); 819 netif_stop_queue(dev);
647 spin_unlock_irqrestore(&bp->lock, flags); 820 spin_unlock_irqrestore(&bp->lock, flags);
648 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); 821 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -651,13 +824,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
651 return NETDEV_TX_BUSY; 824 return NETDEV_TX_BUSY;
652 } 825 }
653 826
654 entry = bp->tx_head; 827 entry = macb_tx_ring_wrap(bp->tx_head);
655 netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry); 828 bp->tx_head++;
829 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
656 mapping = dma_map_single(&bp->pdev->dev, skb->data, 830 mapping = dma_map_single(&bp->pdev->dev, skb->data,
657 len, DMA_TO_DEVICE); 831 len, DMA_TO_DEVICE);
658 bp->tx_skb[entry].skb = skb; 832
659 bp->tx_skb[entry].mapping = mapping; 833 tx_skb = &bp->tx_skb[entry];
660 netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", 834 tx_skb->skb = skb;
835 tx_skb->mapping = mapping;
836 netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
661 skb->data, (unsigned long)mapping); 837 skb->data, (unsigned long)mapping);
662 838
663 ctrl = MACB_BF(TX_FRMLEN, len); 839 ctrl = MACB_BF(TX_FRMLEN, len);
@@ -665,18 +841,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 if (entry == (TX_RING_SIZE - 1)) 841 if (entry == (TX_RING_SIZE - 1))
666 ctrl |= MACB_BIT(TX_WRAP); 842 ctrl |= MACB_BIT(TX_WRAP);
667 843
668 bp->tx_ring[entry].addr = mapping; 844 desc = &bp->tx_ring[entry];
669 bp->tx_ring[entry].ctrl = ctrl; 845 desc->addr = mapping;
670 wmb(); 846 desc->ctrl = ctrl;
671 847
672 entry = NEXT_TX(entry); 848 /* Make newly initialized descriptor visible to hardware */
673 bp->tx_head = entry; 849 wmb();
674 850
675 skb_tx_timestamp(skb); 851 skb_tx_timestamp(skb);
676 852
677 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); 853 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
678 854
679 if (TX_BUFFS_AVAIL(bp) < 1) 855 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
680 netif_stop_queue(dev); 856 netif_stop_queue(dev);
681 857
682 spin_unlock_irqrestore(&bp->lock, flags); 858 spin_unlock_irqrestore(&bp->lock, flags);
@@ -712,7 +888,7 @@ static int macb_alloc_consistent(struct macb *bp)
712{ 888{
713 int size; 889 int size;
714 890
715 size = TX_RING_SIZE * sizeof(struct ring_info); 891 size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
716 bp->tx_skb = kmalloc(size, GFP_KERNEL); 892 bp->tx_skb = kmalloc(size, GFP_KERNEL);
717 if (!bp->tx_skb) 893 if (!bp->tx_skb)
718 goto out_err; 894 goto out_err;
@@ -775,9 +951,6 @@ static void macb_init_rings(struct macb *bp)
775 951
776static void macb_reset_hw(struct macb *bp) 952static void macb_reset_hw(struct macb *bp)
777{ 953{
778 /* Make sure we have the write buffer for ourselves */
779 wmb();
780
781 /* 954 /*
782 * Disable RX and TX (XXX: Should we halt the transmission 955 * Disable RX and TX (XXX: Should we halt the transmission
783 * more gracefully?) 956 * more gracefully?)
@@ -788,11 +961,11 @@ static void macb_reset_hw(struct macb *bp)
788 macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); 961 macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
789 962
790 /* Clear all status flags */ 963 /* Clear all status flags */
791 macb_writel(bp, TSR, ~0UL); 964 macb_writel(bp, TSR, -1);
792 macb_writel(bp, RSR, ~0UL); 965 macb_writel(bp, RSR, -1);
793 966
794 /* Disable all interrupts */ 967 /* Disable all interrupts */
795 macb_writel(bp, IDR, ~0UL); 968 macb_writel(bp, IDR, -1);
796 macb_readl(bp, ISR); 969 macb_readl(bp, ISR);
797} 970}
798 971
@@ -860,8 +1033,12 @@ static u32 macb_dbw(struct macb *bp)
860} 1033}
861 1034
862/* 1035/*
863 * Configure the receive DMA engine to use the correct receive buffer size. 1036 * Configure the receive DMA engine
864 * This is a configurable parameter for GEM. 1037 * - use the correct receive buffer size
1038 * - set the possibility to use INCR16 bursts
1039 * (if not supported by FIFO, it will fallback to default)
1040 * - set both rx/tx packet buffers to full memory size
1041 * These are configurable parameters for GEM.
865 */ 1042 */
866static void macb_configure_dma(struct macb *bp) 1043static void macb_configure_dma(struct macb *bp)
867{ 1044{
@@ -870,6 +1047,8 @@ static void macb_configure_dma(struct macb *bp)
870 if (macb_is_gem(bp)) { 1047 if (macb_is_gem(bp)) {
871 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1048 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
872 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); 1049 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
1050 dmacfg |= GEM_BF(FBLDO, 16);
1051 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
873 gem_writel(bp, DMACFG, dmacfg); 1052 gem_writel(bp, DMACFG, dmacfg);
874 } 1053 }
875} 1054}
@@ -879,9 +1058,10 @@ static void macb_init_hw(struct macb *bp)
879 u32 config; 1058 u32 config;
880 1059
881 macb_reset_hw(bp); 1060 macb_reset_hw(bp);
882 __macb_set_hwaddr(bp); 1061 macb_set_hwaddr(bp);
883 1062
884 config = macb_mdc_clk_div(bp); 1063 config = macb_mdc_clk_div(bp);
1064 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
885 config |= MACB_BIT(PAE); /* PAuse Enable */ 1065 config |= MACB_BIT(PAE); /* PAuse Enable */
886 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 1066 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
887 config |= MACB_BIT(BIG); /* Receive oversized frames */ 1067 config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -891,6 +1071,8 @@ static void macb_init_hw(struct macb *bp)
891 config |= MACB_BIT(NBC); /* No BroadCast */ 1071 config |= MACB_BIT(NBC); /* No BroadCast */
892 config |= macb_dbw(bp); 1072 config |= macb_dbw(bp);
893 macb_writel(bp, NCFGR, config); 1073 macb_writel(bp, NCFGR, config);
1074 bp->speed = SPEED_10;
1075 bp->duplex = DUPLEX_HALF;
894 1076
895 macb_configure_dma(bp); 1077 macb_configure_dma(bp);
896 1078
@@ -902,13 +1084,8 @@ static void macb_init_hw(struct macb *bp)
902 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 1084 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
903 1085
904 /* Enable interrupts */ 1086 /* Enable interrupts */
905 macb_writel(bp, IER, (MACB_BIT(RCOMP) 1087 macb_writel(bp, IER, (MACB_RX_INT_FLAGS
906 | MACB_BIT(RXUBR) 1088 | MACB_TX_INT_FLAGS
907 | MACB_BIT(ISR_TUND)
908 | MACB_BIT(ISR_RLE)
909 | MACB_BIT(TXERR)
910 | MACB_BIT(TCOMP)
911 | MACB_BIT(ISR_ROVR)
912 | MACB_BIT(HRESP))); 1089 | MACB_BIT(HRESP)));
913 1090
914} 1091}
@@ -996,7 +1173,7 @@ static void macb_sethashtable(struct net_device *dev)
996/* 1173/*
997 * Enable/Disable promiscuous and multicast modes. 1174 * Enable/Disable promiscuous and multicast modes.
998 */ 1175 */
999static void macb_set_rx_mode(struct net_device *dev) 1176void macb_set_rx_mode(struct net_device *dev)
1000{ 1177{
1001 unsigned long cfg; 1178 unsigned long cfg;
1002 struct macb *bp = netdev_priv(dev); 1179 struct macb *bp = netdev_priv(dev);
@@ -1028,6 +1205,7 @@ static void macb_set_rx_mode(struct net_device *dev)
1028 1205
1029 macb_writel(bp, NCFGR, cfg); 1206 macb_writel(bp, NCFGR, cfg);
1030} 1207}
1208EXPORT_SYMBOL_GPL(macb_set_rx_mode);
1031 1209
1032static int macb_open(struct net_device *dev) 1210static int macb_open(struct net_device *dev)
1033{ 1211{
@@ -1043,9 +1221,6 @@ static int macb_open(struct net_device *dev)
1043 if (!bp->phy_dev) 1221 if (!bp->phy_dev)
1044 return -EAGAIN; 1222 return -EAGAIN;
1045 1223
1046 if (!is_valid_ether_addr(dev->dev_addr))
1047 return -EADDRNOTAVAIL;
1048
1049 err = macb_alloc_consistent(bp); 1224 err = macb_alloc_consistent(bp);
1050 if (err) { 1225 if (err) {
1051 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", 1226 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
@@ -1135,7 +1310,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
1135 return nstat; 1310 return nstat;
1136} 1311}
1137 1312
1138static struct net_device_stats *macb_get_stats(struct net_device *dev) 1313struct net_device_stats *macb_get_stats(struct net_device *dev)
1139{ 1314{
1140 struct macb *bp = netdev_priv(dev); 1315 struct macb *bp = netdev_priv(dev);
1141 struct net_device_stats *nstat = &bp->stats; 1316 struct net_device_stats *nstat = &bp->stats;
@@ -1181,6 +1356,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
1181 1356
1182 return nstat; 1357 return nstat;
1183} 1358}
1359EXPORT_SYMBOL_GPL(macb_get_stats);
1184 1360
1185static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1361static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1186{ 1362{
@@ -1204,25 +1380,55 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1204 return phy_ethtool_sset(phydev, cmd); 1380 return phy_ethtool_sset(phydev, cmd);
1205} 1381}
1206 1382
1207static void macb_get_drvinfo(struct net_device *dev, 1383static int macb_get_regs_len(struct net_device *netdev)
1208 struct ethtool_drvinfo *info) 1384{
1385 return MACB_GREGS_NBR * sizeof(u32);
1386}
1387
1388static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1389 void *p)
1209{ 1390{
1210 struct macb *bp = netdev_priv(dev); 1391 struct macb *bp = netdev_priv(dev);
1392 unsigned int tail, head;
1393 u32 *regs_buff = p;
1394
1395 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
1396 | MACB_GREGS_VERSION;
1397
1398 tail = macb_tx_ring_wrap(bp->tx_tail);
1399 head = macb_tx_ring_wrap(bp->tx_head);
1400
1401 regs_buff[0] = macb_readl(bp, NCR);
1402 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
1403 regs_buff[2] = macb_readl(bp, NSR);
1404 regs_buff[3] = macb_readl(bp, TSR);
1405 regs_buff[4] = macb_readl(bp, RBQP);
1406 regs_buff[5] = macb_readl(bp, TBQP);
1407 regs_buff[6] = macb_readl(bp, RSR);
1408 regs_buff[7] = macb_readl(bp, IMR);
1211 1409
1212 strcpy(info->driver, bp->pdev->dev.driver->name); 1410 regs_buff[8] = tail;
1213 strcpy(info->version, "$Revision: 1.14 $"); 1411 regs_buff[9] = head;
1214 strcpy(info->bus_info, dev_name(&bp->pdev->dev)); 1412 regs_buff[10] = macb_tx_dma(bp, tail);
1413 regs_buff[11] = macb_tx_dma(bp, head);
1414
1415 if (macb_is_gem(bp)) {
1416 regs_buff[12] = gem_readl(bp, USRIO);
1417 regs_buff[13] = gem_readl(bp, DMACFG);
1418 }
1215} 1419}
1216 1420
1217static const struct ethtool_ops macb_ethtool_ops = { 1421const struct ethtool_ops macb_ethtool_ops = {
1218 .get_settings = macb_get_settings, 1422 .get_settings = macb_get_settings,
1219 .set_settings = macb_set_settings, 1423 .set_settings = macb_set_settings,
1220 .get_drvinfo = macb_get_drvinfo, 1424 .get_regs_len = macb_get_regs_len,
1425 .get_regs = macb_get_regs,
1221 .get_link = ethtool_op_get_link, 1426 .get_link = ethtool_op_get_link,
1222 .get_ts_info = ethtool_op_get_ts_info, 1427 .get_ts_info = ethtool_op_get_ts_info,
1223}; 1428};
1429EXPORT_SYMBOL_GPL(macb_ethtool_ops);
1224 1430
1225static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1431int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1226{ 1432{
1227 struct macb *bp = netdev_priv(dev); 1433 struct macb *bp = netdev_priv(dev);
1228 struct phy_device *phydev = bp->phy_dev; 1434 struct phy_device *phydev = bp->phy_dev;
@@ -1235,6 +1441,7 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1235 1441
1236 return phy_mii_ioctl(phydev, rq, cmd); 1442 return phy_mii_ioctl(phydev, rq, cmd);
1237} 1443}
1444EXPORT_SYMBOL_GPL(macb_ioctl);
1238 1445
1239static const struct net_device_ops macb_netdev_ops = { 1446static const struct net_device_ops macb_netdev_ops = {
1240 .ndo_open = macb_open, 1447 .ndo_open = macb_open,
@@ -1263,7 +1470,7 @@ static const struct of_device_id macb_dt_ids[] = {
1263 1470
1264MODULE_DEVICE_TABLE(of, macb_dt_ids); 1471MODULE_DEVICE_TABLE(of, macb_dt_ids);
1265 1472
1266static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev) 1473static int macb_get_phy_mode_dt(struct platform_device *pdev)
1267{ 1474{
1268 struct device_node *np = pdev->dev.of_node; 1475 struct device_node *np = pdev->dev.of_node;
1269 1476
@@ -1273,7 +1480,7 @@ static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
1273 return -ENODEV; 1480 return -ENODEV;
1274} 1481}
1275 1482
1276static int __devinit macb_get_hwaddr_dt(struct macb *bp) 1483static int macb_get_hwaddr_dt(struct macb *bp)
1277{ 1484{
1278 struct device_node *np = bp->pdev->dev.of_node; 1485 struct device_node *np = bp->pdev->dev.of_node;
1279 if (np) { 1486 if (np) {
@@ -1287,11 +1494,11 @@ static int __devinit macb_get_hwaddr_dt(struct macb *bp)
1287 return -ENODEV; 1494 return -ENODEV;
1288} 1495}
1289#else 1496#else
1290static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev) 1497static int macb_get_phy_mode_dt(struct platform_device *pdev)
1291{ 1498{
1292 return -ENODEV; 1499 return -ENODEV;
1293} 1500}
1294static int __devinit macb_get_hwaddr_dt(struct macb *bp) 1501static int macb_get_hwaddr_dt(struct macb *bp)
1295{ 1502{
1296 return -ENODEV; 1503 return -ENODEV;
1297} 1504}
@@ -1306,6 +1513,7 @@ static int __init macb_probe(struct platform_device *pdev)
1306 struct phy_device *phydev; 1513 struct phy_device *phydev;
1307 u32 config; 1514 u32 config;
1308 int err = -ENXIO; 1515 int err = -ENXIO;
1516 struct pinctrl *pinctrl;
1309 1517
1310 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1518 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1311 if (!regs) { 1519 if (!regs) {
@@ -1313,6 +1521,15 @@ static int __init macb_probe(struct platform_device *pdev)
1313 goto err_out; 1521 goto err_out;
1314 } 1522 }
1315 1523
1524 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1525 if (IS_ERR(pinctrl)) {
1526 err = PTR_ERR(pinctrl);
1527 if (err == -EPROBE_DEFER)
1528 goto err_out;
1529
1530 dev_warn(&pdev->dev, "No pinctrl provided\n");
1531 }
1532
1316 err = -ENOMEM; 1533 err = -ENOMEM;
1317 dev = alloc_etherdev(sizeof(*bp)); 1534 dev = alloc_etherdev(sizeof(*bp));
1318 if (!dev) 1535 if (!dev)
@@ -1328,6 +1545,7 @@ static int __init macb_probe(struct platform_device *pdev)
1328 bp->dev = dev; 1545 bp->dev = dev;
1329 1546
1330 spin_lock_init(&bp->lock); 1547 spin_lock_init(&bp->lock);
1548 INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
1331 1549
1332 bp->pclk = clk_get(&pdev->dev, "pclk"); 1550 bp->pclk = clk_get(&pdev->dev, "pclk");
1333 if (IS_ERR(bp->pclk)) { 1551 if (IS_ERR(bp->pclk)) {
@@ -1384,7 +1602,9 @@ static int __init macb_probe(struct platform_device *pdev)
1384 bp->phy_interface = err; 1602 bp->phy_interface = err;
1385 } 1603 }
1386 1604
1387 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) 1605 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
1606 macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
1607 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
1388#if defined(CONFIG_ARCH_AT91) 1608#if defined(CONFIG_ARCH_AT91)
1389 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | 1609 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
1390 MACB_BIT(CLKEN))); 1610 MACB_BIT(CLKEN)));
@@ -1398,8 +1618,6 @@ static int __init macb_probe(struct platform_device *pdev)
1398 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); 1618 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
1399#endif 1619#endif
1400 1620
1401 bp->tx_pending = DEF_TX_RING_PENDING;
1402
1403 err = register_netdev(dev); 1621 err = register_netdev(dev);
1404 if (err) { 1622 if (err) {
1405 dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); 1623 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 335e288f5314..570908b93578 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -10,10 +10,15 @@
10#ifndef _MACB_H 10#ifndef _MACB_H
11#define _MACB_H 11#define _MACB_H
12 12
13#define MACB_GREGS_NBR 16
14#define MACB_GREGS_VERSION 1
15
13/* MACB register offsets */ 16/* MACB register offsets */
14#define MACB_NCR 0x0000 17#define MACB_NCR 0x0000
15#define MACB_NCFGR 0x0004 18#define MACB_NCFGR 0x0004
16#define MACB_NSR 0x0008 19#define MACB_NSR 0x0008
20#define MACB_TAR 0x000c /* AT91RM9200 only */
21#define MACB_TCR 0x0010 /* AT91RM9200 only */
17#define MACB_TSR 0x0014 22#define MACB_TSR 0x0014
18#define MACB_RBQP 0x0018 23#define MACB_RBQP 0x0018
19#define MACB_TBQP 0x001c 24#define MACB_TBQP 0x001c
@@ -69,6 +74,12 @@
69#define GEM_HRT 0x0084 74#define GEM_HRT 0x0084
70#define GEM_SA1B 0x0088 75#define GEM_SA1B 0x0088
71#define GEM_SA1T 0x008C 76#define GEM_SA1T 0x008C
77#define GEM_SA2B 0x0090
78#define GEM_SA2T 0x0094
79#define GEM_SA3B 0x0098
80#define GEM_SA3T 0x009C
81#define GEM_SA4B 0x00A0
82#define GEM_SA4T 0x00A4
72#define GEM_OTX 0x0100 83#define GEM_OTX 0x0100
73#define GEM_DCFG1 0x0280 84#define GEM_DCFG1 0x0280
74#define GEM_DCFG2 0x0284 85#define GEM_DCFG2 0x0284
@@ -133,6 +144,8 @@
133#define MACB_RTY_SIZE 1 144#define MACB_RTY_SIZE 1
134#define MACB_PAE_OFFSET 13 145#define MACB_PAE_OFFSET 13
135#define MACB_PAE_SIZE 1 146#define MACB_PAE_SIZE 1
147#define MACB_RM9200_RMII_OFFSET 13 /* AT91RM9200 only */
148#define MACB_RM9200_RMII_SIZE 1 /* AT91RM9200 only */
136#define MACB_RBOF_OFFSET 14 149#define MACB_RBOF_OFFSET 14
137#define MACB_RBOF_SIZE 2 150#define MACB_RBOF_SIZE 2
138#define MACB_RLCE_OFFSET 16 151#define MACB_RLCE_OFFSET 16
@@ -145,6 +158,8 @@
145#define MACB_IRXFCS_SIZE 1 158#define MACB_IRXFCS_SIZE 1
146 159
147/* GEM specific NCFGR bitfields. */ 160/* GEM specific NCFGR bitfields. */
161#define GEM_GBE_OFFSET 10
162#define GEM_GBE_SIZE 1
148#define GEM_CLK_OFFSET 18 163#define GEM_CLK_OFFSET 18
149#define GEM_CLK_SIZE 3 164#define GEM_CLK_SIZE 3
150#define GEM_DBW_OFFSET 21 165#define GEM_DBW_OFFSET 21
@@ -156,8 +171,19 @@
156#define GEM_DBW128 2 171#define GEM_DBW128 2
157 172
158/* Bitfields in DMACFG. */ 173/* Bitfields in DMACFG. */
174#define GEM_FBLDO_OFFSET 0
175#define GEM_FBLDO_SIZE 5
176#define GEM_RXBMS_OFFSET 8
177#define GEM_RXBMS_SIZE 2
178#define GEM_TXPBMS_OFFSET 10
179#define GEM_TXPBMS_SIZE 1
180#define GEM_TXCOEN_OFFSET 11
181#define GEM_TXCOEN_SIZE 1
159#define GEM_RXBS_OFFSET 16 182#define GEM_RXBS_OFFSET 16
160#define GEM_RXBS_SIZE 8 183#define GEM_RXBS_SIZE 8
184#define GEM_DDRP_OFFSET 24
185#define GEM_DDRP_SIZE 1
186
161 187
162/* Bitfields in NSR */ 188/* Bitfields in NSR */
163#define MACB_NSR_LINK_OFFSET 0 189#define MACB_NSR_LINK_OFFSET 0
@@ -178,6 +204,8 @@
178#define MACB_TGO_SIZE 1 204#define MACB_TGO_SIZE 1
179#define MACB_BEX_OFFSET 4 205#define MACB_BEX_OFFSET 4
180#define MACB_BEX_SIZE 1 206#define MACB_BEX_SIZE 1
207#define MACB_RM9200_BNQ_OFFSET 4 /* AT91RM9200 only */
208#define MACB_RM9200_BNQ_SIZE 1 /* AT91RM9200 only */
181#define MACB_COMP_OFFSET 5 209#define MACB_COMP_OFFSET 5
182#define MACB_COMP_SIZE 1 210#define MACB_COMP_SIZE 1
183#define MACB_UND_OFFSET 6 211#define MACB_UND_OFFSET 6
@@ -246,6 +274,8 @@
246/* Bitfields in USRIO (AT91) */ 274/* Bitfields in USRIO (AT91) */
247#define MACB_RMII_OFFSET 0 275#define MACB_RMII_OFFSET 0
248#define MACB_RMII_SIZE 1 276#define MACB_RMII_SIZE 1
277#define GEM_RGMII_OFFSET 0 /* GEM gigabit mode */
278#define GEM_RGMII_SIZE 1
249#define MACB_CLKEN_OFFSET 1 279#define MACB_CLKEN_OFFSET 1
250#define MACB_CLKEN_SIZE 1 280#define MACB_CLKEN_SIZE 1
251 281
@@ -352,7 +382,12 @@
352 __v; \ 382 __v; \
353 }) 383 })
354 384
355struct dma_desc { 385/**
386 * struct macb_dma_desc - Hardware DMA descriptor
387 * @addr: DMA address of data buffer
388 * @ctrl: Control and status bits
389 */
390struct macb_dma_desc {
356 u32 addr; 391 u32 addr;
357 u32 ctrl; 392 u32 ctrl;
358}; 393};
@@ -417,7 +452,12 @@ struct dma_desc {
417#define MACB_TX_USED_OFFSET 31 452#define MACB_TX_USED_OFFSET 31
418#define MACB_TX_USED_SIZE 1 453#define MACB_TX_USED_SIZE 1
419 454
420struct ring_info { 455/**
456 * struct macb_tx_skb - data about an skb which is being transmitted
457 * @skb: skb currently being transmitted
458 * @mapping: DMA address of the skb's data buffer
459 */
460struct macb_tx_skb {
421 struct sk_buff *skb; 461 struct sk_buff *skb;
422 dma_addr_t mapping; 462 dma_addr_t mapping;
423}; 463};
@@ -502,12 +542,12 @@ struct macb {
502 void __iomem *regs; 542 void __iomem *regs;
503 543
504 unsigned int rx_tail; 544 unsigned int rx_tail;
505 struct dma_desc *rx_ring; 545 struct macb_dma_desc *rx_ring;
506 void *rx_buffers; 546 void *rx_buffers;
507 547
508 unsigned int tx_head, tx_tail; 548 unsigned int tx_head, tx_tail;
509 struct dma_desc *tx_ring; 549 struct macb_dma_desc *tx_ring;
510 struct ring_info *tx_skb; 550 struct macb_tx_skb *tx_skb;
511 551
512 spinlock_t lock; 552 spinlock_t lock;
513 struct platform_device *pdev; 553 struct platform_device *pdev;
@@ -515,6 +555,7 @@ struct macb {
515 struct clk *hclk; 555 struct clk *hclk;
516 struct net_device *dev; 556 struct net_device *dev;
517 struct napi_struct napi; 557 struct napi_struct napi;
558 struct work_struct tx_error_task;
518 struct net_device_stats stats; 559 struct net_device_stats stats;
519 union { 560 union {
520 struct macb_stats macb; 561 struct macb_stats macb;
@@ -525,8 +566,6 @@ struct macb {
525 dma_addr_t tx_ring_dma; 566 dma_addr_t tx_ring_dma;
526 dma_addr_t rx_buffers_dma; 567 dma_addr_t rx_buffers_dma;
527 568
528 unsigned int rx_pending, tx_pending;
529
530 struct mii_bus *mii_bus; 569 struct mii_bus *mii_bus;
531 struct phy_device *phy_dev; 570 struct phy_device *phy_dev;
532 unsigned int link; 571 unsigned int link;
@@ -534,8 +573,22 @@ struct macb {
534 unsigned int duplex; 573 unsigned int duplex;
535 574
536 phy_interface_t phy_interface; 575 phy_interface_t phy_interface;
576
577 /* AT91RM9200 transmit */
578 struct sk_buff *skb; /* holds skb until xmit interrupt completes */
579 dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
580 int skb_length; /* saved skb length for pci_unmap_single */
537}; 581};
538 582
583extern const struct ethtool_ops macb_ethtool_ops;
584
585int macb_mii_init(struct macb *bp);
586int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
587struct net_device_stats *macb_get_stats(struct net_device *dev);
588void macb_set_rx_mode(struct net_device *dev);
589void macb_set_hwaddr(struct macb *bp);
590void macb_get_hwaddr(struct macb *bp);
591
539static inline bool macb_is_gem(struct macb *bp) 592static inline bool macb_is_gem(struct macb *bp)
540{ 593{
541 return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2; 594 return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b34d4b6..b407043ce9b0 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -191,6 +191,7 @@
191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ 191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ 192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */ 193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
194#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
194 195
195/* DMA Normal interrupt */ 196/* DMA Normal interrupt */
196#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ 197#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
@@ -210,7 +211,7 @@
210#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ 211#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
211 212
212#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ 213#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
213 DMA_INTR_ENA_TUE) 214 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
214 215
215#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ 216#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
216 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \ 217 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
@@ -373,6 +374,7 @@ struct xgmac_priv {
373 struct sk_buff **tx_skbuff; 374 struct sk_buff **tx_skbuff;
374 unsigned int tx_head; 375 unsigned int tx_head;
375 unsigned int tx_tail; 376 unsigned int tx_tail;
377 int tx_irq_cnt;
376 378
377 void __iomem *base; 379 void __iomem *base;
378 unsigned int dma_buf_sz; 380 unsigned int dma_buf_sz;
@@ -663,6 +665,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
663{ 665{
664 struct xgmac_dma_desc *p; 666 struct xgmac_dma_desc *p;
665 dma_addr_t paddr; 667 dma_addr_t paddr;
668 int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
666 669
667 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) { 670 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
668 int entry = priv->rx_head; 671 int entry = priv->rx_head;
@@ -671,13 +674,13 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
671 p = priv->dma_rx + entry; 674 p = priv->dma_rx + entry;
672 675
673 if (priv->rx_skbuff[entry] == NULL) { 676 if (priv->rx_skbuff[entry] == NULL) {
674 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz); 677 skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
675 if (unlikely(skb == NULL)) 678 if (unlikely(skb == NULL))
676 break; 679 break;
677 680
678 priv->rx_skbuff[entry] = skb; 681 priv->rx_skbuff[entry] = skb;
679 paddr = dma_map_single(priv->device, skb->data, 682 paddr = dma_map_single(priv->device, skb->data,
680 priv->dma_buf_sz, DMA_FROM_DEVICE); 683 bufsz, DMA_FROM_DEVICE);
681 desc_set_buf_addr(p, paddr, priv->dma_buf_sz); 684 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
682 } 685 }
683 686
@@ -701,10 +704,10 @@ static int xgmac_dma_desc_rings_init(struct net_device *dev)
701 unsigned int bfsize; 704 unsigned int bfsize;
702 705
703 /* Set the Buffer size according to the MTU; 706 /* Set the Buffer size according to the MTU;
704 * indeed, in case of jumbo we need to bump-up the buffer sizes. 707 * The total buffer size including any IP offset must be a multiple
708 * of 8 bytes.
705 */ 709 */
706 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64, 710 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
707 64);
708 711
709 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize); 712 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
710 713
@@ -845,9 +848,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
845static void xgmac_tx_complete(struct xgmac_priv *priv) 848static void xgmac_tx_complete(struct xgmac_priv *priv)
846{ 849{
847 int i; 850 int i;
848 void __iomem *ioaddr = priv->base;
849
850 writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
851 851
852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) { 852 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
853 unsigned int entry = priv->tx_tail; 853 unsigned int entry = priv->tx_tail;
@@ -888,7 +888,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
888 } 888 }
889 889
890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) > 890 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
891 TX_THRESH) 891 MAX_SKB_FRAGS)
892 netif_wake_queue(priv->dev); 892 netif_wake_queue(priv->dev);
893} 893}
894 894
@@ -965,8 +965,7 @@ static int xgmac_hw_init(struct net_device *dev)
965 ctrl |= XGMAC_CONTROL_IPC; 965 ctrl |= XGMAC_CONTROL_IPC;
966 writel(ctrl, ioaddr + XGMAC_CONTROL); 966 writel(ctrl, ioaddr + XGMAC_CONTROL);
967 967
968 value = DMA_CONTROL_DFF; 968 writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
969 writel(value, ioaddr + XGMAC_DMA_CONTROL);
970 969
971 /* Set the HW DMA mode and the COE */ 970 /* Set the HW DMA mode and the COE */
972 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA | 971 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
@@ -1060,19 +1059,15 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1060 struct xgmac_priv *priv = netdev_priv(dev); 1059 struct xgmac_priv *priv = netdev_priv(dev);
1061 unsigned int entry; 1060 unsigned int entry;
1062 int i; 1061 int i;
1062 u32 irq_flag;
1063 int nfrags = skb_shinfo(skb)->nr_frags; 1063 int nfrags = skb_shinfo(skb)->nr_frags;
1064 struct xgmac_dma_desc *desc, *first; 1064 struct xgmac_dma_desc *desc, *first;
1065 unsigned int desc_flags; 1065 unsigned int desc_flags;
1066 unsigned int len; 1066 unsigned int len;
1067 dma_addr_t paddr; 1067 dma_addr_t paddr;
1068 1068
1069 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) < 1069 priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1070 (nfrags + 1)) { 1070 irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
1071 writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
1072 priv->base + XGMAC_DMA_INTR_ENA);
1073 netif_stop_queue(dev);
1074 return NETDEV_TX_BUSY;
1075 }
1076 1071
1077 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ? 1072 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1078 TXDESC_CSUM_ALL : 0; 1073 TXDESC_CSUM_ALL : 0;
@@ -1113,9 +1108,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1113 /* Interrupt on completition only for the latest segment */ 1108 /* Interrupt on completition only for the latest segment */
1114 if (desc != first) 1109 if (desc != first)
1115 desc_set_tx_owner(desc, desc_flags | 1110 desc_set_tx_owner(desc, desc_flags |
1116 TXDESC_LAST_SEG | TXDESC_INTERRUPT); 1111 TXDESC_LAST_SEG | irq_flag);
1117 else 1112 else
1118 desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT; 1113 desc_flags |= TXDESC_LAST_SEG | irq_flag;
1119 1114
1120 /* Set owner on first desc last to avoid race condition */ 1115 /* Set owner on first desc last to avoid race condition */
1121 wmb(); 1116 wmb();
@@ -1124,6 +1119,9 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1124 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ); 1119 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1125 1120
1126 writel(1, priv->base + XGMAC_DMA_TX_POLL); 1121 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1122 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
1123 MAX_SKB_FRAGS)
1124 netif_stop_queue(dev);
1127 1125
1128 return NETDEV_TX_OK; 1126 return NETDEV_TX_OK;
1129} 1127}
@@ -1139,9 +1137,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1139 struct sk_buff *skb; 1137 struct sk_buff *skb;
1140 int frame_len; 1138 int frame_len;
1141 1139
1142 writel(DMA_STATUS_RI | DMA_STATUS_NIS,
1143 priv->base + XGMAC_DMA_STATUS);
1144
1145 entry = priv->rx_tail; 1140 entry = priv->rx_tail;
1146 p = priv->dma_rx + entry; 1141 p = priv->dma_rx + entry;
1147 if (desc_get_owner(p)) 1142 if (desc_get_owner(p))
@@ -1180,8 +1175,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
1180 1175
1181 xgmac_rx_refill(priv); 1176 xgmac_rx_refill(priv);
1182 1177
1183 writel(1, priv->base + XGMAC_DMA_RX_POLL);
1184
1185 return count; 1178 return count;
1186} 1179}
1187 1180
@@ -1205,7 +1198,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
1205 1198
1206 if (work_done < budget) { 1199 if (work_done < budget) {
1207 napi_complete(napi); 1200 napi_complete(napi);
1208 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA); 1201 __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1209 } 1202 }
1210 return work_done; 1203 return work_done;
1211} 1204}
@@ -1350,7 +1343,7 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1350 struct xgmac_priv *priv = netdev_priv(dev); 1343 struct xgmac_priv *priv = netdev_priv(dev);
1351 void __iomem *ioaddr = priv->base; 1344 void __iomem *ioaddr = priv->base;
1352 1345
1353 intr_status = readl(ioaddr + XGMAC_INT_STAT); 1346 intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1354 if (intr_status & XGMAC_INT_STAT_PMT) { 1347 if (intr_status & XGMAC_INT_STAT_PMT) {
1355 netdev_dbg(priv->dev, "received Magic frame\n"); 1348 netdev_dbg(priv->dev, "received Magic frame\n");
1356 /* clear the PMT bits 5 and 6 by reading the PMT */ 1349 /* clear the PMT bits 5 and 6 by reading the PMT */
@@ -1368,9 +1361,9 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1368 struct xgmac_extra_stats *x = &priv->xstats; 1361 struct xgmac_extra_stats *x = &priv->xstats;
1369 1362
1370 /* read the status register (CSR5) */ 1363 /* read the status register (CSR5) */
1371 intr_status = readl(priv->base + XGMAC_DMA_STATUS); 1364 intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1372 intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA); 1365 intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1373 writel(intr_status, priv->base + XGMAC_DMA_STATUS); 1366 __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1374 1367
1375 /* It displays the DMA process states (CSR5 register) */ 1368 /* It displays the DMA process states (CSR5 register) */
1376 /* ABNORMAL interrupts */ 1369 /* ABNORMAL interrupts */
@@ -1405,8 +1398,8 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1405 } 1398 }
1406 1399
1407 /* TX/RX NORMAL interrupts */ 1400 /* TX/RX NORMAL interrupts */
1408 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) { 1401 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
1409 writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA); 1402 __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1410 napi_schedule(&priv->napi); 1403 napi_schedule(&priv->napi);
1411 } 1404 }
1412 1405
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 2de50f95798f..d40c994a4f6a 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_CHELSIO 5config NET_VENDOR_CHELSIO
6 bool "Chelsio devices" 6 bool "Chelsio devices"
7 default y 7 default y
8 depends on PCI || INET 8 depends on PCI
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 1d17c92f2dda..c8fdeaae56c0 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -974,8 +974,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
974#endif 974#endif
975}; 975};
976 976
977static int __devinit init_one(struct pci_dev *pdev, 977static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
978 const struct pci_device_id *ent)
979{ 978{
980 static int version_printed; 979 static int version_printed;
981 980
@@ -1332,7 +1331,7 @@ static inline void t1_sw_reset(struct pci_dev *pdev)
1332 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0); 1331 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1333} 1332}
1334 1333
1335static void __devexit remove_one(struct pci_dev *pdev) 1334static void remove_one(struct pci_dev *pdev)
1336{ 1335{
1337 struct net_device *dev = pci_get_drvdata(pdev); 1336 struct net_device *dev = pci_get_drvdata(pdev);
1338 struct adapter *adapter = dev->ml_priv; 1337 struct adapter *adapter = dev->ml_priv;
@@ -1361,7 +1360,7 @@ static struct pci_driver driver = {
1361 .name = DRV_NAME, 1360 .name = DRV_NAME,
1362 .id_table = t1_pci_tbl, 1361 .id_table = t1_pci_tbl,
1363 .probe = init_one, 1362 .probe = init_one,
1364 .remove = __devexit_p(remove_one), 1363 .remove = remove_one,
1365}; 1364};
1366 1365
1367static int __init t1_init_module(void) 1366static int __init t1_init_module(void)
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47a84359d4e4..d84872e88171 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -367,18 +367,6 @@ void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
367 367
368#endif /* 0 */ 368#endif /* 0 */
369 369
370
371/*
372 * get_clock() implements a ns clock (see ktime_get)
373 */
374static inline ktime_t get_clock(void)
375{
376 struct timespec ts;
377
378 ktime_get_ts(&ts);
379 return timespec_to_ktime(ts);
380}
381
382/* 370/*
383 * tx_sched_init() allocates resources and does basic initialization. 371 * tx_sched_init() allocates resources and does basic initialization.
384 */ 372 */
@@ -411,7 +399,7 @@ static int tx_sched_init(struct sge *sge)
411static inline int sched_update_avail(struct sge *sge) 399static inline int sched_update_avail(struct sge *sge)
412{ 400{
413 struct sched *s = sge->tx_sched; 401 struct sched *s = sge->tx_sched;
414 ktime_t now = get_clock(); 402 ktime_t now = ktime_get();
415 unsigned int i; 403 unsigned int i;
416 long long delta_time_ns; 404 long long delta_time_ns;
417 405
@@ -2071,8 +2059,7 @@ static void espibug_workaround(unsigned long data)
2071/* 2059/*
2072 * Creates a t1_sge structure and returns suggested resource parameters. 2060 * Creates a t1_sge structure and returns suggested resource parameters.
2073 */ 2061 */
2074struct sge * __devinit t1_sge_create(struct adapter *adapter, 2062struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
2075 struct sge_params *p)
2076{ 2063{
2077 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL); 2064 struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
2078 int i; 2065 int i;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 8a43c7e19701..e0a03a31e7c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -892,8 +892,8 @@ static void power_sequence_xpak(adapter_t* adapter)
892 } 892 }
893} 893}
894 894
895int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi, 895int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
896 struct adapter_params *p) 896 struct adapter_params *p)
897{ 897{
898 p->chip_version = bi->chip_term; 898 p->chip_version = bi->chip_term;
899 p->is_asic = (p->chip_version != CHBT_TERM_FPGA); 899 p->is_asic = (p->chip_version != CHBT_TERM_FPGA);
@@ -992,7 +992,7 @@ out_err:
992/* 992/*
993 * Determine a card's PCI mode. 993 * Determine a card's PCI mode.
994 */ 994 */
995static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p) 995static void get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
996{ 996{
997 static const unsigned short speed_map[] = { 33, 66, 100, 133 }; 997 static const unsigned short speed_map[] = { 33, 66, 100, 133 };
998 u32 pci_mode; 998 u32 pci_mode;
@@ -1028,8 +1028,8 @@ void t1_free_sw_modules(adapter_t *adapter)
1028 t1_espi_destroy(adapter->espi); 1028 t1_espi_destroy(adapter->espi);
1029} 1029}
1030 1030
1031static void __devinit init_link_config(struct link_config *lc, 1031static void init_link_config(struct link_config *lc,
1032 const struct board_info *bi) 1032 const struct board_info *bi)
1033{ 1033{
1034 lc->supported = bi->caps; 1034 lc->supported = bi->caps;
1035 lc->requested_speed = lc->speed = SPEED_INVALID; 1035 lc->requested_speed = lc->speed = SPEED_INVALID;
@@ -1049,8 +1049,7 @@ static void __devinit init_link_config(struct link_config *lc,
1049 * Allocate and initialize the data structures that hold the SW state of 1049 * Allocate and initialize the data structures that hold the SW state of
1050 * the Terminator HW modules. 1050 * the Terminator HW modules.
1051 */ 1051 */
1052int __devinit t1_init_sw_modules(adapter_t *adapter, 1052int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
1053 const struct board_info *bi)
1054{ 1053{
1055 unsigned int i; 1054 unsigned int i;
1056 1055
diff --git a/drivers/net/ethernet/chelsio/cxgb/tp.c b/drivers/net/ethernet/chelsio/cxgb/tp.c
index 8bed4a59e65f..b146acabf982 100644
--- a/drivers/net/ethernet/chelsio/cxgb/tp.c
+++ b/drivers/net/ethernet/chelsio/cxgb/tp.c
@@ -55,7 +55,7 @@ void t1_tp_destroy(struct petp *tp)
55 kfree(tp); 55 kfree(tp);
56} 56}
57 57
58struct petp *__devinit t1_tp_create(adapter_t * adapter, struct tp_params *p) 58struct petp *t1_tp_create(adapter_t *adapter, struct tp_params *p)
59{ 59{
60 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL); 60 struct petp *tp = kzalloc(sizeof(*tp), GFP_KERNEL);
61 61
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index df01b6343241..8c82248ce416 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -42,10 +42,9 @@
42#include <linux/mdio.h> 42#include <linux/mdio.h>
43#include "version.h" 43#include "version.h"
44 44
45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__) 45#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ##__VA_ARGS__)
46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__) 46#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ##__VA_ARGS__)
47#define CH_ALERT(adap, fmt, ...) \ 47#define CH_ALERT(adap, fmt, ...) dev_alert(&adap->pdev->dev, fmt, ##__VA_ARGS__)
48 dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
49 48
50/* 49/*
51 * More powerful macro that selectively prints messages based on msg_enable. 50 * More powerful macro that selectively prints messages based on msg_enable.
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 9c9f3260344a..f15ee326d5c1 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3078,7 +3078,7 @@ static void set_nqsets(struct adapter *adap)
3078 } 3078 }
3079} 3079}
3080 3080
3081static int __devinit cxgb_enable_msix(struct adapter *adap) 3081static int cxgb_enable_msix(struct adapter *adap)
3082{ 3082{
3083 struct msix_entry entries[SGE_QSETS + 1]; 3083 struct msix_entry entries[SGE_QSETS + 1];
3084 int vectors; 3084 int vectors;
@@ -3108,8 +3108,7 @@ static int __devinit cxgb_enable_msix(struct adapter *adap)
3108 return err; 3108 return err;
3109} 3109}
3110 3110
3111static void __devinit print_port_info(struct adapter *adap, 3111static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3112 const struct adapter_info *ai)
3113{ 3112{
3114 static const char *pci_variant[] = { 3113 static const char *pci_variant[] = {
3115 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express" 3114 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
@@ -3165,7 +3164,7 @@ static const struct net_device_ops cxgb_netdev_ops = {
3165#endif 3164#endif
3166}; 3165};
3167 3166
3168static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev) 3167static void cxgb3_init_iscsi_mac(struct net_device *dev)
3169{ 3168{
3170 struct port_info *pi = netdev_priv(dev); 3169 struct port_info *pi = netdev_priv(dev);
3171 3170
@@ -3176,8 +3175,7 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3176#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 3175#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3177#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 3176#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3178 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 3177 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3179static int __devinit init_one(struct pci_dev *pdev, 3178static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3180 const struct pci_device_id *ent)
3181{ 3179{
3182 static int version_printed; 3180 static int version_printed;
3183 3181
@@ -3381,7 +3379,7 @@ out:
3381 return err; 3379 return err;
3382} 3380}
3383 3381
3384static void __devexit remove_one(struct pci_dev *pdev) 3382static void remove_one(struct pci_dev *pdev)
3385{ 3383{
3386 struct adapter *adapter = pci_get_drvdata(pdev); 3384 struct adapter *adapter = pci_get_drvdata(pdev);
3387 3385
@@ -3425,7 +3423,7 @@ static struct pci_driver driver = {
3425 .name = DRV_NAME, 3423 .name = DRV_NAME,
3426 .id_table = cxgb3_pci_tbl, 3424 .id_table = cxgb3_pci_tbl,
3427 .probe = init_one, 3425 .probe = init_one,
3428 .remove = __devexit_p(remove_one), 3426 .remove = remove_one,
3429 .err_handler = &t3_err_handler, 3427 .err_handler = &t3_err_handler,
3430}; 3428};
3431 3429
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 2dbbcbb450d3..942dace361d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1382,7 +1382,7 @@ static inline int adap2type(struct adapter *adapter)
1382 return type; 1382 return type;
1383} 1383}
1384 1384
1385void __devinit cxgb3_adapter_ofld(struct adapter *adapter) 1385void cxgb3_adapter_ofld(struct adapter *adapter)
1386{ 1386{
1387 struct t3cdev *tdev = &adapter->tdev; 1387 struct t3cdev *tdev = &adapter->tdev;
1388 1388
@@ -1396,7 +1396,7 @@ void __devinit cxgb3_adapter_ofld(struct adapter *adapter)
1396 register_tdev(tdev); 1396 register_tdev(tdev);
1397} 1397}
1398 1398
1399void __devexit cxgb3_adapter_unofld(struct adapter *adapter) 1399void cxgb3_adapter_unofld(struct adapter *adapter)
1400{ 1400{
1401 struct t3cdev *tdev = &adapter->tdev; 1401 struct t3cdev *tdev = &adapter->tdev;
1402 1402
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0df1284df497..130dd9d5b493 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2148,8 +2148,8 @@ static const struct file_operations mem_debugfs_fops = {
2148 .llseek = default_llseek, 2148 .llseek = default_llseek,
2149}; 2149};
2150 2150
2151static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, 2151static void add_debugfs_mem(struct adapter *adap, const char *name,
2152 unsigned int idx, unsigned int size_mb) 2152 unsigned int idx, unsigned int size_mb)
2153{ 2153{
2154 struct dentry *de; 2154 struct dentry *de;
2155 2155
@@ -2159,7 +2159,7 @@ static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2159 de->d_inode->i_size = size_mb << 20; 2159 de->d_inode->i_size = size_mb << 20;
2160} 2160}
2161 2161
2162static int __devinit setup_debugfs(struct adapter *adap) 2162static int setup_debugfs(struct adapter *adap)
2163{ 2163{
2164 int i; 2164 int i;
2165 2165
@@ -4173,7 +4173,7 @@ static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4173 * of ports we found and the number of available CPUs. Most settings can be 4173 * of ports we found and the number of available CPUs. Most settings can be
4174 * modified by the admin prior to actual use. 4174 * modified by the admin prior to actual use.
4175 */ 4175 */
4176static void __devinit cfg_queues(struct adapter *adap) 4176static void cfg_queues(struct adapter *adap)
4177{ 4177{
4178 struct sge *s = &adap->sge; 4178 struct sge *s = &adap->sge;
4179 int i, q10g = 0, n10g = 0, qidx = 0; 4179 int i, q10g = 0, n10g = 0, qidx = 0;
@@ -4257,7 +4257,7 @@ static void __devinit cfg_queues(struct adapter *adap)
4257 * Reduce the number of Ethernet queues across all ports to at most n. 4257 * Reduce the number of Ethernet queues across all ports to at most n.
4258 * n provides at least one queue per port. 4258 * n provides at least one queue per port.
4259 */ 4259 */
4260static void __devinit reduce_ethqs(struct adapter *adap, int n) 4260static void reduce_ethqs(struct adapter *adap, int n)
4261{ 4261{
4262 int i; 4262 int i;
4263 struct port_info *pi; 4263 struct port_info *pi;
@@ -4284,7 +4284,7 @@ static void __devinit reduce_ethqs(struct adapter *adap, int n)
4284/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ 4284/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4285#define EXTRA_VECS 2 4285#define EXTRA_VECS 2
4286 4286
4287static int __devinit enable_msix(struct adapter *adap) 4287static int enable_msix(struct adapter *adap)
4288{ 4288{
4289 int ofld_need = 0; 4289 int ofld_need = 0;
4290 int i, err, want, need; 4290 int i, err, want, need;
@@ -4333,7 +4333,7 @@ static int __devinit enable_msix(struct adapter *adap)
4333 4333
4334#undef EXTRA_VECS 4334#undef EXTRA_VECS
4335 4335
4336static int __devinit init_rss(struct adapter *adap) 4336static int init_rss(struct adapter *adap)
4337{ 4337{
4338 unsigned int i, j; 4338 unsigned int i, j;
4339 4339
@@ -4349,7 +4349,7 @@ static int __devinit init_rss(struct adapter *adap)
4349 return 0; 4349 return 0;
4350} 4350}
4351 4351
4352static void __devinit print_port_info(const struct net_device *dev) 4352static void print_port_info(const struct net_device *dev)
4353{ 4353{
4354 static const char *base[] = { 4354 static const char *base[] = {
4355 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", 4355 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
@@ -4386,7 +4386,7 @@ static void __devinit print_port_info(const struct net_device *dev)
4386 adap->params.vpd.sn, adap->params.vpd.ec); 4386 adap->params.vpd.sn, adap->params.vpd.ec);
4387} 4387}
4388 4388
4389static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev) 4389static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4390{ 4390{
4391 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); 4391 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4392} 4392}
@@ -4419,8 +4419,7 @@ static void free_some_resources(struct adapter *adapter)
4419#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 4419#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4420 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 4420 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4421 4421
4422static int __devinit init_one(struct pci_dev *pdev, 4422static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4423 const struct pci_device_id *ent)
4424{ 4423{
4425 int func, i, err; 4424 int func, i, err;
4426 struct port_info *pi; 4425 struct port_info *pi;
@@ -4640,7 +4639,7 @@ sriov:
4640 return err; 4639 return err;
4641} 4640}
4642 4641
4643static void __devexit remove_one(struct pci_dev *pdev) 4642static void remove_one(struct pci_dev *pdev)
4644{ 4643{
4645 struct adapter *adapter = pci_get_drvdata(pdev); 4644 struct adapter *adapter = pci_get_drvdata(pdev);
4646 4645
@@ -4680,7 +4679,7 @@ static struct pci_driver cxgb4_driver = {
4680 .name = KBUILD_MODNAME, 4679 .name = KBUILD_MODNAME,
4681 .id_table = cxgb4_pci_tbl, 4680 .id_table = cxgb4_pci_tbl,
4682 .probe = init_one, 4681 .probe = init_one,
4683 .remove = __devexit_p(remove_one), 4682 .remove = remove_one,
4684 .err_handler = &cxgb4_eeh, 4683 .err_handler = &cxgb4_eeh,
4685}; 4684};
4686 4685
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 730ae2cfa49e..45f2bea2e929 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2003,7 +2003,7 @@ void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
2003 * 2003 *
2004 * Initialize the congestion control parameters. 2004 * Initialize the congestion control parameters.
2005 */ 2005 */
2006static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b) 2006static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2007{ 2007{
2008 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1; 2008 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2009 a[9] = 2; 2009 a[9] = 2;
@@ -3440,8 +3440,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3440 return 0; 3440 return 0;
3441} 3441}
3442 3442
3443static void __devinit get_pci_mode(struct adapter *adapter, 3443static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3444 struct pci_params *p)
3445{ 3444{
3446 u16 val; 3445 u16 val;
3447 3446
@@ -3460,8 +3459,7 @@ static void __devinit get_pci_mode(struct adapter *adapter,
3460 * Initializes the SW state maintained for each link, including the link's 3459 * Initializes the SW state maintained for each link, including the link's
3461 * capabilities and default speed/flow-control/autonegotiation settings. 3460 * capabilities and default speed/flow-control/autonegotiation settings.
3462 */ 3461 */
3463static void __devinit init_link_config(struct link_config *lc, 3462static void init_link_config(struct link_config *lc, unsigned int caps)
3464 unsigned int caps)
3465{ 3463{
3466 lc->supported = caps; 3464 lc->supported = caps;
3467 lc->requested_speed = 0; 3465 lc->requested_speed = 0;
@@ -3485,7 +3483,7 @@ int t4_wait_dev_ready(struct adapter *adap)
3485 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO; 3483 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
3486} 3484}
3487 3485
3488static int __devinit get_flash_params(struct adapter *adap) 3486static int get_flash_params(struct adapter *adap)
3489{ 3487{
3490 int ret; 3488 int ret;
3491 u32 info; 3489 u32 info;
@@ -3521,7 +3519,7 @@ static int __devinit get_flash_params(struct adapter *adap)
3521 * values for some adapter tunables, take PHYs out of reset, and 3519 * values for some adapter tunables, take PHYs out of reset, and
3522 * initialize the MDIO interface. 3520 * initialize the MDIO interface.
3523 */ 3521 */
3524int __devinit t4_prep_adapter(struct adapter *adapter) 3522int t4_prep_adapter(struct adapter *adapter)
3525{ 3523{
3526 int ret; 3524 int ret;
3527 3525
@@ -3549,7 +3547,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter)
3549 return 0; 3547 return 0;
3550} 3548}
3551 3549
3552int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf) 3550int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3553{ 3551{
3554 u8 addr[6]; 3552 u8 addr[6];
3555 int ret, i, j = 0; 3553 int ret, i, j = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9dad56101e23..0188df705719 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2023,7 +2023,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2023 * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the 2023 * Set up out /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2024 * directory (debugfs_root) has already been set up. 2024 * directory (debugfs_root) has already been set up.
2025 */ 2025 */
2026static int __devinit setup_debugfs(struct adapter *adapter) 2026static int setup_debugfs(struct adapter *adapter)
2027{ 2027{
2028 int i; 2028 int i;
2029 2029
@@ -2064,7 +2064,7 @@ static void cleanup_debugfs(struct adapter *adapter)
2064 * adapter parameters we're going to be using and initialize basic adapter 2064 * adapter parameters we're going to be using and initialize basic adapter
2065 * hardware support. 2065 * hardware support.
2066 */ 2066 */
2067static int __devinit adap_init0(struct adapter *adapter) 2067static int adap_init0(struct adapter *adapter)
2068{ 2068{
2069 struct vf_resources *vfres = &adapter->params.vfres; 2069 struct vf_resources *vfres = &adapter->params.vfres;
2070 struct sge_params *sge_params = &adapter->params.sge; 2070 struct sge_params *sge_params = &adapter->params.sge;
@@ -2266,7 +2266,7 @@ static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2266 * be modified by the admin via ethtool and cxgbtool prior to the adapter 2266 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2267 * being brought up for the first time. 2267 * being brought up for the first time.
2268 */ 2268 */
2269static void __devinit cfg_queues(struct adapter *adapter) 2269static void cfg_queues(struct adapter *adapter)
2270{ 2270{
2271 struct sge *s = &adapter->sge; 2271 struct sge *s = &adapter->sge;
2272 int q10g, n10g, qidx, pidx, qs; 2272 int q10g, n10g, qidx, pidx, qs;
@@ -2361,7 +2361,7 @@ static void __devinit cfg_queues(struct adapter *adapter)
2361 * Reduce the number of Ethernet queues across all ports to at most n. 2361 * Reduce the number of Ethernet queues across all ports to at most n.
2362 * n provides at least one queue per port. 2362 * n provides at least one queue per port.
2363 */ 2363 */
2364static void __devinit reduce_ethqs(struct adapter *adapter, int n) 2364static void reduce_ethqs(struct adapter *adapter, int n)
2365{ 2365{
2366 int i; 2366 int i;
2367 struct port_info *pi; 2367 struct port_info *pi;
@@ -2400,7 +2400,7 @@ static void __devinit reduce_ethqs(struct adapter *adapter, int n)
2400 * for our "extras". Note that this process may lower the maximum number of 2400 * for our "extras". Note that this process may lower the maximum number of
2401 * allowed Queue Sets ... 2401 * allowed Queue Sets ...
2402 */ 2402 */
2403static int __devinit enable_msix(struct adapter *adapter) 2403static int enable_msix(struct adapter *adapter)
2404{ 2404{
2405 int i, err, want, need; 2405 int i, err, want, need;
2406 struct msix_entry entries[MSIX_ENTRIES]; 2406 struct msix_entry entries[MSIX_ENTRIES];
@@ -2462,8 +2462,8 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
2462 * state needed to manage the device. This routine is called "init_one" in 2462 * state needed to manage the device. This routine is called "init_one" in
2463 * the PF Driver ... 2463 * the PF Driver ...
2464 */ 2464 */
2465static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, 2465static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2466 const struct pci_device_id *ent) 2466 const struct pci_device_id *ent)
2467{ 2467{
2468 static int version_printed; 2468 static int version_printed;
2469 2469
@@ -2769,7 +2769,7 @@ err_disable_device:
2769 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note 2769 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2770 * that this is called "remove_one" in the PF Driver.) 2770 * that this is called "remove_one" in the PF Driver.)
2771 */ 2771 */
2772static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) 2772static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2773{ 2773{
2774 struct adapter *adapter = pci_get_drvdata(pdev); 2774 struct adapter *adapter = pci_get_drvdata(pdev);
2775 2775
@@ -2835,7 +2835,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2835 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt 2835 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2836 * delivery. 2836 * delivery.
2837 */ 2837 */
2838static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) 2838static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2839{ 2839{
2840 struct adapter *adapter; 2840 struct adapter *adapter;
2841 int pidx; 2841 int pidx;
@@ -2905,8 +2905,8 @@ static struct pci_driver cxgb4vf_driver = {
2905 .name = KBUILD_MODNAME, 2905 .name = KBUILD_MODNAME,
2906 .id_table = cxgb4vf_pci_tbl, 2906 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe, 2907 .probe = cxgb4vf_pci_probe,
2908 .remove = __devexit_p(cxgb4vf_pci_remove), 2908 .remove = cxgb4vf_pci_remove,
2909 .shutdown = __devexit_p(cxgb4vf_pci_shutdown), 2909 .shutdown = cxgb4vf_pci_shutdown,
2910}; 2910};
2911 2911
2912/* 2912/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index a65c80aed1f2..283f9d0d37fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -232,8 +232,8 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
232 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); 232 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
233} 233}
234 234
235int __devinit t4vf_wait_dev_ready(struct adapter *); 235int t4vf_wait_dev_ready(struct adapter *);
236int __devinit t4vf_port_init(struct adapter *, int); 236int t4vf_port_init(struct adapter *, int);
237 237
238int t4vf_fw_reset(struct adapter *); 238int t4vf_fw_reset(struct adapter *);
239int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); 239int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fe3fd3dad6f7..7127c7b9efde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -46,7 +46,7 @@
46 * returning a value other than all 1's). Return an error if it doesn't 46 * returning a value other than all 1's). Return an error if it doesn't
47 * become ready ... 47 * become ready ...
48 */ 48 */
49int __devinit t4vf_wait_dev_ready(struct adapter *adapter) 49int t4vf_wait_dev_ready(struct adapter *adapter)
50{ 50{
51 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI; 51 const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
52 const u32 notready1 = 0xffffffff; 52 const u32 notready1 = 0xffffffff;
@@ -253,8 +253,7 @@ static int hash_mac_addr(const u8 *addr)
253 * Initializes the SW state maintained for each link, including the link's 253 * Initializes the SW state maintained for each link, including the link's
254 * capabilities and default speed/flow-control/autonegotiation settings. 254 * capabilities and default speed/flow-control/autonegotiation settings.
255 */ 255 */
256static void __devinit init_link_config(struct link_config *lc, 256static void init_link_config(struct link_config *lc, unsigned int caps)
257 unsigned int caps)
258{ 257{
259 lc->supported = caps; 258 lc->supported = caps;
260 lc->requested_speed = 0; 259 lc->requested_speed = 0;
@@ -275,7 +274,7 @@ static void __devinit init_link_config(struct link_config *lc,
275 * @adapter: the adapter 274 * @adapter: the adapter
276 * @pidx: the adapter port index 275 * @pidx: the adapter port index
277 */ 276 */
278int __devinit t4vf_port_init(struct adapter *adapter, int pidx) 277int t4vf_port_init(struct adapter *adapter, int pidx)
279{ 278{
280 struct port_info *pi = adap2pinfo(adapter, pidx); 279 struct port_info *pi = adap2pinfo(adapter, pidx);
281 struct fw_vi_cmd vi_cmd, vi_rpl; 280 struct fw_vi_cmd vi_cmd, vi_rpl;
diff --git a/drivers/net/ethernet/cisco/Kconfig b/drivers/net/ethernet/cisco/Kconfig
index 94606f7ee13a..1c7b884e3371 100644
--- a/drivers/net/ethernet/cisco/Kconfig
+++ b/drivers/net/ethernet/cisco/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_CISCO 5config NET_VENDOR_CISCO
6 bool "Cisco devices" 6 bool "Cisco devices"
7 default y 7 default y
8 depends on PCI && INET 8 depends on PCI
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig
index 9cc706a6cffd..b63f8d8a4261 100644
--- a/drivers/net/ethernet/cisco/enic/Kconfig
+++ b/drivers/net/ethernet/cisco/enic/Kconfig
@@ -4,6 +4,6 @@
4 4
5config ENIC 5config ENIC
6 tristate "Cisco VIC Ethernet NIC Support" 6 tristate "Cisco VIC Ethernet NIC Support"
7 depends on PCI && INET 7 depends on PCI
8 ---help--- 8 ---help---
9 This enables the support for the Cisco VIC Ethernet card. 9 This enables the support for the Cisco VIC Ethernet card.
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index ad1468b3ab91..64866ff1aea0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2275,8 +2275,7 @@ static void enic_iounmap(struct enic *enic)
2275 iounmap(enic->bar[i].vaddr); 2275 iounmap(enic->bar[i].vaddr);
2276} 2276}
2277 2277
2278static int __devinit enic_probe(struct pci_dev *pdev, 2278static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2279 const struct pci_device_id *ent)
2280{ 2279{
2281 struct device *dev = &pdev->dev; 2280 struct device *dev = &pdev->dev;
2282 struct net_device *netdev; 2281 struct net_device *netdev;
@@ -2552,7 +2551,7 @@ err_out_free_netdev:
2552 return err; 2551 return err;
2553} 2552}
2554 2553
2555static void __devexit enic_remove(struct pci_dev *pdev) 2554static void enic_remove(struct pci_dev *pdev)
2556{ 2555{
2557 struct net_device *netdev = pci_get_drvdata(pdev); 2556 struct net_device *netdev = pci_get_drvdata(pdev);
2558 2557
@@ -2584,7 +2583,7 @@ static struct pci_driver enic_driver = {
2584 .name = DRV_NAME, 2583 .name = DRV_NAME,
2585 .id_table = enic_id_table, 2584 .id_table = enic_id_table,
2586 .probe = enic_probe, 2585 .probe = enic_probe,
2587 .remove = __devexit_p(enic_remove), 2586 .remove = enic_remove,
2588}; 2587};
2589 2588
2590static int __init enic_init_module(void) 2589static int __init enic_init_module(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 36499d5edd95..c73472c369cd 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -193,35 +193,35 @@ iow(board_info_t * db, int reg, int value)
193 193
194static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count) 194static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
195{ 195{
196 writesb(reg, data, count); 196 iowrite8_rep(reg, data, count);
197} 197}
198 198
199static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count) 199static void dm9000_outblk_16bit(void __iomem *reg, void *data, int count)
200{ 200{
201 writesw(reg, data, (count+1) >> 1); 201 iowrite16_rep(reg, data, (count+1) >> 1);
202} 202}
203 203
204static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count) 204static void dm9000_outblk_32bit(void __iomem *reg, void *data, int count)
205{ 205{
206 writesl(reg, data, (count+3) >> 2); 206 iowrite32_rep(reg, data, (count+3) >> 2);
207} 207}
208 208
209/* input block from chip to memory */ 209/* input block from chip to memory */
210 210
211static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count) 211static void dm9000_inblk_8bit(void __iomem *reg, void *data, int count)
212{ 212{
213 readsb(reg, data, count); 213 ioread8_rep(reg, data, count);
214} 214}
215 215
216 216
217static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count) 217static void dm9000_inblk_16bit(void __iomem *reg, void *data, int count)
218{ 218{
219 readsw(reg, data, (count+1) >> 1); 219 ioread16_rep(reg, data, (count+1) >> 1);
220} 220}
221 221
222static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count) 222static void dm9000_inblk_32bit(void __iomem *reg, void *data, int count)
223{ 223{
224 readsl(reg, data, (count+3) >> 2); 224 ioread32_rep(reg, data, (count+3) >> 2);
225} 225}
226 226
227/* dump block from chip to null */ 227/* dump block from chip to null */
@@ -1359,7 +1359,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
1359/* 1359/*
1360 * Search DM9000 board, allocate space and register it 1360 * Search DM9000 board, allocate space and register it
1361 */ 1361 */
1362static int __devinit 1362static int
1363dm9000_probe(struct platform_device *pdev) 1363dm9000_probe(struct platform_device *pdev)
1364{ 1364{
1365 struct dm9000_plat_data *pdata = pdev->dev.platform_data; 1365 struct dm9000_plat_data *pdata = pdev->dev.platform_data;
@@ -1661,7 +1661,7 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = {
1661 .resume = dm9000_drv_resume, 1661 .resume = dm9000_drv_resume,
1662}; 1662};
1663 1663
1664static int __devexit 1664static int
1665dm9000_drv_remove(struct platform_device *pdev) 1665dm9000_drv_remove(struct platform_device *pdev)
1666{ 1666{
1667 struct net_device *ndev = platform_get_drvdata(pdev); 1667 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1683,7 +1683,7 @@ static struct platform_driver dm9000_driver = {
1683 .pm = &dm9000_drv_pm_ops, 1683 .pm = &dm9000_drv_pm_ops,
1684 }, 1684 },
1685 .probe = dm9000_probe, 1685 .probe = dm9000_probe,
1686 .remove = __devexit_p(dm9000_drv_remove), 1686 .remove = dm9000_drv_remove,
1687}; 1687};
1688 1688
1689static int __init 1689static int __init
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
index 17ae8c619680..9f992b95eddc 100644
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -1910,9 +1910,8 @@ static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
1910static int ndevs; 1910static int ndevs;
1911static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, }; 1911static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
1912 1912
1913/* '21' below should really be 'MAX_NUM_EWRK3S' */
1914module_param_array(io, int, NULL, 0); 1913module_param_array(io, int, NULL, 0);
1915module_param_array(irq, int, NULL, 0); 1914module_param_array(irq, byte, NULL, 0);
1916MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)"); 1915MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
1917MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)"); 1916MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
1918 1917
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 77335853ac36..eaab73cf27ca 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1700,7 +1700,7 @@ static const struct ethtool_ops de_ethtool_ops = {
1700 .get_regs = de_get_regs, 1700 .get_regs = de_get_regs,
1701}; 1701};
1702 1702
1703static void __devinit de21040_get_mac_address (struct de_private *de) 1703static void de21040_get_mac_address(struct de_private *de)
1704{ 1704{
1705 unsigned i; 1705 unsigned i;
1706 1706
@@ -1721,7 +1721,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
1721 } 1721 }
1722} 1722}
1723 1723
1724static void __devinit de21040_get_media_info(struct de_private *de) 1724static void de21040_get_media_info(struct de_private *de)
1725{ 1725{
1726 unsigned int i; 1726 unsigned int i;
1727 1727
@@ -1748,7 +1748,8 @@ static void __devinit de21040_get_media_info(struct de_private *de)
1748} 1748}
1749 1749
1750/* Note: this routine returns extra data bits for size detection. */ 1750/* Note: this routine returns extra data bits for size detection. */
1751static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len) 1751static unsigned tulip_read_eeprom(void __iomem *regs, int location,
1752 int addr_len)
1752{ 1753{
1753 int i; 1754 int i;
1754 unsigned retval = 0; 1755 unsigned retval = 0;
@@ -1783,7 +1784,7 @@ static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, in
1783 return retval; 1784 return retval;
1784} 1785}
1785 1786
1786static void __devinit de21041_get_srom_info (struct de_private *de) 1787static void de21041_get_srom_info(struct de_private *de)
1787{ 1788{
1788 unsigned i, sa_offset = 0, ofs; 1789 unsigned i, sa_offset = 0, ofs;
1789 u8 ee_data[DE_EEPROM_SIZE + 6] = {}; 1790 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
@@ -1961,8 +1962,7 @@ static const struct net_device_ops de_netdev_ops = {
1961 .ndo_validate_addr = eth_validate_addr, 1962 .ndo_validate_addr = eth_validate_addr,
1962}; 1963};
1963 1964
1964static int __devinit de_init_one (struct pci_dev *pdev, 1965static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1965 const struct pci_device_id *ent)
1966{ 1966{
1967 struct net_device *dev; 1967 struct net_device *dev;
1968 struct de_private *de; 1968 struct de_private *de;
@@ -2099,7 +2099,7 @@ err_out_free:
2099 return rc; 2099 return rc;
2100} 2100}
2101 2101
2102static void __devexit de_remove_one (struct pci_dev *pdev) 2102static void de_remove_one(struct pci_dev *pdev)
2103{ 2103{
2104 struct net_device *dev = pci_get_drvdata(pdev); 2104 struct net_device *dev = pci_get_drvdata(pdev);
2105 struct de_private *de = netdev_priv(dev); 2105 struct de_private *de = netdev_priv(dev);
@@ -2184,7 +2184,7 @@ static struct pci_driver de_driver = {
2184 .name = DRV_NAME, 2184 .name = DRV_NAME,
2185 .id_table = de_pci_tbl, 2185 .id_table = de_pci_tbl,
2186 .probe = de_init_one, 2186 .probe = de_init_one,
2187 .remove = __devexit_p(de_remove_one), 2187 .remove = de_remove_one,
2188#ifdef CONFIG_PM 2188#ifdef CONFIG_PM
2189 .suspend = de_suspend, 2189 .suspend = de_suspend,
2190 .resume = de_resume, 2190 .resume = de_resume,
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f879e9224846..4c830030fb06 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -479,7 +479,7 @@
479 479
480#include "de4x5.h" 480#include "de4x5.h"
481 481
482static const char version[] __devinitconst = 482static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; 483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484 484
485#define c_char const char 485#define c_char const char
@@ -1092,7 +1092,7 @@ static const struct net_device_ops de4x5_netdev_ops = {
1092}; 1092};
1093 1093
1094 1094
1095static int __devinit 1095static int
1096de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) 1096de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1097{ 1097{
1098 char name[DE4X5_NAME_LENGTH + 1]; 1098 char name[DE4X5_NAME_LENGTH + 1];
@@ -2077,7 +2077,7 @@ static int __init de4x5_eisa_probe (struct device *gendev)
2077 return status; 2077 return status;
2078} 2078}
2079 2079
2080static int __devexit de4x5_eisa_remove (struct device *device) 2080static int de4x5_eisa_remove(struct device *device)
2081{ 2081{
2082 struct net_device *dev; 2082 struct net_device *dev;
2083 u_long iobase; 2083 u_long iobase;
@@ -2104,7 +2104,7 @@ static struct eisa_driver de4x5_eisa_driver = {
2104 .driver = { 2104 .driver = {
2105 .name = "de4x5", 2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe, 2106 .probe = de4x5_eisa_probe,
2107 .remove = __devexit_p (de4x5_eisa_remove), 2107 .remove = de4x5_eisa_remove,
2108 } 2108 }
2109}; 2109};
2110MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); 2110MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
@@ -2118,7 +2118,7 @@ MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2118** DECchips, we can find the base SROM irrespective of the BIOS scan direction. 2118** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
2119** For single port cards this is a time waster... 2119** For single port cards this is a time waster...
2120*/ 2120*/
2121static void __devinit 2121static void
2122srom_search(struct net_device *dev, struct pci_dev *pdev) 2122srom_search(struct net_device *dev, struct pci_dev *pdev)
2123{ 2123{
2124 u_char pb; 2124 u_char pb;
@@ -2192,8 +2192,8 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2192** kernels use the V0.535[n] drivers. 2192** kernels use the V0.535[n] drivers.
2193*/ 2193*/
2194 2194
2195static int __devinit de4x5_pci_probe (struct pci_dev *pdev, 2195static int de4x5_pci_probe(struct pci_dev *pdev,
2196 const struct pci_device_id *ent) 2196 const struct pci_device_id *ent)
2197{ 2197{
2198 u_char pb, pbus = 0, dev_num, dnum = 0, timer; 2198 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2199 u_short vendor, status; 2199 u_short vendor, status;
@@ -2314,7 +2314,7 @@ static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2314 return error; 2314 return error;
2315} 2315}
2316 2316
2317static void __devexit de4x5_pci_remove (struct pci_dev *pdev) 2317static void de4x5_pci_remove(struct pci_dev *pdev)
2318{ 2318{
2319 struct net_device *dev; 2319 struct net_device *dev;
2320 u_long iobase; 2320 u_long iobase;
@@ -2344,7 +2344,7 @@ static struct pci_driver de4x5_pci_driver = {
2344 .name = "de4x5", 2344 .name = "de4x5",
2345 .id_table = de4x5_pci_tbl, 2345 .id_table = de4x5_pci_tbl,
2346 .probe = de4x5_pci_probe, 2346 .probe = de4x5_pci_probe,
2347 .remove = __devexit_p (de4x5_pci_remove), 2347 .remove = de4x5_pci_remove,
2348}; 2348};
2349 2349
2350#endif 2350#endif
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index d23755ea9bc7..83139307861c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -291,8 +291,8 @@ enum dmfe_CR6_bits {
291}; 291};
292 292
293/* Global variable declaration ----------------------------- */ 293/* Global variable declaration ----------------------------- */
294static int __devinitdata printed_version; 294static int printed_version;
295static const char version[] __devinitconst = 295static const char version[] =
296 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; 296 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
297 297
298static int dmfe_debug; 298static int dmfe_debug;
@@ -367,8 +367,7 @@ static const struct net_device_ops netdev_ops = {
367 * Search DM910X board ,allocate space and register it 367 * Search DM910X board ,allocate space and register it
368 */ 368 */
369 369
370static int __devinit dmfe_init_one (struct pci_dev *pdev, 370static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
371 const struct pci_device_id *ent)
372{ 371{
373 struct dmfe_board_info *db; /* board information structure */ 372 struct dmfe_board_info *db; /* board information structure */
374 struct net_device *dev; 373 struct net_device *dev;
@@ -531,7 +530,7 @@ err_out_free:
531} 530}
532 531
533 532
534static void __devexit dmfe_remove_one (struct pci_dev *pdev) 533static void dmfe_remove_one(struct pci_dev *pdev)
535{ 534{
536 struct net_device *dev = pci_get_drvdata(pdev); 535 struct net_device *dev = pci_get_drvdata(pdev);
537 struct dmfe_board_info *db = netdev_priv(dev); 536 struct dmfe_board_info *db = netdev_priv(dev);
@@ -2187,7 +2186,7 @@ static struct pci_driver dmfe_driver = {
2187 .name = "dmfe", 2186 .name = "dmfe",
2188 .id_table = dmfe_pci_tbl, 2187 .id_table = dmfe_pci_tbl,
2189 .probe = dmfe_init_one, 2188 .probe = dmfe_init_one,
2190 .remove = __devexit_p(dmfe_remove_one), 2189 .remove = dmfe_remove_one,
2191 .suspend = dmfe_suspend, 2190 .suspend = dmfe_suspend,
2192 .resume = dmfe_resume 2191 .resume = dmfe_resume
2193}; 2192};
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index 44f7e8e82d85..df5a892fb49c 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -26,7 +26,7 @@
26 */ 26 */
27 27
28/* Known cards that have old-style EEPROMs. */ 28/* Known cards that have old-style EEPROMs. */
29static struct eeprom_fixup eeprom_fixups[] __devinitdata = { 29static struct eeprom_fixup eeprom_fixups[] = {
30 {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c, 30 {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
31 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }}, 31 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
32 {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f, 32 {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
79 {NULL}}; 79 {NULL}};
80 80
81 81
82static const char *const block_name[] __devinitconst = { 82static const char *const block_name[] = {
83 "21140 non-MII", 83 "21140 non-MII",
84 "21140 MII PHY", 84 "21140 MII PHY",
85 "21142 Serial PHY", 85 "21142 Serial PHY",
@@ -102,7 +102,7 @@ static const char *const block_name[] __devinitconst = {
102 * #ifdef __hppa__ should completely optimize this function away for 102 * #ifdef __hppa__ should completely optimize this function away for
103 * non-parisc hardware. 103 * non-parisc hardware.
104 */ 104 */
105static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) 105static void tulip_build_fake_mediatable(struct tulip_private *tp)
106{ 106{
107#ifdef CONFIG_GSC 107#ifdef CONFIG_GSC
108 if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) { 108 if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) {
@@ -140,7 +140,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
140#endif 140#endif
141} 141}
142 142
143void __devinit tulip_parse_eeprom(struct net_device *dev) 143void tulip_parse_eeprom(struct net_device *dev)
144{ 144{
145 /* 145 /*
146 dev is not registered at this point, so logging messages can't 146 dev is not registered at this point, so logging messages can't
@@ -339,7 +339,7 @@ subsequent_board:
339#define EE_READ_CMD (6) 339#define EE_READ_CMD (6)
340 340
341/* Note: this routine returns extra data bits for size detection. */ 341/* Note: this routine returns extra data bits for size detection. */
342int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len) 342int tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
343{ 343{
344 int i; 344 int i;
345 unsigned retval = 0; 345 unsigned retval = 0;
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index ae937c6749e7..93a4afaa09f1 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -447,7 +447,7 @@ int tulip_check_duplex(struct net_device *dev)
447 return 0; 447 return 0;
448} 448}
449 449
450void __devinit tulip_find_mii (struct net_device *dev, int board_idx) 450void tulip_find_mii(struct net_device *dev, int board_idx)
451{ 451{
452 struct tulip_private *tp = netdev_priv(dev); 452 struct tulip_private *tp = netdev_priv(dev);
453 int phyn, phy_idx = 0; 453 int phyn, phy_idx = 0;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 885700a19978..1e9443d9fb57 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -37,7 +37,7 @@
37#include <asm/prom.h> 37#include <asm/prom.h>
38#endif 38#endif
39 39
40static char version[] __devinitdata = 40static char version[] =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n"; 41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42 42
43/* A few user-configurable values. */ 43/* A few user-configurable values. */
@@ -1191,8 +1191,7 @@ static void set_rx_mode(struct net_device *dev)
1191} 1191}
1192 1192
1193#ifdef CONFIG_TULIP_MWI 1193#ifdef CONFIG_TULIP_MWI
1194static void __devinit tulip_mwi_config (struct pci_dev *pdev, 1194static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1195 struct net_device *dev)
1196{ 1195{
1197 struct tulip_private *tp = netdev_priv(dev); 1196 struct tulip_private *tp = netdev_priv(dev);
1198 u8 cache; 1197 u8 cache;
@@ -1301,8 +1300,7 @@ DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1301 { }, 1300 { },
1302}; 1301};
1303 1302
1304static int __devinit tulip_init_one (struct pci_dev *pdev, 1303static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1305 const struct pci_device_id *ent)
1306{ 1304{
1307 struct tulip_private *tp; 1305 struct tulip_private *tp;
1308 /* See note below on the multiport cards. */ 1306 /* See note below on the multiport cards. */
@@ -1927,7 +1925,7 @@ static int tulip_resume(struct pci_dev *pdev)
1927#endif /* CONFIG_PM */ 1925#endif /* CONFIG_PM */
1928 1926
1929 1927
1930static void __devexit tulip_remove_one (struct pci_dev *pdev) 1928static void tulip_remove_one(struct pci_dev *pdev)
1931{ 1929{
1932 struct net_device *dev = pci_get_drvdata (pdev); 1930 struct net_device *dev = pci_get_drvdata (pdev);
1933 struct tulip_private *tp; 1931 struct tulip_private *tp;
@@ -1974,7 +1972,7 @@ static struct pci_driver tulip_driver = {
1974 .name = DRV_NAME, 1972 .name = DRV_NAME,
1975 .id_table = tulip_pci_tbl, 1973 .id_table = tulip_pci_tbl,
1976 .probe = tulip_init_one, 1974 .probe = tulip_init_one,
1977 .remove = __devexit_p(tulip_remove_one), 1975 .remove = tulip_remove_one,
1978#ifdef CONFIG_PM 1976#ifdef CONFIG_PM
1979 .suspend = tulip_suspend, 1977 .suspend = tulip_suspend,
1980 .resume = tulip_resume, 1978 .resume = tulip_resume,
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 75d45f8a37dc..93845afe1cea 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -204,8 +204,8 @@ enum uli526x_CR6_bits {
204}; 204};
205 205
206/* Global variable declaration ----------------------------- */ 206/* Global variable declaration ----------------------------- */
207static int __devinitdata printed_version; 207static int printed_version;
208static const char version[] __devinitconst = 208static const char version[] =
209 "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; 209 "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
210 210
211static int uli526x_debug; 211static int uli526x_debug;
@@ -281,8 +281,8 @@ static const struct net_device_ops netdev_ops = {
281 * Search ULI526X board, allocate space and register it 281 * Search ULI526X board, allocate space and register it
282 */ 282 */
283 283
284static int __devinit uli526x_init_one (struct pci_dev *pdev, 284static int uli526x_init_one(struct pci_dev *pdev,
285 const struct pci_device_id *ent) 285 const struct pci_device_id *ent)
286{ 286{
287 struct uli526x_board_info *db; /* board information structure */ 287 struct uli526x_board_info *db; /* board information structure */
288 struct net_device *dev; 288 struct net_device *dev;
@@ -436,7 +436,7 @@ err_out_free:
436} 436}
437 437
438 438
439static void __devexit uli526x_remove_one (struct pci_dev *pdev) 439static void uli526x_remove_one(struct pci_dev *pdev)
440{ 440{
441 struct net_device *dev = pci_get_drvdata(pdev); 441 struct net_device *dev = pci_get_drvdata(pdev);
442 struct uli526x_board_info *db = netdev_priv(dev); 442 struct uli526x_board_info *db = netdev_priv(dev);
@@ -1788,7 +1788,7 @@ static struct pci_driver uli526x_driver = {
1788 .name = "uli526x", 1788 .name = "uli526x",
1789 .id_table = uli526x_pci_tbl, 1789 .id_table = uli526x_pci_tbl,
1790 .probe = uli526x_init_one, 1790 .probe = uli526x_init_one,
1791 .remove = __devexit_p(uli526x_remove_one), 1791 .remove = uli526x_remove_one,
1792 .suspend = uli526x_suspend, 1792 .suspend = uli526x_suspend,
1793 .resume = uli526x_resume, 1793 .resume = uli526x_resume,
1794}; 1794};
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 7c1ec4d7920b..c7b04ecf5b49 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -236,7 +236,7 @@ struct pci_id_info {
236 int drv_flags; /* Driver use, intended as capability flags. */ 236 int drv_flags; /* Driver use, intended as capability flags. */
237}; 237};
238 238
239static const struct pci_id_info pci_id_tbl[] __devinitconst = { 239static const struct pci_id_info pci_id_tbl[] = {
240 { /* Sometime a Level-One switch card. */ 240 { /* Sometime a Level-One switch card. */
241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, 241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242 { "Winbond W89c840", CanHaveMII | HasBrokenTx}, 242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
@@ -358,8 +358,7 @@ static const struct net_device_ops netdev_ops = {
358 .ndo_validate_addr = eth_validate_addr, 358 .ndo_validate_addr = eth_validate_addr,
359}; 359};
360 360
361static int __devinit w840_probe1 (struct pci_dev *pdev, 361static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
362 const struct pci_device_id *ent)
363{ 362{
364 struct net_device *dev; 363 struct net_device *dev;
365 struct netdev_private *np; 364 struct netdev_private *np;
@@ -1532,7 +1531,7 @@ static int netdev_close(struct net_device *dev)
1532 return 0; 1531 return 0;
1533} 1532}
1534 1533
1535static void __devexit w840_remove1 (struct pci_dev *pdev) 1534static void w840_remove1(struct pci_dev *pdev)
1536{ 1535{
1537 struct net_device *dev = pci_get_drvdata(pdev); 1536 struct net_device *dev = pci_get_drvdata(pdev);
1538 1537
@@ -1647,7 +1646,7 @@ static struct pci_driver w840_driver = {
1647 .name = DRV_NAME, 1646 .name = DRV_NAME,
1648 .id_table = w840_pci_tbl, 1647 .id_table = w840_pci_tbl,
1649 .probe = w840_probe1, 1648 .probe = w840_probe1,
1650 .remove = __devexit_p(w840_remove1), 1649 .remove = w840_remove1,
1651#ifdef CONFIG_PM 1650#ifdef CONFIG_PM
1652 .suspend = w840_suspend, 1651 .suspend = w840_suspend,
1653 .resume = w840_resume, 1652 .resume = w840_resume,
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 138bf83bc98e..88feced9a629 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -148,7 +148,7 @@ static struct pci_driver xircom_ops = {
148 .name = "xircom_cb", 148 .name = "xircom_cb",
149 .id_table = xircom_pci_table, 149 .id_table = xircom_pci_table,
150 .probe = xircom_probe, 150 .probe = xircom_probe,
151 .remove = __devexit_p(xircom_remove), 151 .remove = xircom_remove,
152}; 152};
153 153
154 154
@@ -190,7 +190,7 @@ static const struct net_device_ops netdev_ops = {
190 first two packets that get send, and pump hates that. 190 first two packets that get send, and pump hates that.
191 191
192 */ 192 */
193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) 193static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
194{ 194{
195 struct device *d = &pdev->dev; 195 struct device *d = &pdev->dev;
196 struct net_device *dev = NULL; 196 struct net_device *dev = NULL;
@@ -312,7 +312,7 @@ err_disable:
312 Interrupts and such are already stopped in the "ifconfig ethX down" 312 Interrupts and such are already stopped in the "ifconfig ethX down"
313 code. 313 code.
314 */ 314 */
315static void __devexit xircom_remove(struct pci_dev *pdev) 315static void xircom_remove(struct pci_dev *pdev)
316{ 316{
317 struct net_device *dev = pci_get_drvdata(pdev); 317 struct net_device *dev = pci_get_drvdata(pdev);
318 struct xircom_private *card = netdev_priv(dev); 318 struct xircom_private *card = netdev_priv(dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a059f0c27e28..1d342d37915c 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -23,7 +23,7 @@
23#define dr16(reg) ioread16(ioaddr + (reg)) 23#define dr16(reg) ioread16(ioaddr + (reg))
24#define dr8(reg) ioread8(ioaddr + (reg)) 24#define dr8(reg) ioread8(ioaddr + (reg))
25 25
26static char version[] __devinitdata = 26static char version[] =
27 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 27 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
28#define MAX_UNITS 8 28#define MAX_UNITS 8
29static int mtu[MAX_UNITS]; 29static int mtu[MAX_UNITS];
@@ -110,7 +110,7 @@ static const struct net_device_ops netdev_ops = {
110 .ndo_change_mtu = change_mtu, 110 .ndo_change_mtu = change_mtu,
111}; 111};
112 112
113static int __devinit 113static int
114rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) 114rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
115{ 115{
116 struct net_device *dev; 116 struct net_device *dev;
@@ -1727,7 +1727,7 @@ rio_close (struct net_device *dev)
1727 return 0; 1727 return 0;
1728} 1728}
1729 1729
1730static void __devexit 1730static void
1731rio_remove1 (struct pci_dev *pdev) 1731rio_remove1 (struct pci_dev *pdev)
1732{ 1732{
1733 struct net_device *dev = pci_get_drvdata (pdev); 1733 struct net_device *dev = pci_get_drvdata (pdev);
@@ -1755,24 +1755,10 @@ static struct pci_driver rio_driver = {
1755 .name = "dl2k", 1755 .name = "dl2k",
1756 .id_table = rio_pci_tbl, 1756 .id_table = rio_pci_tbl,
1757 .probe = rio_probe1, 1757 .probe = rio_probe1,
1758 .remove = __devexit_p(rio_remove1), 1758 .remove = rio_remove1,
1759}; 1759};
1760 1760
1761static int __init 1761module_pci_driver(rio_driver);
1762rio_init (void)
1763{
1764 return pci_register_driver(&rio_driver);
1765}
1766
1767static void __exit
1768rio_exit (void)
1769{
1770 pci_unregister_driver (&rio_driver);
1771}
1772
1773module_init (rio_init);
1774module_exit (rio_exit);
1775
1776/* 1762/*
1777 1763
1778Compile command: 1764Compile command:
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 3b83588e51f6..28fc11b2f1ea 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -102,7 +102,7 @@ static char *media[MAX_UNITS];
102#include <linux/mii.h> 102#include <linux/mii.h>
103 103
104/* These identify the driver base version and may not be removed. */ 104/* These identify the driver base version and may not be removed. */
105static const char version[] __devinitconst = 105static const char version[] =
106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE 106 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
107 " Written by Donald Becker\n"; 107 " Written by Donald Becker\n";
108 108
@@ -218,7 +218,7 @@ enum {
218struct pci_id_info { 218struct pci_id_info {
219 const char *name; 219 const char *name;
220}; 220};
221static const struct pci_id_info pci_id_tbl[] __devinitconst = { 221static const struct pci_id_info pci_id_tbl[] = {
222 {"D-Link DFE-550TX FAST Ethernet Adapter"}, 222 {"D-Link DFE-550TX FAST Ethernet Adapter"},
223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, 223 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
224 {"D-Link DFE-580TX 4 port Server Adapter"}, 224 {"D-Link DFE-580TX 4 port Server Adapter"},
@@ -259,6 +259,7 @@ enum alta_offsets {
259 EECtrl = 0x36, 259 EECtrl = 0x36,
260 FlashAddr = 0x40, 260 FlashAddr = 0x40,
261 FlashData = 0x44, 261 FlashData = 0x44,
262 WakeEvent = 0x45,
262 TxStatus = 0x46, 263 TxStatus = 0x46,
263 TxFrameId = 0x47, 264 TxFrameId = 0x47,
264 DownCounter = 0x18, 265 DownCounter = 0x18,
@@ -333,6 +334,14 @@ enum mac_ctrl1_bits {
333 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, 334 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
334}; 335};
335 336
337/* Bits in WakeEvent register. */
338enum wake_event_bits {
339 WakePktEnable = 0x01,
340 MagicPktEnable = 0x02,
341 LinkEventEnable = 0x04,
342 WolEnable = 0x80,
343};
344
336/* The Rx and Tx buffer descriptors. */ 345/* The Rx and Tx buffer descriptors. */
337/* Note that using only 32 bit fields simplifies conversion to big-endian 346/* Note that using only 32 bit fields simplifies conversion to big-endian
338 architectures. */ 347 architectures. */
@@ -392,6 +401,7 @@ struct netdev_private {
392 unsigned int default_port:4; /* Last dev->if_port value. */ 401 unsigned int default_port:4; /* Last dev->if_port value. */
393 unsigned int an_enable:1; 402 unsigned int an_enable:1;
394 unsigned int speed; 403 unsigned int speed;
404 unsigned int wol_enabled:1; /* Wake on LAN enabled */
395 struct tasklet_struct rx_tasklet; 405 struct tasklet_struct rx_tasklet;
396 struct tasklet_struct tx_tasklet; 406 struct tasklet_struct tx_tasklet;
397 int budget; 407 int budget;
@@ -472,8 +482,8 @@ static const struct net_device_ops netdev_ops = {
472 .ndo_validate_addr = eth_validate_addr, 482 .ndo_validate_addr = eth_validate_addr,
473}; 483};
474 484
475static int __devinit sundance_probe1 (struct pci_dev *pdev, 485static int sundance_probe1(struct pci_dev *pdev,
476 const struct pci_device_id *ent) 486 const struct pci_device_id *ent)
477{ 487{
478 struct net_device *dev; 488 struct net_device *dev;
479 struct netdev_private *np; 489 struct netdev_private *np;
@@ -701,7 +711,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
701 711
702#define eeprom_delay(ee_addr) ioread32(ee_addr) 712#define eeprom_delay(ee_addr) ioread32(ee_addr)
703/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ 713/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
704static int __devinit eeprom_read(void __iomem *ioaddr, int location) 714static int eeprom_read(void __iomem *ioaddr, int location)
705{ 715{
706 int boguscnt = 10000; /* Typical 1900 ticks. */ 716 int boguscnt = 10000; /* Typical 1900 ticks. */
707 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); 717 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
@@ -829,7 +839,7 @@ static int netdev_open(struct net_device *dev)
829 unsigned long flags; 839 unsigned long flags;
830 int i; 840 int i;
831 841
832 /* Do we need to reset the chip??? */ 842 sundance_reset(dev, 0x00ff << 16);
833 843
834 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev); 844 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
835 if (i) 845 if (i)
@@ -877,6 +887,10 @@ static int netdev_open(struct net_device *dev)
877 887
878 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); 888 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
879 889
890 /* Disable Wol */
891 iowrite8(ioread8(ioaddr + WakeEvent) | 0x00, ioaddr + WakeEvent);
892 np->wol_enabled = 0;
893
880 if (netif_msg_ifup(np)) 894 if (netif_msg_ifup(np))
881 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " 895 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
882 "MAC Control %x, %4.4x %4.4x.\n", 896 "MAC Control %x, %4.4x %4.4x.\n",
@@ -1715,6 +1729,60 @@ static void get_ethtool_stats(struct net_device *dev,
1715 data[i++] = np->xstats.rx_mcasts; 1729 data[i++] = np->xstats.rx_mcasts;
1716} 1730}
1717 1731
1732#ifdef CONFIG_PM
1733
1734static void sundance_get_wol(struct net_device *dev,
1735 struct ethtool_wolinfo *wol)
1736{
1737 struct netdev_private *np = netdev_priv(dev);
1738 void __iomem *ioaddr = np->base;
1739 u8 wol_bits;
1740
1741 wol->wolopts = 0;
1742
1743 wol->supported = (WAKE_PHY | WAKE_MAGIC);
1744 if (!np->wol_enabled)
1745 return;
1746
1747 wol_bits = ioread8(ioaddr + WakeEvent);
1748 if (wol_bits & MagicPktEnable)
1749 wol->wolopts |= WAKE_MAGIC;
1750 if (wol_bits & LinkEventEnable)
1751 wol->wolopts |= WAKE_PHY;
1752}
1753
1754static int sundance_set_wol(struct net_device *dev,
1755 struct ethtool_wolinfo *wol)
1756{
1757 struct netdev_private *np = netdev_priv(dev);
1758 void __iomem *ioaddr = np->base;
1759 u8 wol_bits;
1760
1761 if (!device_can_wakeup(&np->pci_dev->dev))
1762 return -EOPNOTSUPP;
1763
1764 np->wol_enabled = !!(wol->wolopts);
1765 wol_bits = ioread8(ioaddr + WakeEvent);
1766 wol_bits &= ~(WakePktEnable | MagicPktEnable |
1767 LinkEventEnable | WolEnable);
1768
1769 if (np->wol_enabled) {
1770 if (wol->wolopts & WAKE_MAGIC)
1771 wol_bits |= (MagicPktEnable | WolEnable);
1772 if (wol->wolopts & WAKE_PHY)
1773 wol_bits |= (LinkEventEnable | WolEnable);
1774 }
1775 iowrite8(wol_bits, ioaddr + WakeEvent);
1776
1777 device_set_wakeup_enable(&np->pci_dev->dev, np->wol_enabled);
1778
1779 return 0;
1780}
1781#else
1782#define sundance_get_wol NULL
1783#define sundance_set_wol NULL
1784#endif /* CONFIG_PM */
1785
1718static const struct ethtool_ops ethtool_ops = { 1786static const struct ethtool_ops ethtool_ops = {
1719 .begin = check_if_running, 1787 .begin = check_if_running,
1720 .get_drvinfo = get_drvinfo, 1788 .get_drvinfo = get_drvinfo,
@@ -1722,6 +1790,8 @@ static const struct ethtool_ops ethtool_ops = {
1722 .set_settings = set_settings, 1790 .set_settings = set_settings,
1723 .nway_reset = nway_reset, 1791 .nway_reset = nway_reset,
1724 .get_link = get_link, 1792 .get_link = get_link,
1793 .get_wol = sundance_get_wol,
1794 .set_wol = sundance_set_wol,
1725 .get_msglevel = get_msglevel, 1795 .get_msglevel = get_msglevel,
1726 .set_msglevel = set_msglevel, 1796 .set_msglevel = set_msglevel,
1727 .get_strings = get_strings, 1797 .get_strings = get_strings,
@@ -1844,7 +1914,7 @@ static int netdev_close(struct net_device *dev)
1844 return 0; 1914 return 0;
1845} 1915}
1846 1916
1847static void __devexit sundance_remove1 (struct pci_dev *pdev) 1917static void sundance_remove1(struct pci_dev *pdev)
1848{ 1918{
1849 struct net_device *dev = pci_get_drvdata(pdev); 1919 struct net_device *dev = pci_get_drvdata(pdev);
1850 1920
@@ -1867,6 +1937,8 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
1867static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state) 1937static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1868{ 1938{
1869 struct net_device *dev = pci_get_drvdata(pci_dev); 1939 struct net_device *dev = pci_get_drvdata(pci_dev);
1940 struct netdev_private *np = netdev_priv(dev);
1941 void __iomem *ioaddr = np->base;
1870 1942
1871 if (!netif_running(dev)) 1943 if (!netif_running(dev))
1872 return 0; 1944 return 0;
@@ -1875,6 +1947,12 @@ static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
1875 netif_device_detach(dev); 1947 netif_device_detach(dev);
1876 1948
1877 pci_save_state(pci_dev); 1949 pci_save_state(pci_dev);
1950 if (np->wol_enabled) {
1951 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1952 iowrite16(RxEnable, ioaddr + MACCtrl1);
1953 }
1954 pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
1955 np->wol_enabled);
1878 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 1956 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
1879 1957
1880 return 0; 1958 return 0;
@@ -1890,6 +1968,7 @@ static int sundance_resume(struct pci_dev *pci_dev)
1890 1968
1891 pci_set_power_state(pci_dev, PCI_D0); 1969 pci_set_power_state(pci_dev, PCI_D0);
1892 pci_restore_state(pci_dev); 1970 pci_restore_state(pci_dev);
1971 pci_enable_wake(pci_dev, PCI_D0, 0);
1893 1972
1894 err = netdev_open(dev); 1973 err = netdev_open(dev);
1895 if (err) { 1974 if (err) {
@@ -1910,7 +1989,7 @@ static struct pci_driver sundance_driver = {
1910 .name = DRV_NAME, 1989 .name = DRV_NAME,
1911 .id_table = sundance_pci_tbl, 1990 .id_table = sundance_pci_tbl,
1912 .probe = sundance_probe1, 1991 .probe = sundance_probe1,
1913 .remove = __devexit_p(sundance_remove1), 1992 .remove = sundance_remove1,
1914#ifdef CONFIG_PM 1993#ifdef CONFIG_PM
1915 .suspend = sundance_suspend, 1994 .suspend = sundance_suspend,
1916 .resume = sundance_resume, 1995 .resume = sundance_resume,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 290b26f868c9..2c177b329c8b 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -72,7 +72,7 @@ static void __dnet_set_hwaddr(struct dnet *bp)
72 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); 72 dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
73} 73}
74 74
75static void __devinit dnet_get_hwaddr(struct dnet *bp) 75static void dnet_get_hwaddr(struct dnet *bp)
76{ 76{
77 u16 tmp; 77 u16 tmp;
78 u8 addr[6]; 78 u8 addr[6];
@@ -664,9 +664,6 @@ static int dnet_open(struct net_device *dev)
664 if (!bp->phy_dev) 664 if (!bp->phy_dev)
665 return -EAGAIN; 665 return -EAGAIN;
666 666
667 if (!is_valid_ether_addr(dev->dev_addr))
668 return -EADDRNOTAVAIL;
669
670 napi_enable(&bp->napi); 667 napi_enable(&bp->napi);
671 dnet_init_hw(bp); 668 dnet_init_hw(bp);
672 669
@@ -829,7 +826,7 @@ static const struct net_device_ops dnet_netdev_ops = {
829 .ndo_change_mtu = eth_change_mtu, 826 .ndo_change_mtu = eth_change_mtu,
830}; 827};
831 828
832static int __devinit dnet_probe(struct platform_device *pdev) 829static int dnet_probe(struct platform_device *pdev)
833{ 830{
834 struct resource *res; 831 struct resource *res;
835 struct net_device *dev; 832 struct net_device *dev;
@@ -945,7 +942,7 @@ err_out:
945 return err; 942 return err;
946} 943}
947 944
948static int __devexit dnet_remove(struct platform_device *pdev) 945static int dnet_remove(struct platform_device *pdev)
949{ 946{
950 947
951 struct net_device *dev; 948 struct net_device *dev;
@@ -971,7 +968,7 @@ static int __devexit dnet_remove(struct platform_device *pdev)
971 968
972static struct platform_driver dnet_driver = { 969static struct platform_driver dnet_driver = {
973 .probe = dnet_probe, 970 .probe = dnet_probe,
974 .remove = __devexit_p(dnet_remove), 971 .remove = dnet_remove,
975 .driver = { 972 .driver = {
976 .name = "dnet", 973 .name = "dnet",
977 }, 974 },
diff --git a/drivers/net/ethernet/emulex/Kconfig b/drivers/net/ethernet/emulex/Kconfig
index 7a28a6433944..1b8d638c6cb1 100644
--- a/drivers/net/ethernet/emulex/Kconfig
+++ b/drivers/net/ethernet/emulex/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_EMULEX 5config NET_VENDOR_EMULEX
6 bool "Emulex devices" 6 bool "Emulex devices"
7 default y 7 default y
8 depends on PCI && INET 8 depends on PCI
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 804db04a2bd0..231129dd1764 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -1,6 +1,6 @@
1config BE2NET 1config BE2NET
2 tristate "ServerEngines' 10Gbps NIC - BladeEngine" 2 tristate "ServerEngines' 10Gbps NIC - BladeEngine"
3 depends on PCI && INET 3 depends on PCI
4 ---help--- 4 ---help---
5 This driver implements the NIC functionality for ServerEngines' 5 This driver implements the NIC functionality for ServerEngines'
6 10Gbps network adapter - BladeEngine. 6 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf4c05bdf5fe..abf26c7c1d19 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER "4.4.31.0u" 37#define DRV_VER "4.4.161.0u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 39#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 40#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -53,6 +53,7 @@
53#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */ 53#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
54#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */ 54#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
55#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */ 55#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
56#define OC_DEVICE_ID6 0x728 /* Device id for VF in SkyHawk */
56#define OC_SUBSYS_DEVICE_ID1 0xE602 57#define OC_SUBSYS_DEVICE_ID1 0xE602
57#define OC_SUBSYS_DEVICE_ID2 0xE642 58#define OC_SUBSYS_DEVICE_ID2 0xE642
58#define OC_SUBSYS_DEVICE_ID3 0xE612 59#define OC_SUBSYS_DEVICE_ID3 0xE612
@@ -71,6 +72,7 @@ static inline char *nic_name(struct pci_dev *pdev)
71 case BE_DEVICE_ID2: 72 case BE_DEVICE_ID2:
72 return BE3_NAME; 73 return BE3_NAME;
73 case OC_DEVICE_ID5: 74 case OC_DEVICE_ID5:
75 case OC_DEVICE_ID6:
74 return OC_NAME_SH; 76 return OC_NAME_SH;
75 default: 77 default:
76 return BE_NAME; 78 return BE_NAME;
@@ -346,7 +348,6 @@ struct be_adapter {
346 struct pci_dev *pdev; 348 struct pci_dev *pdev;
347 struct net_device *netdev; 349 struct net_device *netdev;
348 350
349 u8 __iomem *csr;
350 u8 __iomem *db; /* Door Bell */ 351 u8 __iomem *db; /* Door Bell */
351 352
352 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 353 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
@@ -374,11 +375,8 @@ struct be_adapter {
374 struct be_rx_obj rx_obj[MAX_RX_QS]; 375 struct be_rx_obj rx_obj[MAX_RX_QS];
375 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 376 u32 big_page_size; /* Compounded page size shared by rx wrbs */
376 377
377 u8 eq_next_idx;
378 struct be_drv_stats drv_stats; 378 struct be_drv_stats drv_stats;
379
380 u16 vlans_added; 379 u16 vlans_added;
381 u16 max_vlans; /* Number of vlans supported */
382 u8 vlan_tag[VLAN_N_VID]; 380 u8 vlan_tag[VLAN_N_VID];
383 u8 vlan_prio_bmap; /* Available Priority BitMap */ 381 u8 vlan_prio_bmap; /* Available Priority BitMap */
384 u16 recommended_prio; /* Recommended Priority */ 382 u16 recommended_prio; /* Recommended Priority */
@@ -391,6 +389,7 @@ struct be_adapter {
391 389
392 struct delayed_work func_recovery_work; 390 struct delayed_work func_recovery_work;
393 u32 flags; 391 u32 flags;
392 u32 cmd_privileges;
394 /* Ethtool knobs and info */ 393 /* Ethtool knobs and info */
395 char fw_ver[FW_VER_LEN]; 394 char fw_ver[FW_VER_LEN];
396 int if_handle; /* Used to configure filtering */ 395 int if_handle; /* Used to configure filtering */
@@ -408,10 +407,8 @@ struct be_adapter {
408 u32 rx_fc; /* Rx flow control */ 407 u32 rx_fc; /* Rx flow control */
409 u32 tx_fc; /* Tx flow control */ 408 u32 tx_fc; /* Tx flow control */
410 bool stats_cmd_sent; 409 bool stats_cmd_sent;
411 u8 generation; /* BladeEngine ASIC generation */
412 u32 if_type; 410 u32 if_type;
413 struct { 411 struct {
414 u8 __iomem *base; /* Door Bell */
415 u32 size; 412 u32 size;
416 u32 total_size; 413 u32 total_size;
417 u64 io_addr; 414 u64 io_addr;
@@ -434,10 +431,18 @@ struct be_adapter {
434 struct phy_info phy; 431 struct phy_info phy;
435 u8 wol_cap; 432 u8 wol_cap;
436 bool wol; 433 bool wol;
437 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
438 u32 uc_macs; /* Count of secondary UC MAC programmed */ 434 u32 uc_macs; /* Count of secondary UC MAC programmed */
439 u32 msg_enable; 435 u32 msg_enable;
440 int be_get_temp_freq; 436 int be_get_temp_freq;
437 u16 max_mcast_mac;
438 u16 max_tx_queues;
439 u16 max_rss_queues;
440 u16 max_rx_queues;
441 u16 max_pmac_cnt;
442 u16 max_vlans;
443 u16 max_event_queues;
444 u32 if_cap_flags;
445 u8 pf_number;
441}; 446};
442 447
443#define be_physfn(adapter) (!adapter->virtfn) 448#define be_physfn(adapter) (!adapter->virtfn)
@@ -448,21 +453,25 @@ struct be_adapter {
448 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \ 453 for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
449 i++, vf_cfg++) 454 i++, vf_cfg++)
450 455
451/* BladeEngine Generation numbers */
452#define BE_GEN2 2
453#define BE_GEN3 3
454
455#define ON 1 456#define ON 1
456#define OFF 0 457#define OFF 0
457#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
458 (adapter->pdev->device == OC_DEVICE_ID4))
459 458
460#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5) 459#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
460 adapter->pdev->device == OC_DEVICE_ID4)
461
462#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5 || \
463 adapter->pdev->device == OC_DEVICE_ID6)
464
465#define BE3_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID2 || \
466 adapter->pdev->device == OC_DEVICE_ID2)
461 467
468#define BE2_chip(adapter) (adapter->pdev->device == BE_DEVICE_ID1 || \
469 adapter->pdev->device == OC_DEVICE_ID1)
462 470
463#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \ 471#define BEx_chip(adapter) (BE3_chip(adapter) || BE2_chip(adapter))
464 adapter->sli_family == SKYHAWK_SLI_FAMILY) && \ 472
465 (adapter->function_mode & RDMA_ENABLED)) 473#define be_roce_supported(adapter) (skyhawk_chip(adapter) && \
474 (adapter->function_mode & RDMA_ENABLED))
466 475
467extern const struct ethtool_ops be_ethtool_ops; 476extern const struct ethtool_ops be_ethtool_ops;
468 477
@@ -637,12 +646,6 @@ static inline bool be_is_wol_excluded(struct be_adapter *adapter)
637 } 646 }
638} 647}
639 648
640static inline bool be_type_2_3(struct be_adapter *adapter)
641{
642 return (adapter->if_type == SLI_INTF_TYPE_2 ||
643 adapter->if_type == SLI_INTF_TYPE_3) ? true : false;
644}
645
646extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, 649extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
647 u16 num_popped); 650 u16 num_popped);
648extern void be_link_status_update(struct be_adapter *adapter, u8 link_status); 651extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index af60bb26e330..f2875aa47661 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,6 +19,55 @@
19#include "be.h" 19#include "be.h"
20#include "be_cmds.h" 20#include "be_cmds.h"
21 21
22static struct be_cmd_priv_map cmd_priv_map[] = {
23 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 CMD_SUBSYSTEM_ETH,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 },
29 {
30 OPCODE_COMMON_GET_FLOW_CONTROL,
31 CMD_SUBSYSTEM_COMMON,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 },
35 {
36 OPCODE_COMMON_SET_FLOW_CONTROL,
37 CMD_SUBSYSTEM_COMMON,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 },
41 {
42 OPCODE_ETH_GET_PPORT_STATS,
43 CMD_SUBSYSTEM_ETH,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 },
47 {
48 OPCODE_COMMON_GET_PHY_DETAILS,
49 CMD_SUBSYSTEM_COMMON,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 }
53};
54
55static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56 u8 subsystem)
57{
58 int i;
59 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
61
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66 return false;
67
68 return true;
69}
70
22static inline void *embedded_payload(struct be_mcc_wrb *wrb) 71static inline void *embedded_payload(struct be_mcc_wrb *wrb)
23{ 72{
24 return wrb->payload.embedded_payload; 73 return wrb->payload.embedded_payload;
@@ -419,14 +468,13 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
419static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 468static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
420{ 469{
421 u32 sem; 470 u32 sem;
471 u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
472 SLIPORT_SEMAPHORE_OFFSET_BE;
422 473
423 if (lancer_chip(adapter)) 474 pci_read_config_dword(adapter->pdev, reg, &sem);
424 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); 475 *stage = sem & POST_STAGE_MASK;
425 else
426 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
427 476
428 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 477 if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
429 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
430 return -1; 478 return -1;
431 else 479 else
432 return 0; 480 return 0;
@@ -452,10 +500,33 @@ int lancer_wait_ready(struct be_adapter *adapter)
452 return status; 500 return status;
453} 501}
454 502
503static bool lancer_provisioning_error(struct be_adapter *adapter)
504{
505 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
506 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
507 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
508 sliport_err1 = ioread32(adapter->db +
509 SLIPORT_ERROR1_OFFSET);
510 sliport_err2 = ioread32(adapter->db +
511 SLIPORT_ERROR2_OFFSET);
512
513 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
514 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
515 return true;
516 }
517 return false;
518}
519
455int lancer_test_and_set_rdy_state(struct be_adapter *adapter) 520int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
456{ 521{
457 int status; 522 int status;
458 u32 sliport_status, err, reset_needed; 523 u32 sliport_status, err, reset_needed;
524 bool resource_error;
525
526 resource_error = lancer_provisioning_error(adapter);
527 if (resource_error)
528 return -1;
529
459 status = lancer_wait_ready(adapter); 530 status = lancer_wait_ready(adapter);
460 if (!status) { 531 if (!status) {
461 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 532 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
@@ -477,6 +548,14 @@ int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
477 status = -1; 548 status = -1;
478 } 549 }
479 } 550 }
551 /* Stop error recovery if error is not recoverable.
552 * No resource error is temporary errors and will go away
553 * when PF provisions resources.
554 */
555 resource_error = lancer_provisioning_error(adapter);
556 if (status == -1 && !resource_error)
557 adapter->eeh_error = true;
558
480 return status; 559 return status;
481} 560}
482 561
@@ -601,6 +680,9 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
601 struct be_queue_info *mccq = &adapter->mcc_obj.q; 680 struct be_queue_info *mccq = &adapter->mcc_obj.q;
602 struct be_mcc_wrb *wrb; 681 struct be_mcc_wrb *wrb;
603 682
683 if (!mccq->created)
684 return NULL;
685
604 if (atomic_read(&mccq->used) >= mccq->len) { 686 if (atomic_read(&mccq->used) >= mccq->len) {
605 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); 687 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
606 return NULL; 688 return NULL;
@@ -1155,8 +1237,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1155 req->id = cpu_to_le16(q->id); 1237 req->id = cpu_to_le16(q->id);
1156 1238
1157 status = be_mbox_notify_wait(adapter); 1239 status = be_mbox_notify_wait(adapter);
1158 if (!status) 1240 q->created = false;
1159 q->created = false;
1160 1241
1161 mutex_unlock(&adapter->mbox_lock); 1242 mutex_unlock(&adapter->mbox_lock);
1162 return status; 1243 return status;
@@ -1183,8 +1264,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1183 req->id = cpu_to_le16(q->id); 1264 req->id = cpu_to_le16(q->id);
1184 1265
1185 status = be_mcc_notify_wait(adapter); 1266 status = be_mcc_notify_wait(adapter);
1186 if (!status) 1267 q->created = false;
1187 q->created = false;
1188 1268
1189err: 1269err:
1190 spin_unlock_bh(&adapter->mcc_lock); 1270 spin_unlock_bh(&adapter->mcc_lock);
@@ -1281,7 +1361,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1281 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1361 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1282 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); 1362 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1283 1363
1284 if (adapter->generation == BE_GEN3) 1364 /* version 1 of the cmd is not supported only by BE2 */
1365 if (!BE2_chip(adapter))
1285 hdr->version = 1; 1366 hdr->version = 1;
1286 1367
1287 be_mcc_notify(adapter); 1368 be_mcc_notify(adapter);
@@ -1301,6 +1382,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1301 struct lancer_cmd_req_pport_stats *req; 1382 struct lancer_cmd_req_pport_stats *req;
1302 int status = 0; 1383 int status = 0;
1303 1384
1385 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1386 CMD_SUBSYSTEM_ETH))
1387 return -EPERM;
1388
1304 spin_lock_bh(&adapter->mcc_lock); 1389 spin_lock_bh(&adapter->mcc_lock);
1305 1390
1306 wrb = wrb_from_mccq(adapter); 1391 wrb = wrb_from_mccq(adapter);
@@ -1367,7 +1452,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1367 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1452 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1368 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); 1453 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1369 1454
1370 if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) 1455 /* version 1 of the cmd is not supported only by BE2 */
1456 if (!BE2_chip(adapter))
1371 req->hdr.version = 1; 1457 req->hdr.version = 1;
1372 1458
1373 req->hdr.domain = dom; 1459 req->hdr.domain = dom;
@@ -1658,9 +1744,9 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1658 /* Reset mcast promisc mode if already set by setting mask 1744 /* Reset mcast promisc mode if already set by setting mask
1659 * and not setting flags field 1745 * and not setting flags field
1660 */ 1746 */
1661 if (!lancer_chip(adapter) || be_physfn(adapter)) 1747 req->if_flags_mask |=
1662 req->if_flags_mask |= 1748 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1663 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); 1749 adapter->if_cap_flags);
1664 1750
1665 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); 1751 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1666 netdev_for_each_mc_addr(ha, adapter->netdev) 1752 netdev_for_each_mc_addr(ha, adapter->netdev)
@@ -1680,6 +1766,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1680 struct be_cmd_req_set_flow_control *req; 1766 struct be_cmd_req_set_flow_control *req;
1681 int status; 1767 int status;
1682 1768
1769 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1770 CMD_SUBSYSTEM_COMMON))
1771 return -EPERM;
1772
1683 spin_lock_bh(&adapter->mcc_lock); 1773 spin_lock_bh(&adapter->mcc_lock);
1684 1774
1685 wrb = wrb_from_mccq(adapter); 1775 wrb = wrb_from_mccq(adapter);
@@ -1709,6 +1799,10 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1709 struct be_cmd_req_get_flow_control *req; 1799 struct be_cmd_req_get_flow_control *req;
1710 int status; 1800 int status;
1711 1801
1802 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1803 CMD_SUBSYSTEM_COMMON))
1804 return -EPERM;
1805
1712 spin_lock_bh(&adapter->mcc_lock); 1806 spin_lock_bh(&adapter->mcc_lock);
1713 1807
1714 wrb = wrb_from_mccq(adapter); 1808 wrb = wrb_from_mccq(adapter);
@@ -2067,7 +2161,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2067 int offset) 2161 int offset)
2068{ 2162{
2069 struct be_mcc_wrb *wrb; 2163 struct be_mcc_wrb *wrb;
2070 struct be_cmd_write_flashrom *req; 2164 struct be_cmd_read_flash_crc *req;
2071 int status; 2165 int status;
2072 2166
2073 spin_lock_bh(&adapter->mcc_lock); 2167 spin_lock_bh(&adapter->mcc_lock);
@@ -2080,7 +2174,8 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2080 req = embedded_payload(wrb); 2174 req = embedded_payload(wrb);
2081 2175
2082 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2176 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2083 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); 2177 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2178 wrb, NULL);
2084 2179
2085 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT); 2180 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2086 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2181 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
@@ -2089,7 +2184,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2089 2184
2090 status = be_mcc_notify_wait(adapter); 2185 status = be_mcc_notify_wait(adapter);
2091 if (!status) 2186 if (!status)
2092 memcpy(flashed_crc, req->params.data_buf, 4); 2187 memcpy(flashed_crc, req->crc, 4);
2093 2188
2094err: 2189err:
2095 spin_unlock_bh(&adapter->mcc_lock); 2190 spin_unlock_bh(&adapter->mcc_lock);
@@ -2275,6 +2370,10 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
2275 struct be_dma_mem cmd; 2370 struct be_dma_mem cmd;
2276 int status; 2371 int status;
2277 2372
2373 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2374 CMD_SUBSYSTEM_COMMON))
2375 return -EPERM;
2376
2278 spin_lock_bh(&adapter->mcc_lock); 2377 spin_lock_bh(&adapter->mcc_lock);
2279 2378
2280 wrb = wrb_from_mccq(adapter); 2379 wrb = wrb_from_mccq(adapter);
@@ -2434,6 +2533,42 @@ err:
2434 return status; 2533 return status;
2435} 2534}
2436 2535
2536/* Get privilege(s) for a function */
2537int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2538 u32 domain)
2539{
2540 struct be_mcc_wrb *wrb;
2541 struct be_cmd_req_get_fn_privileges *req;
2542 int status;
2543
2544 spin_lock_bh(&adapter->mcc_lock);
2545
2546 wrb = wrb_from_mccq(adapter);
2547 if (!wrb) {
2548 status = -EBUSY;
2549 goto err;
2550 }
2551
2552 req = embedded_payload(wrb);
2553
2554 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2555 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2556 wrb, NULL);
2557
2558 req->hdr.domain = domain;
2559
2560 status = be_mcc_notify_wait(adapter);
2561 if (!status) {
2562 struct be_cmd_resp_get_fn_privileges *resp =
2563 embedded_payload(wrb);
2564 *privilege = le32_to_cpu(resp->privilege_mask);
2565 }
2566
2567err:
2568 spin_unlock_bh(&adapter->mcc_lock);
2569 return status;
2570}
2571
2437/* Uses synchronous MCCQ */ 2572/* Uses synchronous MCCQ */
2438int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 2573int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2439 bool *pmac_id_active, u32 *pmac_id, u8 domain) 2574 bool *pmac_id_active, u32 *pmac_id, u8 domain)
@@ -2651,6 +2786,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2651 int payload_len = sizeof(*req); 2786 int payload_len = sizeof(*req);
2652 struct be_dma_mem cmd; 2787 struct be_dma_mem cmd;
2653 2788
2789 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2790 CMD_SUBSYSTEM_ETH))
2791 return -EPERM;
2792
2654 memset(&cmd, 0, sizeof(struct be_dma_mem)); 2793 memset(&cmd, 0, sizeof(struct be_dma_mem));
2655 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 2794 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2656 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, 2795 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
@@ -2792,6 +2931,240 @@ err:
2792 return status; 2931 return status;
2793} 2932}
2794 2933
2934static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2935 u32 max_buf_size)
2936{
2937 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2938 int i;
2939
2940 for (i = 0; i < desc_count; i++) {
2941 desc->desc_len = RESOURCE_DESC_SIZE;
2942 if (((void *)desc + desc->desc_len) >
2943 (void *)(buf + max_buf_size)) {
2944 desc = NULL;
2945 break;
2946 }
2947
2948 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2949 break;
2950
2951 desc = (void *)desc + desc->desc_len;
2952 }
2953
2954 if (!desc || i == MAX_RESOURCE_DESC)
2955 return NULL;
2956
2957 return desc;
2958}
2959
2960/* Uses Mbox */
2961int be_cmd_get_func_config(struct be_adapter *adapter)
2962{
2963 struct be_mcc_wrb *wrb;
2964 struct be_cmd_req_get_func_config *req;
2965 int status;
2966 struct be_dma_mem cmd;
2967
2968 memset(&cmd, 0, sizeof(struct be_dma_mem));
2969 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2970 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2971 &cmd.dma);
2972 if (!cmd.va) {
2973 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2974 return -ENOMEM;
2975 }
2976 if (mutex_lock_interruptible(&adapter->mbox_lock))
2977 return -1;
2978
2979 wrb = wrb_from_mbox(adapter);
2980 if (!wrb) {
2981 status = -EBUSY;
2982 goto err;
2983 }
2984
2985 req = cmd.va;
2986
2987 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2988 OPCODE_COMMON_GET_FUNC_CONFIG,
2989 cmd.size, wrb, &cmd);
2990
2991 status = be_mbox_notify_wait(adapter);
2992 if (!status) {
2993 struct be_cmd_resp_get_func_config *resp = cmd.va;
2994 u32 desc_count = le32_to_cpu(resp->desc_count);
2995 struct be_nic_resource_desc *desc;
2996
2997 desc = be_get_nic_desc(resp->func_param, desc_count,
2998 sizeof(resp->func_param));
2999 if (!desc) {
3000 status = -EINVAL;
3001 goto err;
3002 }
3003
3004 adapter->pf_number = desc->pf_num;
3005 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3006 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3007 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3008 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3009 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3010 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3011
3012 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3013 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3014 }
3015err:
3016 mutex_unlock(&adapter->mbox_lock);
3017 pci_free_consistent(adapter->pdev, cmd.size,
3018 cmd.va, cmd.dma);
3019 return status;
3020}
3021
3022 /* Uses sync mcc */
3023int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3024 u8 domain)
3025{
3026 struct be_mcc_wrb *wrb;
3027 struct be_cmd_req_get_profile_config *req;
3028 int status;
3029 struct be_dma_mem cmd;
3030
3031 memset(&cmd, 0, sizeof(struct be_dma_mem));
3032 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3033 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3034 &cmd.dma);
3035 if (!cmd.va) {
3036 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3037 return -ENOMEM;
3038 }
3039
3040 spin_lock_bh(&adapter->mcc_lock);
3041
3042 wrb = wrb_from_mccq(adapter);
3043 if (!wrb) {
3044 status = -EBUSY;
3045 goto err;
3046 }
3047
3048 req = cmd.va;
3049
3050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3051 OPCODE_COMMON_GET_PROFILE_CONFIG,
3052 cmd.size, wrb, &cmd);
3053
3054 req->type = ACTIVE_PROFILE_TYPE;
3055 req->hdr.domain = domain;
3056
3057 status = be_mcc_notify_wait(adapter);
3058 if (!status) {
3059 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3060 u32 desc_count = le32_to_cpu(resp->desc_count);
3061 struct be_nic_resource_desc *desc;
3062
3063 desc = be_get_nic_desc(resp->func_param, desc_count,
3064 sizeof(resp->func_param));
3065
3066 if (!desc) {
3067 status = -EINVAL;
3068 goto err;
3069 }
3070 *cap_flags = le32_to_cpu(desc->cap_flags);
3071 }
3072err:
3073 spin_unlock_bh(&adapter->mcc_lock);
3074 pci_free_consistent(adapter->pdev, cmd.size,
3075 cmd.va, cmd.dma);
3076 return status;
3077}
3078
3079/* Uses sync mcc */
3080int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3081 u8 domain)
3082{
3083 struct be_mcc_wrb *wrb;
3084 struct be_cmd_req_set_profile_config *req;
3085 int status;
3086
3087 spin_lock_bh(&adapter->mcc_lock);
3088
3089 wrb = wrb_from_mccq(adapter);
3090 if (!wrb) {
3091 status = -EBUSY;
3092 goto err;
3093 }
3094
3095 req = embedded_payload(wrb);
3096
3097 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3098 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3099 wrb, NULL);
3100
3101 req->hdr.domain = domain;
3102 req->desc_count = cpu_to_le32(1);
3103
3104 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3105 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3106 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3107 req->nic_desc.pf_num = adapter->pf_number;
3108 req->nic_desc.vf_num = domain;
3109
3110 /* Mark fields invalid */
3111 req->nic_desc.unicast_mac_count = 0xFFFF;
3112 req->nic_desc.mcc_count = 0xFFFF;
3113 req->nic_desc.vlan_count = 0xFFFF;
3114 req->nic_desc.mcast_mac_count = 0xFFFF;
3115 req->nic_desc.txq_count = 0xFFFF;
3116 req->nic_desc.rq_count = 0xFFFF;
3117 req->nic_desc.rssq_count = 0xFFFF;
3118 req->nic_desc.lro_count = 0xFFFF;
3119 req->nic_desc.cq_count = 0xFFFF;
3120 req->nic_desc.toe_conn_count = 0xFFFF;
3121 req->nic_desc.eq_count = 0xFFFF;
3122 req->nic_desc.link_param = 0xFF;
3123 req->nic_desc.bw_min = 0xFFFFFFFF;
3124 req->nic_desc.acpi_params = 0xFF;
3125 req->nic_desc.wol_param = 0x0F;
3126
3127 /* Change BW */
3128 req->nic_desc.bw_min = cpu_to_le32(bps);
3129 req->nic_desc.bw_max = cpu_to_le32(bps);
3130 status = be_mcc_notify_wait(adapter);
3131err:
3132 spin_unlock_bh(&adapter->mcc_lock);
3133 return status;
3134}
3135
3136/* Uses sync mcc */
3137int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3138{
3139 struct be_mcc_wrb *wrb;
3140 struct be_cmd_enable_disable_vf *req;
3141 int status;
3142
3143 if (!lancer_chip(adapter))
3144 return 0;
3145
3146 spin_lock_bh(&adapter->mcc_lock);
3147
3148 wrb = wrb_from_mccq(adapter);
3149 if (!wrb) {
3150 status = -EBUSY;
3151 goto err;
3152 }
3153
3154 req = embedded_payload(wrb);
3155
3156 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3157 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3158 wrb, NULL);
3159
3160 req->hdr.domain = domain;
3161 req->enable = 1;
3162 status = be_mcc_notify_wait(adapter);
3163err:
3164 spin_unlock_bh(&adapter->mcc_lock);
3165 return status;
3166}
3167
2795int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3168int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
2796 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3169 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
2797{ 3170{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 0936e21e3cff..d6552e19ffee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -196,9 +196,14 @@ struct be_mcc_mailbox {
196#define OPCODE_COMMON_GET_MAC_LIST 147 196#define OPCODE_COMMON_GET_MAC_LIST 147
197#define OPCODE_COMMON_SET_MAC_LIST 148 197#define OPCODE_COMMON_SET_MAC_LIST 148
198#define OPCODE_COMMON_GET_HSW_CONFIG 152 198#define OPCODE_COMMON_GET_HSW_CONFIG 152
199#define OPCODE_COMMON_GET_FUNC_CONFIG 160
200#define OPCODE_COMMON_GET_PROFILE_CONFIG 164
201#define OPCODE_COMMON_SET_PROFILE_CONFIG 165
199#define OPCODE_COMMON_SET_HSW_CONFIG 153 202#define OPCODE_COMMON_SET_HSW_CONFIG 153
203#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
200#define OPCODE_COMMON_READ_OBJECT 171 204#define OPCODE_COMMON_READ_OBJECT 171
201#define OPCODE_COMMON_WRITE_OBJECT 172 205#define OPCODE_COMMON_WRITE_OBJECT 172
206#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
202 207
203#define OPCODE_ETH_RSS_CONFIG 1 208#define OPCODE_ETH_RSS_CONFIG 1
204#define OPCODE_ETH_ACPI_CONFIG 2 209#define OPCODE_ETH_ACPI_CONFIG 2
@@ -1151,14 +1156,22 @@ struct flashrom_params {
1151 u32 op_type; 1156 u32 op_type;
1152 u32 data_buf_size; 1157 u32 data_buf_size;
1153 u32 offset; 1158 u32 offset;
1154 u8 data_buf[4];
1155}; 1159};
1156 1160
1157struct be_cmd_write_flashrom { 1161struct be_cmd_write_flashrom {
1158 struct be_cmd_req_hdr hdr; 1162 struct be_cmd_req_hdr hdr;
1159 struct flashrom_params params; 1163 struct flashrom_params params;
1160}; 1164 u8 data_buf[32768];
1165 u8 rsvd[4];
1166} __packed;
1161 1167
1168/* cmd to read flash crc */
1169struct be_cmd_read_flash_crc {
1170 struct be_cmd_req_hdr hdr;
1171 struct flashrom_params params;
1172 u8 crc[4];
1173 u8 rsvd[4];
1174};
1162/**************** Lancer Firmware Flash ************/ 1175/**************** Lancer Firmware Flash ************/
1163struct amap_lancer_write_obj_context { 1176struct amap_lancer_write_obj_context {
1164 u8 write_length[24]; 1177 u8 write_length[24];
@@ -1429,6 +1442,41 @@ struct be_cmd_resp_set_func_cap {
1429 u8 rsvd[212]; 1442 u8 rsvd[212];
1430}; 1443};
1431 1444
1445/*********************** Function Privileges ***********************/
1446enum {
1447 BE_PRIV_DEFAULT = 0x1,
1448 BE_PRIV_LNKQUERY = 0x2,
1449 BE_PRIV_LNKSTATS = 0x4,
1450 BE_PRIV_LNKMGMT = 0x8,
1451 BE_PRIV_LNKDIAG = 0x10,
1452 BE_PRIV_UTILQUERY = 0x20,
1453 BE_PRIV_FILTMGMT = 0x40,
1454 BE_PRIV_IFACEMGMT = 0x80,
1455 BE_PRIV_VHADM = 0x100,
1456 BE_PRIV_DEVCFG = 0x200,
1457 BE_PRIV_DEVSEC = 0x400
1458};
1459#define MAX_PRIVILEGES (BE_PRIV_VHADM | BE_PRIV_DEVCFG | \
1460 BE_PRIV_DEVSEC)
1461#define MIN_PRIVILEGES BE_PRIV_DEFAULT
1462
1463struct be_cmd_priv_map {
1464 u8 opcode;
1465 u8 subsystem;
1466 u32 priv_mask;
1467};
1468
1469struct be_cmd_req_get_fn_privileges {
1470 struct be_cmd_req_hdr hdr;
1471 u32 rsvd;
1472};
1473
1474struct be_cmd_resp_get_fn_privileges {
1475 struct be_cmd_resp_hdr hdr;
1476 u32 privilege_mask;
1477};
1478
1479
1432/******************** GET/SET_MACLIST **************************/ 1480/******************** GET/SET_MACLIST **************************/
1433#define BE_MAX_MAC 64 1481#define BE_MAX_MAC 64
1434struct be_cmd_req_get_mac_list { 1482struct be_cmd_req_get_mac_list {
@@ -1608,33 +1656,6 @@ struct be_cmd_resp_get_stats_v1 {
1608 struct be_hw_stats_v1 hw_stats; 1656 struct be_hw_stats_v1 hw_stats;
1609}; 1657};
1610 1658
1611static inline void *hw_stats_from_cmd(struct be_adapter *adapter)
1612{
1613 if (adapter->generation == BE_GEN3) {
1614 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
1615
1616 return &cmd->hw_stats;
1617 } else {
1618 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
1619
1620 return &cmd->hw_stats;
1621 }
1622}
1623
1624static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
1625{
1626 if (adapter->generation == BE_GEN3) {
1627 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
1628
1629 return &hw_stats->erx;
1630 } else {
1631 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
1632
1633 return &hw_stats->erx;
1634 }
1635}
1636
1637
1638/************** get fat capabilites *******************/ 1659/************** get fat capabilites *******************/
1639#define MAX_MODULES 27 1660#define MAX_MODULES 27
1640#define MAX_MODES 4 1661#define MAX_MODES 4
@@ -1684,6 +1705,96 @@ struct be_cmd_req_set_ext_fat_caps {
1684 struct be_fat_conf_params set_params; 1705 struct be_fat_conf_params set_params;
1685}; 1706};
1686 1707
1708#define RESOURCE_DESC_SIZE 72
1709#define NIC_RESOURCE_DESC_TYPE_ID 0x41
1710#define MAX_RESOURCE_DESC 4
1711
1712/* QOS unit number */
1713#define QUN 4
1714/* Immediate */
1715#define IMM 6
1716/* No save */
1717#define NOSV 7
1718
1719struct be_nic_resource_desc {
1720 u8 desc_type;
1721 u8 desc_len;
1722 u8 rsvd1;
1723 u8 flags;
1724 u8 vf_num;
1725 u8 rsvd2;
1726 u8 pf_num;
1727 u8 rsvd3;
1728 u16 unicast_mac_count;
1729 u8 rsvd4[6];
1730 u16 mcc_count;
1731 u16 vlan_count;
1732 u16 mcast_mac_count;
1733 u16 txq_count;
1734 u16 rq_count;
1735 u16 rssq_count;
1736 u16 lro_count;
1737 u16 cq_count;
1738 u16 toe_conn_count;
1739 u16 eq_count;
1740 u32 rsvd5;
1741 u32 cap_flags;
1742 u8 link_param;
1743 u8 rsvd6[3];
1744 u32 bw_min;
1745 u32 bw_max;
1746 u8 acpi_params;
1747 u8 wol_param;
1748 u16 rsvd7;
1749 u32 rsvd8[3];
1750};
1751
1752struct be_cmd_req_get_func_config {
1753 struct be_cmd_req_hdr hdr;
1754};
1755
1756struct be_cmd_resp_get_func_config {
1757 struct be_cmd_req_hdr hdr;
1758 u32 desc_count;
1759 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1760};
1761
1762#define ACTIVE_PROFILE_TYPE 0x2
1763struct be_cmd_req_get_profile_config {
1764 struct be_cmd_req_hdr hdr;
1765 u8 rsvd;
1766 u8 type;
1767 u16 rsvd1;
1768};
1769
1770struct be_cmd_resp_get_profile_config {
1771 struct be_cmd_req_hdr hdr;
1772 u32 desc_count;
1773 u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
1774};
1775
1776struct be_cmd_req_set_profile_config {
1777 struct be_cmd_req_hdr hdr;
1778 u32 rsvd;
1779 u32 desc_count;
1780 struct be_nic_resource_desc nic_desc;
1781};
1782
1783struct be_cmd_resp_set_profile_config {
1784 struct be_cmd_req_hdr hdr;
1785};
1786
1787struct be_cmd_enable_disable_vf {
1788 struct be_cmd_req_hdr hdr;
1789 u8 enable;
1790 u8 rsvd[3];
1791};
1792
1793static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
1794{
1795 return flags & adapter->cmd_privileges ? true : false;
1796}
1797
1687extern int be_pci_fnum_get(struct be_adapter *adapter); 1798extern int be_pci_fnum_get(struct be_adapter *adapter);
1688extern int be_fw_wait_ready(struct be_adapter *adapter); 1799extern int be_fw_wait_ready(struct be_adapter *adapter);
1689extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1800extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1780,6 +1891,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
1780extern int be_cmd_req_native_mode(struct be_adapter *adapter); 1891extern int be_cmd_req_native_mode(struct be_adapter *adapter);
1781extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size); 1892extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
1782extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf); 1893extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
1894extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
1895 u32 *privilege, u32 domain);
1783extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 1896extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1784 bool *pmac_id_active, u32 *pmac_id, 1897 bool *pmac_id_active, u32 *pmac_id,
1785 u8 domain); 1898 u8 domain);
@@ -1798,4 +1911,10 @@ extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
1798extern int lancer_wait_ready(struct be_adapter *adapter); 1911extern int lancer_wait_ready(struct be_adapter *adapter);
1799extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter); 1912extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
1800extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name); 1913extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
1914extern int be_cmd_get_func_config(struct be_adapter *adapter);
1915extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
1916 u8 domain);
1801 1917
1918extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
1919 u8 domain);
1920extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 8e6fb0ba6aa9..00454a10f88d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -261,6 +261,9 @@ be_get_reg_len(struct net_device *netdev)
261 struct be_adapter *adapter = netdev_priv(netdev); 261 struct be_adapter *adapter = netdev_priv(netdev);
262 u32 log_size = 0; 262 u32 log_size = 0;
263 263
264 if (!check_privilege(adapter, MAX_PRIVILEGES))
265 return 0;
266
264 if (be_physfn(adapter)) { 267 if (be_physfn(adapter)) {
265 if (lancer_chip(adapter)) 268 if (lancer_chip(adapter))
266 log_size = lancer_cmd_get_file_len(adapter, 269 log_size = lancer_cmd_get_file_len(adapter,
@@ -525,6 +528,10 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
525 u8 link_status; 528 u8 link_status;
526 u16 link_speed = 0; 529 u16 link_speed = 0;
527 int status; 530 int status;
531 u32 auto_speeds;
532 u32 fixed_speeds;
533 u32 dac_cable_len;
534 u16 interface_type;
528 535
529 if (adapter->phy.link_speed < 0) { 536 if (adapter->phy.link_speed < 0) {
530 status = be_cmd_link_status_query(adapter, &link_speed, 537 status = be_cmd_link_status_query(adapter, &link_speed,
@@ -534,39 +541,46 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
534 ethtool_cmd_speed_set(ecmd, link_speed); 541 ethtool_cmd_speed_set(ecmd, link_speed);
535 542
536 status = be_cmd_get_phy_info(adapter); 543 status = be_cmd_get_phy_info(adapter);
537 if (status) 544 if (!status) {
538 return status; 545 interface_type = adapter->phy.interface_type;
539 546 auto_speeds = adapter->phy.auto_speeds_supported;
540 ecmd->supported = 547 fixed_speeds = adapter->phy.fixed_speeds_supported;
541 convert_to_et_setting(adapter->phy.interface_type, 548 dac_cable_len = adapter->phy.dac_cable_len;
542 adapter->phy.auto_speeds_supported | 549
543 adapter->phy.fixed_speeds_supported); 550 ecmd->supported =
544 ecmd->advertising = 551 convert_to_et_setting(interface_type,
545 convert_to_et_setting(adapter->phy.interface_type, 552 auto_speeds |
546 adapter->phy.auto_speeds_supported); 553 fixed_speeds);
547 554 ecmd->advertising =
548 ecmd->port = be_get_port_type(adapter->phy.interface_type, 555 convert_to_et_setting(interface_type,
549 adapter->phy.dac_cable_len); 556 auto_speeds);
550 557
551 if (adapter->phy.auto_speeds_supported) { 558 ecmd->port = be_get_port_type(interface_type,
552 ecmd->supported |= SUPPORTED_Autoneg; 559 dac_cable_len);
553 ecmd->autoneg = AUTONEG_ENABLE; 560
554 ecmd->advertising |= ADVERTISED_Autoneg; 561 if (adapter->phy.auto_speeds_supported) {
555 } 562 ecmd->supported |= SUPPORTED_Autoneg;
563 ecmd->autoneg = AUTONEG_ENABLE;
564 ecmd->advertising |= ADVERTISED_Autoneg;
565 }
556 566
557 if (be_pause_supported(adapter)) {
558 ecmd->supported |= SUPPORTED_Pause; 567 ecmd->supported |= SUPPORTED_Pause;
559 ecmd->advertising |= ADVERTISED_Pause; 568 if (be_pause_supported(adapter))
560 } 569 ecmd->advertising |= ADVERTISED_Pause;
561 570
562 switch (adapter->phy.interface_type) { 571 switch (adapter->phy.interface_type) {
563 case PHY_TYPE_KR_10GB: 572 case PHY_TYPE_KR_10GB:
564 case PHY_TYPE_KX4_10GB: 573 case PHY_TYPE_KX4_10GB:
565 ecmd->transceiver = XCVR_INTERNAL; 574 ecmd->transceiver = XCVR_INTERNAL;
566 break; 575 break;
567 default: 576 default:
568 ecmd->transceiver = XCVR_EXTERNAL; 577 ecmd->transceiver = XCVR_EXTERNAL;
569 break; 578 break;
579 }
580 } else {
581 ecmd->port = PORT_OTHER;
582 ecmd->autoneg = AUTONEG_DISABLE;
583 ecmd->transceiver = XCVR_DUMMY1;
570 } 584 }
571 585
572 /* Save for future use */ 586 /* Save for future use */
@@ -787,6 +801,10 @@ static int
787be_get_eeprom_len(struct net_device *netdev) 801be_get_eeprom_len(struct net_device *netdev)
788{ 802{
789 struct be_adapter *adapter = netdev_priv(netdev); 803 struct be_adapter *adapter = netdev_priv(netdev);
804
805 if (!check_privilege(adapter, MAX_PRIVILEGES))
806 return 0;
807
790 if (lancer_chip(adapter)) { 808 if (lancer_chip(adapter)) {
791 if (be_physfn(adapter)) 809 if (be_physfn(adapter))
792 return lancer_cmd_get_file_len(adapter, 810 return lancer_cmd_get_file_len(adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index b755f7061dce..541d4530d5bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -31,12 +31,12 @@
31 31
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semphore ******************/ 34/********** MPU semphore: used for SH & BE *************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac 35#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400 36#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 37#define POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_MASK 0x1 38#define POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31 39#define POST_ERR_SHIFT 31
40 40
41/* MPU semphore POST stage values */ 41/* MPU semphore POST stage values */
42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */ 42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
@@ -59,6 +59,9 @@
59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002 59#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
60#define PHYSDEV_CONTROL_INP_MASK 0x40000000 60#define PHYSDEV_CONTROL_INP_MASK 0x40000000
61 61
62#define SLIPORT_ERROR_NO_RESOURCE1 0x2
63#define SLIPORT_ERROR_NO_RESOURCE2 0x9
64
62/********* Memory BAR register ************/ 65/********* Memory BAR register ************/
63#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc 66#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
64/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt 67/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
@@ -102,11 +105,6 @@
102#define SLI_INTF_TYPE_2 2 105#define SLI_INTF_TYPE_2 2
103#define SLI_INTF_TYPE_3 3 106#define SLI_INTF_TYPE_3 3
104 107
105/* SLI family */
106#define BE_SLI_FAMILY 0x0
107#define LANCER_A0_SLI_FAMILY 0xA
108#define SKYHAWK_SLI_FAMILY 0x2
109
110/********* ISR0 Register offset **********/ 108/********* ISR0 Register offset **********/
111#define CEV_ISR0_OFFSET 0xC18 109#define CEV_ISR0_OFFSET 0xC18
112#define CEV_ISR_SIZE 4 110#define CEV_ISR_SIZE 4
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d1b6cc587639..f95612b907ae 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -44,6 +44,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, 44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, 45 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, 46 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
47 { 0 } 48 { 0 }
48}; 49};
49MODULE_DEVICE_TABLE(pci, be_dev_ids); 50MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,23 +238,46 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
237 int status = 0; 238 int status = 0;
238 u8 current_mac[ETH_ALEN]; 239 u8 current_mac[ETH_ALEN];
239 u32 pmac_id = adapter->pmac_id[0]; 240 u32 pmac_id = adapter->pmac_id[0];
241 bool active_mac = true;
240 242
241 if (!is_valid_ether_addr(addr->sa_data)) 243 if (!is_valid_ether_addr(addr->sa_data))
242 return -EADDRNOTAVAIL; 244 return -EADDRNOTAVAIL;
243 245
244 status = be_cmd_mac_addr_query(adapter, current_mac, false, 246 /* For BE VF, MAC address is already activated by PF.
245 adapter->if_handle, 0); 247 * Hence only operation left is updating netdev->devaddr.
248 * Update it if user is passing the same MAC which was used
249 * during configuring VF MAC from PF(Hypervisor).
250 */
251 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
252 status = be_cmd_mac_addr_query(adapter, current_mac,
253 false, adapter->if_handle, 0);
254 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
255 goto done;
256 else
257 goto err;
258 }
259
260 if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
261 goto done;
262
263 /* For Lancer check if any MAC is active.
264 * If active, get its mac id.
265 */
266 if (lancer_chip(adapter) && !be_physfn(adapter))
267 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
268 &pmac_id, 0);
269
270 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
271 adapter->if_handle,
272 &adapter->pmac_id[0], 0);
273
246 if (status) 274 if (status)
247 goto err; 275 goto err;
248 276
249 if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) { 277 if (active_mac)
250 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 278 be_cmd_pmac_del(adapter, adapter->if_handle,
251 adapter->if_handle, &adapter->pmac_id[0], 0); 279 pmac_id, 0);
252 if (status) 280done:
253 goto err;
254
255 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
256 }
257 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 281 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
258 return 0; 282 return 0;
259err: 283err:
@@ -261,7 +285,35 @@ err:
261 return status; 285 return status;
262} 286}
263 287
264static void populate_be2_stats(struct be_adapter *adapter) 288/* BE2 supports only v0 cmd */
289static void *hw_stats_from_cmd(struct be_adapter *adapter)
290{
291 if (BE2_chip(adapter)) {
292 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
293
294 return &cmd->hw_stats;
295 } else {
296 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
297
298 return &cmd->hw_stats;
299 }
300}
301
302/* BE2 supports only v0 cmd */
303static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
304{
305 if (BE2_chip(adapter)) {
306 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
307
308 return &hw_stats->erx;
309 } else {
310 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
311
312 return &hw_stats->erx;
313 }
314}
315
316static void populate_be_v0_stats(struct be_adapter *adapter)
265{ 317{
266 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); 318 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
267 struct be_pmem_stats *pmem_sts = &hw_stats->pmem; 319 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -310,7 +362,7 @@ static void populate_be2_stats(struct be_adapter *adapter)
310 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; 362 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
311} 363}
312 364
313static void populate_be3_stats(struct be_adapter *adapter) 365static void populate_be_v1_stats(struct be_adapter *adapter)
314{ 366{
315 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); 367 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
316 struct be_pmem_stats *pmem_sts = &hw_stats->pmem; 368 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
@@ -412,28 +464,25 @@ void be_parse_stats(struct be_adapter *adapter)
412 struct be_rx_obj *rxo; 464 struct be_rx_obj *rxo;
413 int i; 465 int i;
414 466
415 if (adapter->generation == BE_GEN3) { 467 if (lancer_chip(adapter)) {
416 if (lancer_chip(adapter)) 468 populate_lancer_stats(adapter);
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else { 469 } else {
421 populate_be2_stats(adapter); 470 if (BE2_chip(adapter))
422 } 471 populate_be_v0_stats(adapter);
423 472 else
424 if (lancer_chip(adapter)) 473 /* for BE3 and Skyhawk */
425 goto done; 474 populate_be_v1_stats(adapter);
426 475
427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ 476 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
428 for_all_rx_queues(adapter, rxo, i) { 477 for_all_rx_queues(adapter, rxo, i) {
429 /* below erx HW counter can actually wrap around after 478 /* below erx HW counter can actually wrap around after
430 * 65535. Driver accumulates a 32-bit value 479 * 65535. Driver accumulates a 32-bit value
431 */ 480 */
432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, 481 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
433 (u16)erx->rx_drops_no_fragments[rxo->q.id]); 482 (u16)erx->rx_drops_no_fragments \
483 [rxo->q.id]);
484 }
434 } 485 }
435done:
436 return;
437} 486}
438 487
439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 488static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -597,16 +646,6 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
597 hdr, skb_shinfo(skb)->gso_size); 646 hdr, skb_shinfo(skb)->gso_size);
598 if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) 647 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
599 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 648 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
600 if (lancer_chip(adapter) && adapter->sli_family ==
601 LANCER_A0_SLI_FAMILY) {
602 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
603 if (is_tcp_pkt(skb))
604 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
605 tcpcs, hdr, 1);
606 else if (is_udp_pkt(skb))
607 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
608 udpcs, hdr, 1);
609 }
610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 649 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
611 if (is_tcp_pkt(skb)) 650 if (is_tcp_pkt(skb))
612 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 651 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -856,11 +895,15 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
856 struct be_adapter *adapter = netdev_priv(netdev); 895 struct be_adapter *adapter = netdev_priv(netdev);
857 int status = 0; 896 int status = 0;
858 897
859 if (!be_physfn(adapter)) { 898 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
860 status = -EINVAL; 899 status = -EINVAL;
861 goto ret; 900 goto ret;
862 } 901 }
863 902
903 /* Packets with VID 0 are always received by Lancer by default */
904 if (lancer_chip(adapter) && vid == 0)
905 goto ret;
906
864 adapter->vlan_tag[vid] = 1; 907 adapter->vlan_tag[vid] = 1;
865 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 908 if (adapter->vlans_added <= (adapter->max_vlans + 1))
866 status = be_vid_config(adapter); 909 status = be_vid_config(adapter);
@@ -878,11 +921,15 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
878 struct be_adapter *adapter = netdev_priv(netdev); 921 struct be_adapter *adapter = netdev_priv(netdev);
879 int status = 0; 922 int status = 0;
880 923
881 if (!be_physfn(adapter)) { 924 if (!lancer_chip(adapter) && !be_physfn(adapter)) {
882 status = -EINVAL; 925 status = -EINVAL;
883 goto ret; 926 goto ret;
884 } 927 }
885 928
929 /* Packets with VID 0 are always received by Lancer by default */
930 if (lancer_chip(adapter) && vid == 0)
931 goto ret;
932
886 adapter->vlan_tag[vid] = 0; 933 adapter->vlan_tag[vid] = 0;
887 if (adapter->vlans_added <= adapter->max_vlans) 934 if (adapter->vlans_added <= adapter->max_vlans)
888 status = be_vid_config(adapter); 935 status = be_vid_config(adapter);
@@ -917,7 +964,7 @@ static void be_set_rx_mode(struct net_device *netdev)
917 964
918 /* Enable multicast promisc if num configured exceeds what we support */ 965 /* Enable multicast promisc if num configured exceeds what we support */
919 if (netdev->flags & IFF_ALLMULTI || 966 if (netdev->flags & IFF_ALLMULTI ||
920 netdev_mc_count(netdev) > BE_MAX_MC) { 967 netdev_mc_count(netdev) > adapter->max_mcast_mac) {
921 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); 968 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
922 goto done; 969 goto done;
923 } 970 }
@@ -962,6 +1009,9 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
962 struct be_adapter *adapter = netdev_priv(netdev); 1009 struct be_adapter *adapter = netdev_priv(netdev);
963 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1010 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
964 int status; 1011 int status;
1012 bool active_mac = false;
1013 u32 pmac_id;
1014 u8 old_mac[ETH_ALEN];
965 1015
966 if (!sriov_enabled(adapter)) 1016 if (!sriov_enabled(adapter))
967 return -EPERM; 1017 return -EPERM;
@@ -970,6 +1020,12 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
970 return -EINVAL; 1020 return -EINVAL;
971 1021
972 if (lancer_chip(adapter)) { 1022 if (lancer_chip(adapter)) {
1023 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1024 &pmac_id, vf + 1);
1025 if (!status && active_mac)
1026 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1027 pmac_id, vf + 1);
1028
973 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1); 1029 status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
974 } else { 1030 } else {
975 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle, 1031 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
@@ -1062,7 +1118,10 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1062 return -EINVAL; 1118 return -EINVAL;
1063 } 1119 }
1064 1120
1065 status = be_cmd_set_qos(adapter, rate / 10, vf + 1); 1121 if (lancer_chip(adapter))
1122 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1123 else
1124 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1066 1125
1067 if (status) 1126 if (status)
1068 dev_err(&adapter->pdev->dev, 1127 dev_err(&adapter->pdev->dev,
@@ -1616,24 +1675,6 @@ static inline int events_get(struct be_eq_obj *eqo)
1616 return num; 1675 return num;
1617} 1676}
1618 1677
1619static int event_handle(struct be_eq_obj *eqo)
1620{
1621 bool rearm = false;
1622 int num = events_get(eqo);
1623
1624 /* Deal with any spurious interrupts that come without events */
1625 if (!num)
1626 rearm = true;
1627
1628 if (num || msix_enabled(eqo->adapter))
1629 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1630
1631 if (num)
1632 napi_schedule(&eqo->napi);
1633
1634 return num;
1635}
1636
1637/* Leaves the EQ is disarmed state */ 1678/* Leaves the EQ is disarmed state */
1638static void be_eq_clean(struct be_eq_obj *eqo) 1679static void be_eq_clean(struct be_eq_obj *eqo)
1639{ 1680{
@@ -1837,12 +1878,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
1837 1878
1838static int be_num_txqs_want(struct be_adapter *adapter) 1879static int be_num_txqs_want(struct be_adapter *adapter)
1839{ 1880{
1840 if (sriov_want(adapter) || be_is_mc(adapter) || 1881 if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1841 lancer_chip(adapter) || !be_physfn(adapter) || 1882 be_is_mc(adapter) ||
1842 adapter->generation == BE_GEN2) 1883 (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1884 BE2_chip(adapter))
1843 return 1; 1885 return 1;
1844 else 1886 else
1845 return MAX_TX_QS; 1887 return adapter->max_tx_queues;
1846} 1888}
1847 1889
1848static int be_tx_cqs_create(struct be_adapter *adapter) 1890static int be_tx_cqs_create(struct be_adapter *adapter)
@@ -1954,22 +1996,31 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
1954 1996
1955static irqreturn_t be_intx(int irq, void *dev) 1997static irqreturn_t be_intx(int irq, void *dev)
1956{ 1998{
1957 struct be_adapter *adapter = dev; 1999 struct be_eq_obj *eqo = dev;
1958 int num_evts; 2000 struct be_adapter *adapter = eqo->adapter;
2001 int num_evts = 0;
1959 2002
1960 /* With INTx only one EQ is used */ 2003 /* On Lancer, clear-intr bit of the EQ DB does not work.
1961 num_evts = event_handle(&adapter->eq_obj[0]); 2004 * INTx is de-asserted only on notifying num evts.
1962 if (num_evts) 2005 */
1963 return IRQ_HANDLED; 2006 if (lancer_chip(adapter))
1964 else 2007 num_evts = events_get(eqo);
1965 return IRQ_NONE; 2008
2009 /* The EQ-notify may not de-assert INTx rightaway, causing
2010 * the ISR to be invoked again. So, return HANDLED even when
2011 * num_evts is zero.
2012 */
2013 be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2014 napi_schedule(&eqo->napi);
2015 return IRQ_HANDLED;
1966} 2016}
1967 2017
1968static irqreturn_t be_msix(int irq, void *dev) 2018static irqreturn_t be_msix(int irq, void *dev)
1969{ 2019{
1970 struct be_eq_obj *eqo = dev; 2020 struct be_eq_obj *eqo = dev;
1971 2021
1972 event_handle(eqo); 2022 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2023 napi_schedule(&eqo->napi);
1973 return IRQ_HANDLED; 2024 return IRQ_HANDLED;
1974} 2025}
1975 2026
@@ -2065,9 +2116,11 @@ int be_poll(struct napi_struct *napi, int budget)
2065{ 2116{
2066 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi); 2117 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2067 struct be_adapter *adapter = eqo->adapter; 2118 struct be_adapter *adapter = eqo->adapter;
2068 int max_work = 0, work, i; 2119 int max_work = 0, work, i, num_evts;
2069 bool tx_done; 2120 bool tx_done;
2070 2121
2122 num_evts = events_get(eqo);
2123
2071 /* Process all TXQs serviced by this EQ */ 2124 /* Process all TXQs serviced by this EQ */
2072 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) { 2125 for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2073 tx_done = be_process_tx(adapter, &adapter->tx_obj[i], 2126 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
@@ -2090,10 +2143,10 @@ int be_poll(struct napi_struct *napi, int budget)
2090 2143
2091 if (max_work < budget) { 2144 if (max_work < budget) {
2092 napi_complete(napi); 2145 napi_complete(napi);
2093 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2146 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2094 } else { 2147 } else {
2095 /* As we'll continue in polling mode, count and clear events */ 2148 /* As we'll continue in polling mode, count and clear events */
2096 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo)); 2149 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2097 } 2150 }
2098 return max_work; 2151 return max_work;
2099} 2152}
@@ -2177,9 +2230,11 @@ static void be_msix_disable(struct be_adapter *adapter)
2177static uint be_num_rss_want(struct be_adapter *adapter) 2230static uint be_num_rss_want(struct be_adapter *adapter)
2178{ 2231{
2179 u32 num = 0; 2232 u32 num = 0;
2233
2180 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && 2234 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2181 !sriov_want(adapter) && be_physfn(adapter)) { 2235 (lancer_chip(adapter) ||
2182 num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; 2236 (!sriov_want(adapter) && be_physfn(adapter)))) {
2237 num = adapter->max_rss_queues;
2183 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); 2238 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2184 } 2239 }
2185 return num; 2240 return num;
@@ -2277,10 +2332,10 @@ static int be_irq_register(struct be_adapter *adapter)
2277 return status; 2332 return status;
2278 } 2333 }
2279 2334
2280 /* INTx */ 2335 /* INTx: only the first EQ is used */
2281 netdev->irq = adapter->pdev->irq; 2336 netdev->irq = adapter->pdev->irq;
2282 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name, 2337 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2283 adapter); 2338 &adapter->eq_obj[0]);
2284 if (status) { 2339 if (status) {
2285 dev_err(&adapter->pdev->dev, 2340 dev_err(&adapter->pdev->dev,
2286 "INTx request IRQ failed - err %d\n", status); 2341 "INTx request IRQ failed - err %d\n", status);
@@ -2302,7 +2357,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
2302 2357
2303 /* INTx */ 2358 /* INTx */
2304 if (!msix_enabled(adapter)) { 2359 if (!msix_enabled(adapter)) {
2305 free_irq(netdev->irq, adapter); 2360 free_irq(netdev->irq, &adapter->eq_obj[0]);
2306 goto done; 2361 goto done;
2307 } 2362 }
2308 2363
@@ -2579,10 +2634,30 @@ static int be_clear(struct be_adapter *adapter)
2579 be_tx_queues_destroy(adapter); 2634 be_tx_queues_destroy(adapter);
2580 be_evt_queues_destroy(adapter); 2635 be_evt_queues_destroy(adapter);
2581 2636
2637 kfree(adapter->pmac_id);
2638 adapter->pmac_id = NULL;
2639
2582 be_msix_disable(adapter); 2640 be_msix_disable(adapter);
2583 return 0; 2641 return 0;
2584} 2642}
2585 2643
2644static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2645 u32 *cap_flags, u8 domain)
2646{
2647 bool profile_present = false;
2648 int status;
2649
2650 if (lancer_chip(adapter)) {
2651 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2652 if (!status)
2653 profile_present = true;
2654 }
2655
2656 if (!profile_present)
2657 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2658 BE_IF_FLAGS_MULTICAST;
2659}
2660
2586static int be_vf_setup_init(struct be_adapter *adapter) 2661static int be_vf_setup_init(struct be_adapter *adapter)
2587{ 2662{
2588 struct be_vf_cfg *vf_cfg; 2663 struct be_vf_cfg *vf_cfg;
@@ -2634,9 +2709,13 @@ static int be_vf_setup(struct be_adapter *adapter)
2634 if (status) 2709 if (status)
2635 goto err; 2710 goto err;
2636 2711
2637 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2638 BE_IF_FLAGS_MULTICAST;
2639 for_all_vfs(adapter, vf_cfg, vf) { 2712 for_all_vfs(adapter, vf_cfg, vf) {
2713 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2714
2715 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2716 BE_IF_FLAGS_BROADCAST |
2717 BE_IF_FLAGS_MULTICAST);
2718
2640 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2719 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2641 &vf_cfg->if_handle, vf + 1); 2720 &vf_cfg->if_handle, vf + 1);
2642 if (status) 2721 if (status)
@@ -2661,6 +2740,8 @@ static int be_vf_setup(struct be_adapter *adapter)
2661 if (status) 2740 if (status)
2662 goto err; 2741 goto err;
2663 vf_cfg->def_vid = def_vlan; 2742 vf_cfg->def_vid = def_vlan;
2743
2744 be_cmd_enable_vf(adapter, vf + 1);
2664 } 2745 }
2665 return 0; 2746 return 0;
2666err: 2747err:
@@ -2674,7 +2755,10 @@ static void be_setup_init(struct be_adapter *adapter)
2674 adapter->if_handle = -1; 2755 adapter->if_handle = -1;
2675 adapter->be3_native = false; 2756 adapter->be3_native = false;
2676 adapter->promiscuous = false; 2757 adapter->promiscuous = false;
2677 adapter->eq_next_idx = 0; 2758 if (be_physfn(adapter))
2759 adapter->cmd_privileges = MAX_PRIVILEGES;
2760 else
2761 adapter->cmd_privileges = MIN_PRIVILEGES;
2678} 2762}
2679 2763
2680static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle, 2764static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
@@ -2712,12 +2796,93 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2712 return status; 2796 return status;
2713} 2797}
2714 2798
2799static void be_get_resources(struct be_adapter *adapter)
2800{
2801 int status;
2802 bool profile_present = false;
2803
2804 if (lancer_chip(adapter)) {
2805 status = be_cmd_get_func_config(adapter);
2806
2807 if (!status)
2808 profile_present = true;
2809 }
2810
2811 if (profile_present) {
2812 /* Sanity fixes for Lancer */
2813 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2814 BE_UC_PMAC_COUNT);
2815 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2816 BE_NUM_VLANS_SUPPORTED);
2817 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2818 BE_MAX_MC);
2819 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2820 MAX_TX_QS);
2821 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2822 BE3_MAX_RSS_QS);
2823 adapter->max_event_queues = min_t(u16,
2824 adapter->max_event_queues,
2825 BE3_MAX_RSS_QS);
2826
2827 if (adapter->max_rss_queues &&
2828 adapter->max_rss_queues == adapter->max_rx_queues)
2829 adapter->max_rss_queues -= 1;
2830
2831 if (adapter->max_event_queues < adapter->max_rss_queues)
2832 adapter->max_rss_queues = adapter->max_event_queues;
2833
2834 } else {
2835 if (be_physfn(adapter))
2836 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2837 else
2838 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2839
2840 if (adapter->function_mode & FLEX10_MODE)
2841 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2842 else
2843 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2844
2845 adapter->max_mcast_mac = BE_MAX_MC;
2846 adapter->max_tx_queues = MAX_TX_QS;
2847 adapter->max_rss_queues = (adapter->be3_native) ?
2848 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2849 adapter->max_event_queues = BE3_MAX_RSS_QS;
2850
2851 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2852 BE_IF_FLAGS_BROADCAST |
2853 BE_IF_FLAGS_MULTICAST |
2854 BE_IF_FLAGS_PASS_L3L4_ERRORS |
2855 BE_IF_FLAGS_MCAST_PROMISCUOUS |
2856 BE_IF_FLAGS_VLAN_PROMISCUOUS |
2857 BE_IF_FLAGS_PROMISCUOUS;
2858
2859 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2860 adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2861 }
2862}
2863
2715/* Routine to query per function resource limits */ 2864/* Routine to query per function resource limits */
2716static int be_get_config(struct be_adapter *adapter) 2865static int be_get_config(struct be_adapter *adapter)
2717{ 2866{
2718 int pos; 2867 int pos, status;
2719 u16 dev_num_vfs; 2868 u16 dev_num_vfs;
2720 2869
2870 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2871 &adapter->function_mode,
2872 &adapter->function_caps);
2873 if (status)
2874 goto err;
2875
2876 be_get_resources(adapter);
2877
2878 /* primary mac needs 1 pmac entry */
2879 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2880 sizeof(u32), GFP_KERNEL);
2881 if (!adapter->pmac_id) {
2882 status = -ENOMEM;
2883 goto err;
2884 }
2885
2721 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV); 2886 pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2722 if (pos) { 2887 if (pos) {
2723 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, 2888 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
@@ -2726,13 +2891,14 @@ static int be_get_config(struct be_adapter *adapter)
2726 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); 2891 dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2727 adapter->dev_num_vfs = dev_num_vfs; 2892 adapter->dev_num_vfs = dev_num_vfs;
2728 } 2893 }
2729 return 0; 2894err:
2895 return status;
2730} 2896}
2731 2897
2732static int be_setup(struct be_adapter *adapter) 2898static int be_setup(struct be_adapter *adapter)
2733{ 2899{
2734 struct device *dev = &adapter->pdev->dev; 2900 struct device *dev = &adapter->pdev->dev;
2735 u32 cap_flags, en_flags; 2901 u32 en_flags;
2736 u32 tx_fc, rx_fc; 2902 u32 tx_fc, rx_fc;
2737 int status; 2903 int status;
2738 u8 mac[ETH_ALEN]; 2904 u8 mac[ETH_ALEN];
@@ -2740,9 +2906,12 @@ static int be_setup(struct be_adapter *adapter)
2740 2906
2741 be_setup_init(adapter); 2907 be_setup_init(adapter);
2742 2908
2743 be_get_config(adapter); 2909 if (!lancer_chip(adapter))
2910 be_cmd_req_native_mode(adapter);
2744 2911
2745 be_cmd_req_native_mode(adapter); 2912 status = be_get_config(adapter);
2913 if (status)
2914 goto err;
2746 2915
2747 be_msix_enable(adapter); 2916 be_msix_enable(adapter);
2748 2917
@@ -2762,24 +2931,22 @@ static int be_setup(struct be_adapter *adapter)
2762 if (status) 2931 if (status)
2763 goto err; 2932 goto err;
2764 2933
2934 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2935 /* In UMC mode FW does not return right privileges.
2936 * Override with correct privilege equivalent to PF.
2937 */
2938 if (be_is_mc(adapter))
2939 adapter->cmd_privileges = MAX_PRIVILEGES;
2940
2765 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 2941 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2766 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS; 2942 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2767 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2768 BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2769 2943
2770 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) { 2944 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2771 cap_flags |= BE_IF_FLAGS_RSS;
2772 en_flags |= BE_IF_FLAGS_RSS; 2945 en_flags |= BE_IF_FLAGS_RSS;
2773 }
2774 2946
2775 if (lancer_chip(adapter) && !be_physfn(adapter)) { 2947 en_flags = en_flags & adapter->if_cap_flags;
2776 en_flags = BE_IF_FLAGS_UNTAGGED |
2777 BE_IF_FLAGS_BROADCAST |
2778 BE_IF_FLAGS_MULTICAST;
2779 cap_flags = en_flags;
2780 }
2781 2948
2782 status = be_cmd_if_create(adapter, cap_flags, en_flags, 2949 status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2783 &adapter->if_handle, 0); 2950 &adapter->if_handle, 0);
2784 if (status != 0) 2951 if (status != 0)
2785 goto err; 2952 goto err;
@@ -2827,8 +2994,8 @@ static int be_setup(struct be_adapter *adapter)
2827 dev_warn(dev, "device doesn't support SRIOV\n"); 2994 dev_warn(dev, "device doesn't support SRIOV\n");
2828 } 2995 }
2829 2996
2830 be_cmd_get_phy_info(adapter); 2997 status = be_cmd_get_phy_info(adapter);
2831 if (be_pause_supported(adapter)) 2998 if (!status && be_pause_supported(adapter))
2832 adapter->phy.fc_autoneg = 1; 2999 adapter->phy.fc_autoneg = 1;
2833 3000
2834 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 3001 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
@@ -2846,8 +3013,10 @@ static void be_netpoll(struct net_device *netdev)
2846 struct be_eq_obj *eqo; 3013 struct be_eq_obj *eqo;
2847 int i; 3014 int i;
2848 3015
2849 for_all_evt_queues(adapter, eqo, i) 3016 for_all_evt_queues(adapter, eqo, i) {
2850 event_handle(eqo); 3017 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3018 napi_schedule(&eqo->napi);
3019 }
2851 3020
2852 return; 3021 return;
2853} 3022}
@@ -2895,7 +3064,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
2895 int i = 0, img_type = 0; 3064 int i = 0, img_type = 0;
2896 struct flash_section_info_g2 *fsec_g2 = NULL; 3065 struct flash_section_info_g2 *fsec_g2 = NULL;
2897 3066
2898 if (adapter->generation != BE_GEN3) 3067 if (BE2_chip(adapter))
2899 fsec_g2 = (struct flash_section_info_g2 *)fsec; 3068 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2900 3069
2901 for (i = 0; i < MAX_FLASH_COMP; i++) { 3070 for (i = 0; i < MAX_FLASH_COMP; i++) {
@@ -2928,7 +3097,49 @@ struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2928 return NULL; 3097 return NULL;
2929} 3098}
2930 3099
2931static int be_flash_data(struct be_adapter *adapter, 3100static int be_flash(struct be_adapter *adapter, const u8 *img,
3101 struct be_dma_mem *flash_cmd, int optype, int img_size)
3102{
3103 u32 total_bytes = 0, flash_op, num_bytes = 0;
3104 int status = 0;
3105 struct be_cmd_write_flashrom *req = flash_cmd->va;
3106
3107 total_bytes = img_size;
3108 while (total_bytes) {
3109 num_bytes = min_t(u32, 32*1024, total_bytes);
3110
3111 total_bytes -= num_bytes;
3112
3113 if (!total_bytes) {
3114 if (optype == OPTYPE_PHY_FW)
3115 flash_op = FLASHROM_OPER_PHY_FLASH;
3116 else
3117 flash_op = FLASHROM_OPER_FLASH;
3118 } else {
3119 if (optype == OPTYPE_PHY_FW)
3120 flash_op = FLASHROM_OPER_PHY_SAVE;
3121 else
3122 flash_op = FLASHROM_OPER_SAVE;
3123 }
3124
3125 memcpy(req->data_buf, img, num_bytes);
3126 img += num_bytes;
3127 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3128 flash_op, num_bytes);
3129 if (status) {
3130 if (status == ILLEGAL_IOCTL_REQ &&
3131 optype == OPTYPE_PHY_FW)
3132 break;
3133 dev_err(&adapter->pdev->dev,
3134 "cmd to write to flash rom failed.\n");
3135 return status;
3136 }
3137 }
3138 return 0;
3139}
3140
3141/* For BE2 and BE3 */
3142static int be_flash_BEx(struct be_adapter *adapter,
2932 const struct firmware *fw, 3143 const struct firmware *fw,
2933 struct be_dma_mem *flash_cmd, 3144 struct be_dma_mem *flash_cmd,
2934 int num_of_images) 3145 int num_of_images)
@@ -2936,12 +3147,9 @@ static int be_flash_data(struct be_adapter *adapter,
2936{ 3147{
2937 int status = 0, i, filehdr_size = 0; 3148 int status = 0, i, filehdr_size = 0;
2938 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); 3149 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2939 u32 total_bytes = 0, flash_op;
2940 int num_bytes;
2941 const u8 *p = fw->data; 3150 const u8 *p = fw->data;
2942 struct be_cmd_write_flashrom *req = flash_cmd->va;
2943 const struct flash_comp *pflashcomp; 3151 const struct flash_comp *pflashcomp;
2944 int num_comp, hdr_size; 3152 int num_comp, redboot;
2945 struct flash_section_info *fsec = NULL; 3153 struct flash_section_info *fsec = NULL;
2946 3154
2947 struct flash_comp gen3_flash_types[] = { 3155 struct flash_comp gen3_flash_types[] = {
@@ -2986,7 +3194,7 @@ static int be_flash_data(struct be_adapter *adapter,
2986 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE} 3194 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2987 }; 3195 };
2988 3196
2989 if (adapter->generation == BE_GEN3) { 3197 if (BE3_chip(adapter)) {
2990 pflashcomp = gen3_flash_types; 3198 pflashcomp = gen3_flash_types;
2991 filehdr_size = sizeof(struct flash_file_hdr_g3); 3199 filehdr_size = sizeof(struct flash_file_hdr_g3);
2992 num_comp = ARRAY_SIZE(gen3_flash_types); 3200 num_comp = ARRAY_SIZE(gen3_flash_types);
@@ -2995,6 +3203,7 @@ static int be_flash_data(struct be_adapter *adapter,
2995 filehdr_size = sizeof(struct flash_file_hdr_g2); 3203 filehdr_size = sizeof(struct flash_file_hdr_g2);
2996 num_comp = ARRAY_SIZE(gen2_flash_types); 3204 num_comp = ARRAY_SIZE(gen2_flash_types);
2997 } 3205 }
3206
2998 /* Get flash section info*/ 3207 /* Get flash section info*/
2999 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 3208 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3000 if (!fsec) { 3209 if (!fsec) {
@@ -3010,70 +3219,105 @@ static int be_flash_data(struct be_adapter *adapter,
3010 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0) 3219 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3011 continue; 3220 continue;
3012 3221
3013 if (pflashcomp[i].optype == OPTYPE_PHY_FW) { 3222 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3014 if (!phy_flashing_required(adapter)) 3223 !phy_flashing_required(adapter))
3015 continue; 3224 continue;
3016 }
3017
3018 hdr_size = filehdr_size +
3019 (num_of_images * sizeof(struct image_hdr));
3020 3225
3021 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) && 3226 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3022 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset, 3227 redboot = be_flash_redboot(adapter, fw->data,
3023 pflashcomp[i].size, hdr_size))) 3228 pflashcomp[i].offset, pflashcomp[i].size,
3024 continue; 3229 filehdr_size + img_hdrs_size);
3230 if (!redboot)
3231 continue;
3232 }
3025 3233
3026 /* Flash the component */
3027 p = fw->data; 3234 p = fw->data;
3028 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size; 3235 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3029 if (p + pflashcomp[i].size > fw->data + fw->size) 3236 if (p + pflashcomp[i].size > fw->data + fw->size)
3030 return -1; 3237 return -1;
3031 total_bytes = pflashcomp[i].size; 3238
3032 while (total_bytes) { 3239 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3033 if (total_bytes > 32*1024) 3240 pflashcomp[i].size);
3034 num_bytes = 32*1024; 3241 if (status) {
3035 else 3242 dev_err(&adapter->pdev->dev,
3036 num_bytes = total_bytes; 3243 "Flashing section type %d failed.\n",
3037 total_bytes -= num_bytes; 3244 pflashcomp[i].img_type);
3038 if (!total_bytes) { 3245 return status;
3039 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3040 flash_op = FLASHROM_OPER_PHY_FLASH;
3041 else
3042 flash_op = FLASHROM_OPER_FLASH;
3043 } else {
3044 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
3045 flash_op = FLASHROM_OPER_PHY_SAVE;
3046 else
3047 flash_op = FLASHROM_OPER_SAVE;
3048 }
3049 memcpy(req->params.data_buf, p, num_bytes);
3050 p += num_bytes;
3051 status = be_cmd_write_flashrom(adapter, flash_cmd,
3052 pflashcomp[i].optype, flash_op, num_bytes);
3053 if (status) {
3054 if ((status == ILLEGAL_IOCTL_REQ) &&
3055 (pflashcomp[i].optype ==
3056 OPTYPE_PHY_FW))
3057 break;
3058 dev_err(&adapter->pdev->dev,
3059 "cmd to write to flash rom failed.\n");
3060 return -1;
3061 }
3062 } 3246 }
3063 } 3247 }
3064 return 0; 3248 return 0;
3065} 3249}
3066 3250
3067static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr) 3251static int be_flash_skyhawk(struct be_adapter *adapter,
3252 const struct firmware *fw,
3253 struct be_dma_mem *flash_cmd, int num_of_images)
3068{ 3254{
3069 if (fhdr == NULL) 3255 int status = 0, i, filehdr_size = 0;
3070 return 0; 3256 int img_offset, img_size, img_optype, redboot;
3071 if (fhdr->build[0] == '3') 3257 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3072 return BE_GEN3; 3258 const u8 *p = fw->data;
3073 else if (fhdr->build[0] == '2') 3259 struct flash_section_info *fsec = NULL;
3074 return BE_GEN2; 3260
3075 else 3261 filehdr_size = sizeof(struct flash_file_hdr_g3);
3076 return 0; 3262 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3263 if (!fsec) {
3264 dev_err(&adapter->pdev->dev,
3265 "Invalid Cookie. UFI corrupted ?\n");
3266 return -1;
3267 }
3268
3269 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3270 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3271 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3272
3273 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3274 case IMAGE_FIRMWARE_iSCSI:
3275 img_optype = OPTYPE_ISCSI_ACTIVE;
3276 break;
3277 case IMAGE_BOOT_CODE:
3278 img_optype = OPTYPE_REDBOOT;
3279 break;
3280 case IMAGE_OPTION_ROM_ISCSI:
3281 img_optype = OPTYPE_BIOS;
3282 break;
3283 case IMAGE_OPTION_ROM_PXE:
3284 img_optype = OPTYPE_PXE_BIOS;
3285 break;
3286 case IMAGE_OPTION_ROM_FCoE:
3287 img_optype = OPTYPE_FCOE_BIOS;
3288 break;
3289 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3290 img_optype = OPTYPE_ISCSI_BACKUP;
3291 break;
3292 case IMAGE_NCSI:
3293 img_optype = OPTYPE_NCSI_FW;
3294 break;
3295 default:
3296 continue;
3297 }
3298
3299 if (img_optype == OPTYPE_REDBOOT) {
3300 redboot = be_flash_redboot(adapter, fw->data,
3301 img_offset, img_size,
3302 filehdr_size + img_hdrs_size);
3303 if (!redboot)
3304 continue;
3305 }
3306
3307 p = fw->data;
3308 p += filehdr_size + img_offset + img_hdrs_size;
3309 if (p + img_size > fw->data + fw->size)
3310 return -1;
3311
3312 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3313 if (status) {
3314 dev_err(&adapter->pdev->dev,
3315 "Flashing section type %d failed.\n",
3316 fsec->fsec_entry[i].type);
3317 return status;
3318 }
3319 }
3320 return 0;
3077} 3321}
3078 3322
3079static int lancer_wait_idle(struct be_adapter *adapter) 3323static int lancer_wait_idle(struct be_adapter *adapter)
@@ -3207,6 +3451,28 @@ lancer_fw_exit:
3207 return status; 3451 return status;
3208} 3452}
3209 3453
3454#define UFI_TYPE2 2
3455#define UFI_TYPE3 3
3456#define UFI_TYPE4 4
3457static int be_get_ufi_type(struct be_adapter *adapter,
3458 struct flash_file_hdr_g2 *fhdr)
3459{
3460 if (fhdr == NULL)
3461 goto be_get_ufi_exit;
3462
3463 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3464 return UFI_TYPE4;
3465 else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3466 return UFI_TYPE3;
3467 else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3468 return UFI_TYPE2;
3469
3470be_get_ufi_exit:
3471 dev_err(&adapter->pdev->dev,
3472 "UFI and Interface are not compatible for flashing\n");
3473 return -1;
3474}
3475
3210static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) 3476static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3211{ 3477{
3212 struct flash_file_hdr_g2 *fhdr; 3478 struct flash_file_hdr_g2 *fhdr;
@@ -3214,12 +3480,9 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3214 struct image_hdr *img_hdr_ptr = NULL; 3480 struct image_hdr *img_hdr_ptr = NULL;
3215 struct be_dma_mem flash_cmd; 3481 struct be_dma_mem flash_cmd;
3216 const u8 *p; 3482 const u8 *p;
3217 int status = 0, i = 0, num_imgs = 0; 3483 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3218
3219 p = fw->data;
3220 fhdr = (struct flash_file_hdr_g2 *) p;
3221 3484
3222 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; 3485 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3223 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 3486 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3224 &flash_cmd.dma, GFP_KERNEL); 3487 &flash_cmd.dma, GFP_KERNEL);
3225 if (!flash_cmd.va) { 3488 if (!flash_cmd.va) {
@@ -3229,27 +3492,32 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3229 goto be_fw_exit; 3492 goto be_fw_exit;
3230 } 3493 }
3231 3494
3232 if ((adapter->generation == BE_GEN3) && 3495 p = fw->data;
3233 (get_ufigen_type(fhdr) == BE_GEN3)) { 3496 fhdr = (struct flash_file_hdr_g2 *)p;
3234 fhdr3 = (struct flash_file_hdr_g3 *) fw->data; 3497
3235 num_imgs = le32_to_cpu(fhdr3->num_imgs); 3498 ufi_type = be_get_ufi_type(adapter, fhdr);
3236 for (i = 0; i < num_imgs; i++) { 3499
3237 img_hdr_ptr = (struct image_hdr *) (fw->data + 3500 fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3238 (sizeof(struct flash_file_hdr_g3) + 3501 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3239 i * sizeof(struct image_hdr))); 3502 for (i = 0; i < num_imgs; i++) {
3240 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) 3503 img_hdr_ptr = (struct image_hdr *)(fw->data +
3241 status = be_flash_data(adapter, fw, &flash_cmd, 3504 (sizeof(struct flash_file_hdr_g3) +
3242 num_imgs); 3505 i * sizeof(struct image_hdr)));
3506 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3507 if (ufi_type == UFI_TYPE4)
3508 status = be_flash_skyhawk(adapter, fw,
3509 &flash_cmd, num_imgs);
3510 else if (ufi_type == UFI_TYPE3)
3511 status = be_flash_BEx(adapter, fw, &flash_cmd,
3512 num_imgs);
3243 } 3513 }
3244 } else if ((adapter->generation == BE_GEN2) &&
3245 (get_ufigen_type(fhdr) == BE_GEN2)) {
3246 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3247 } else {
3248 dev_err(&adapter->pdev->dev,
3249 "UFI and Interface are not compatible for flashing\n");
3250 status = -1;
3251 } 3514 }
3252 3515
3516 if (ufi_type == UFI_TYPE2)
3517 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3518 else if (ufi_type == -1)
3519 status = -1;
3520
3253 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, 3521 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3254 flash_cmd.dma); 3522 flash_cmd.dma);
3255 if (status) { 3523 if (status) {
@@ -3344,80 +3612,47 @@ static void be_netdev_init(struct net_device *netdev)
3344 3612
3345static void be_unmap_pci_bars(struct be_adapter *adapter) 3613static void be_unmap_pci_bars(struct be_adapter *adapter)
3346{ 3614{
3347 if (adapter->csr)
3348 iounmap(adapter->csr);
3349 if (adapter->db) 3615 if (adapter->db)
3350 iounmap(adapter->db); 3616 pci_iounmap(adapter->pdev, adapter->db);
3351 if (adapter->roce_db.base)
3352 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3353} 3617}
3354 3618
3355static int lancer_roce_map_pci_bars(struct be_adapter *adapter) 3619static int db_bar(struct be_adapter *adapter)
3356{ 3620{
3357 struct pci_dev *pdev = adapter->pdev; 3621 if (lancer_chip(adapter) || !be_physfn(adapter))
3358 u8 __iomem *addr; 3622 return 0;
3359 3623 else
3360 addr = pci_iomap(pdev, 2, 0); 3624 return 4;
3361 if (addr == NULL) 3625}
3362 return -ENOMEM;
3363 3626
3364 adapter->roce_db.base = addr; 3627static int be_roce_map_pci_bars(struct be_adapter *adapter)
3365 adapter->roce_db.io_addr = pci_resource_start(pdev, 2); 3628{
3366 adapter->roce_db.size = 8192; 3629 if (skyhawk_chip(adapter)) {
3367 adapter->roce_db.total_size = pci_resource_len(pdev, 2); 3630 adapter->roce_db.size = 4096;
3631 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3632 db_bar(adapter));
3633 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3634 db_bar(adapter));
3635 }
3368 return 0; 3636 return 0;
3369} 3637}
3370 3638
3371static int be_map_pci_bars(struct be_adapter *adapter) 3639static int be_map_pci_bars(struct be_adapter *adapter)
3372{ 3640{
3373 u8 __iomem *addr; 3641 u8 __iomem *addr;
3374 int db_reg; 3642 u32 sli_intf;
3375 3643
3376 if (lancer_chip(adapter)) { 3644 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3377 if (be_type_2_3(adapter)) { 3645 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3378 addr = ioremap_nocache( 3646 SLI_INTF_IF_TYPE_SHIFT;
3379 pci_resource_start(adapter->pdev, 0),
3380 pci_resource_len(adapter->pdev, 0));
3381 if (addr == NULL)
3382 return -ENOMEM;
3383 adapter->db = addr;
3384 }
3385 if (adapter->if_type == SLI_INTF_TYPE_3) {
3386 if (lancer_roce_map_pci_bars(adapter))
3387 goto pci_map_err;
3388 }
3389 return 0;
3390 }
3391
3392 if (be_physfn(adapter)) {
3393 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3394 pci_resource_len(adapter->pdev, 2));
3395 if (addr == NULL)
3396 return -ENOMEM;
3397 adapter->csr = addr;
3398 }
3399 3647
3400 if (adapter->generation == BE_GEN2) { 3648 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3401 db_reg = 4;
3402 } else {
3403 if (be_physfn(adapter))
3404 db_reg = 4;
3405 else
3406 db_reg = 0;
3407 }
3408 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3409 pci_resource_len(adapter->pdev, db_reg));
3410 if (addr == NULL) 3649 if (addr == NULL)
3411 goto pci_map_err; 3650 goto pci_map_err;
3412 adapter->db = addr; 3651 adapter->db = addr;
3413 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) { 3652
3414 adapter->roce_db.size = 4096; 3653 be_roce_map_pci_bars(adapter);
3415 adapter->roce_db.io_addr =
3416 pci_resource_start(adapter->pdev, db_reg);
3417 adapter->roce_db.total_size =
3418 pci_resource_len(adapter->pdev, db_reg);
3419 }
3420 return 0; 3654 return 0;
3655
3421pci_map_err: 3656pci_map_err:
3422 be_unmap_pci_bars(adapter); 3657 be_unmap_pci_bars(adapter);
3423 return -ENOMEM; 3658 return -ENOMEM;
@@ -3437,7 +3672,6 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
3437 if (mem->va) 3672 if (mem->va)
3438 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, 3673 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3439 mem->dma); 3674 mem->dma);
3440 kfree(adapter->pmac_id);
3441} 3675}
3442 3676
3443static int be_ctrl_init(struct be_adapter *adapter) 3677static int be_ctrl_init(struct be_adapter *adapter)
@@ -3445,8 +3679,14 @@ static int be_ctrl_init(struct be_adapter *adapter)
3445 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; 3679 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3446 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; 3680 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3447 struct be_dma_mem *rx_filter = &adapter->rx_filter; 3681 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3682 u32 sli_intf;
3448 int status; 3683 int status;
3449 3684
3685 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3686 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3687 SLI_INTF_FAMILY_SHIFT;
3688 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3689
3450 status = be_map_pci_bars(adapter); 3690 status = be_map_pci_bars(adapter);
3451 if (status) 3691 if (status)
3452 goto done; 3692 goto done;
@@ -3473,13 +3713,6 @@ static int be_ctrl_init(struct be_adapter *adapter)
3473 goto free_mbox; 3713 goto free_mbox;
3474 } 3714 }
3475 memset(rx_filter->va, 0, rx_filter->size); 3715 memset(rx_filter->va, 0, rx_filter->size);
3476
3477 /* primary mac needs 1 pmac entry */
3478 adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3479 sizeof(*adapter->pmac_id), GFP_KERNEL);
3480 if (!adapter->pmac_id)
3481 return -ENOMEM;
3482
3483 mutex_init(&adapter->mbox_lock); 3716 mutex_init(&adapter->mbox_lock);
3484 spin_lock_init(&adapter->mcc_lock); 3717 spin_lock_init(&adapter->mcc_lock);
3485 spin_lock_init(&adapter->mcc_cq_lock); 3718 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3512,14 +3745,14 @@ static int be_stats_init(struct be_adapter *adapter)
3512{ 3745{
3513 struct be_dma_mem *cmd = &adapter->stats_cmd; 3746 struct be_dma_mem *cmd = &adapter->stats_cmd;
3514 3747
3515 if (adapter->generation == BE_GEN2) { 3748 if (lancer_chip(adapter))
3749 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3750 else if (BE2_chip(adapter))
3516 cmd->size = sizeof(struct be_cmd_req_get_stats_v0); 3751 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3517 } else { 3752 else
3518 if (lancer_chip(adapter)) 3753 /* BE3 and Skyhawk */
3519 cmd->size = sizeof(struct lancer_cmd_req_pport_stats); 3754 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3520 else 3755
3521 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3522 }
3523 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 3756 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3524 GFP_KERNEL); 3757 GFP_KERNEL);
3525 if (cmd->va == NULL) 3758 if (cmd->va == NULL)
@@ -3528,7 +3761,7 @@ static int be_stats_init(struct be_adapter *adapter)
3528 return 0; 3761 return 0;
3529} 3762}
3530 3763
3531static void __devexit be_remove(struct pci_dev *pdev) 3764static void be_remove(struct pci_dev *pdev)
3532{ 3765{
3533 struct be_adapter *adapter = pci_get_drvdata(pdev); 3766 struct be_adapter *adapter = pci_get_drvdata(pdev);
3534 3767
@@ -3573,6 +3806,9 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
3573 u32 level = 0; 3806 u32 level = 0;
3574 int j; 3807 int j;
3575 3808
3809 if (lancer_chip(adapter))
3810 return 0;
3811
3576 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 3812 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3577 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 3813 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3578 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, 3814 extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
@@ -3598,26 +3834,12 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
3598err: 3834err:
3599 return level; 3835 return level;
3600} 3836}
3837
3601static int be_get_initial_config(struct be_adapter *adapter) 3838static int be_get_initial_config(struct be_adapter *adapter)
3602{ 3839{
3603 int status; 3840 int status;
3604 u32 level; 3841 u32 level;
3605 3842
3606 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3607 &adapter->function_mode, &adapter->function_caps);
3608 if (status)
3609 return status;
3610
3611 if (adapter->function_mode & FLEX10_MODE)
3612 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3613 else
3614 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3615
3616 if (be_physfn(adapter))
3617 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3618 else
3619 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3620
3621 status = be_cmd_get_cntl_attributes(adapter); 3843 status = be_cmd_get_cntl_attributes(adapter);
3622 if (status) 3844 if (status)
3623 return status; 3845 return status;
@@ -3642,55 +3864,6 @@ static int be_get_initial_config(struct be_adapter *adapter)
3642 return 0; 3864 return 0;
3643} 3865}
3644 3866
3645static int be_dev_type_check(struct be_adapter *adapter)
3646{
3647 struct pci_dev *pdev = adapter->pdev;
3648 u32 sli_intf = 0, if_type;
3649
3650 switch (pdev->device) {
3651 case BE_DEVICE_ID1:
3652 case OC_DEVICE_ID1:
3653 adapter->generation = BE_GEN2;
3654 break;
3655 case BE_DEVICE_ID2:
3656 case OC_DEVICE_ID2:
3657 adapter->generation = BE_GEN3;
3658 break;
3659 case OC_DEVICE_ID3:
3660 case OC_DEVICE_ID4:
3661 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3662 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3663 SLI_INTF_IF_TYPE_SHIFT;
3664 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3665 SLI_INTF_IF_TYPE_SHIFT;
3666 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3667 !be_type_2_3(adapter)) {
3668 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3669 return -EINVAL;
3670 }
3671 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3672 SLI_INTF_FAMILY_SHIFT);
3673 adapter->generation = BE_GEN3;
3674 break;
3675 case OC_DEVICE_ID5:
3676 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3677 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3678 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3679 return -EINVAL;
3680 }
3681 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3682 SLI_INTF_FAMILY_SHIFT);
3683 adapter->generation = BE_GEN3;
3684 break;
3685 default:
3686 adapter->generation = 0;
3687 }
3688
3689 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3690 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3691 return 0;
3692}
3693
3694static int lancer_recover_func(struct be_adapter *adapter) 3867static int lancer_recover_func(struct be_adapter *adapter)
3695{ 3868{
3696 int status; 3869 int status;
@@ -3721,8 +3894,9 @@ static int lancer_recover_func(struct be_adapter *adapter)
3721 "Adapter SLIPORT recovery succeeded\n"); 3894 "Adapter SLIPORT recovery succeeded\n");
3722 return 0; 3895 return 0;
3723err: 3896err:
3724 dev_err(&adapter->pdev->dev, 3897 if (adapter->eeh_error)
3725 "Adapter SLIPORT recovery failed\n"); 3898 dev_err(&adapter->pdev->dev,
3899 "Adapter SLIPORT recovery failed\n");
3726 3900
3727 return status; 3901 return status;
3728} 3902}
@@ -3820,8 +3994,7 @@ static inline char *func_name(struct be_adapter *adapter)
3820 return be_physfn(adapter) ? "PF" : "VF"; 3994 return be_physfn(adapter) ? "PF" : "VF";
3821} 3995}
3822 3996
3823static int __devinit be_probe(struct pci_dev *pdev, 3997static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
3824 const struct pci_device_id *pdev_id)
3825{ 3998{
3826 int status = 0; 3999 int status = 0;
3827 struct be_adapter *adapter; 4000 struct be_adapter *adapter;
@@ -3845,11 +4018,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
3845 adapter = netdev_priv(netdev); 4018 adapter = netdev_priv(netdev);
3846 adapter->pdev = pdev; 4019 adapter->pdev = pdev;
3847 pci_set_drvdata(pdev, adapter); 4020 pci_set_drvdata(pdev, adapter);
3848
3849 status = be_dev_type_check(adapter);
3850 if (status)
3851 goto free_netdev;
3852
3853 adapter->netdev = netdev; 4021 adapter->netdev = netdev;
3854 SET_NETDEV_DEV(netdev, &pdev->dev); 4022 SET_NETDEV_DEV(netdev, &pdev->dev);
3855 4023
@@ -4023,9 +4191,6 @@ static void be_shutdown(struct pci_dev *pdev)
4023 4191
4024 netif_device_detach(adapter->netdev); 4192 netif_device_detach(adapter->netdev);
4025 4193
4026 if (adapter->wol)
4027 be_setup_wol(adapter, true);
4028
4029 be_cmd_reset_function(adapter); 4194 be_cmd_reset_function(adapter);
4030 4195
4031 pci_disable_device(pdev); 4196 pci_disable_device(pdev);
@@ -4061,9 +4226,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4061 4226
4062 /* The error could cause the FW to trigger a flash debug dump. 4227 /* The error could cause the FW to trigger a flash debug dump.
4063 * Resetting the card while flash dump is in progress 4228 * Resetting the card while flash dump is in progress
4064 * can cause it not to recover; wait for it to finish 4229 * can cause it not to recover; wait for it to finish.
4230 * Wait only for first function as it is needed only once per
4231 * adapter.
4065 */ 4232 */
4066 ssleep(30); 4233 if (pdev->devfn == 0)
4234 ssleep(30);
4235
4067 return PCI_ERS_RESULT_NEED_RESET; 4236 return PCI_ERS_RESULT_NEED_RESET;
4068} 4237}
4069 4238
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index deecc44b3617..55d32aa0a093 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -47,10 +47,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
47 dev_info.dpp_unmapped_len = 0; 47 dev_info.dpp_unmapped_len = 0;
48 } 48 }
49 dev_info.pdev = adapter->pdev; 49 dev_info.pdev = adapter->pdev;
50 if (adapter->sli_family == SKYHAWK_SLI_FAMILY) 50 dev_info.db = adapter->db;
51 dev_info.db = adapter->db;
52 else
53 dev_info.db = adapter->roce_db.base;
54 dev_info.unmapped_db = adapter->roce_db.io_addr; 51 dev_info.unmapped_db = adapter->roce_db.io_addr;
55 dev_info.db_page_size = adapter->roce_db.size; 52 dev_info.db_page_size = adapter->roce_db.size;
56 dev_info.db_total_size = adapter->roce_db.total_size; 53 dev_info.db_total_size = adapter->roce_db.total_size;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 94b7bfcdb24e..8db1c06008de 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -665,7 +665,7 @@ static void ethoc_mdio_poll(struct net_device *dev)
665{ 665{
666} 666}
667 667
668static int __devinit ethoc_mdio_probe(struct net_device *dev) 668static int ethoc_mdio_probe(struct net_device *dev)
669{ 669{
670 struct ethoc *priv = netdev_priv(dev); 670 struct ethoc *priv = netdev_priv(dev);
671 struct phy_device *phy; 671 struct phy_device *phy;
@@ -905,7 +905,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
905 * ethoc_probe - initialize OpenCores ethernet MAC 905 * ethoc_probe - initialize OpenCores ethernet MAC
906 * pdev: platform device 906 * pdev: platform device
907 */ 907 */
908static int __devinit ethoc_probe(struct platform_device *pdev) 908static int ethoc_probe(struct platform_device *pdev)
909{ 909{
910 struct net_device *netdev = NULL; 910 struct net_device *netdev = NULL;
911 struct resource *res = NULL; 911 struct resource *res = NULL;
@@ -1143,7 +1143,7 @@ out:
1143 * ethoc_remove - shutdown OpenCores ethernet MAC 1143 * ethoc_remove - shutdown OpenCores ethernet MAC
1144 * @pdev: platform device 1144 * @pdev: platform device
1145 */ 1145 */
1146static int __devexit ethoc_remove(struct platform_device *pdev) 1146static int ethoc_remove(struct platform_device *pdev)
1147{ 1147{
1148 struct net_device *netdev = platform_get_drvdata(pdev); 1148 struct net_device *netdev = platform_get_drvdata(pdev);
1149 struct ethoc *priv = netdev_priv(netdev); 1149 struct ethoc *priv = netdev_priv(netdev);
@@ -1190,7 +1190,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
1190 1190
1191static struct platform_driver ethoc_driver = { 1191static struct platform_driver ethoc_driver = {
1192 .probe = ethoc_probe, 1192 .probe = ethoc_probe,
1193 .remove = __devexit_p(ethoc_remove), 1193 .remove = ethoc_remove,
1194 .suspend = ethoc_suspend, 1194 .suspend = ethoc_suspend,
1195 .resume = ethoc_resume, 1195 .resume = ethoc_resume,
1196 .driver = { 1196 .driver = {
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 0e4a0ac86aa8..c706b7a9397e 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
92#include <asm/byteorder.h> 92#include <asm/byteorder.h>
93 93
94/* These identify the driver base version and may not be removed. */ 94/* These identify the driver base version and may not be removed. */
95static const char version[] __devinitconst = 95static const char version[] =
96 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; 96 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
97 97
98 98
@@ -150,7 +150,7 @@ struct chip_info {
150 int flags; 150 int flags;
151}; 151};
152 152
153static const struct chip_info skel_netdrv_tbl[] __devinitconst = { 153static const struct chip_info skel_netdrv_tbl[] = {
154 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, 154 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
155 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR }, 155 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
156 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR }, 156 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
@@ -477,8 +477,8 @@ static const struct net_device_ops netdev_ops = {
477 .ndo_validate_addr = eth_validate_addr, 477 .ndo_validate_addr = eth_validate_addr,
478}; 478};
479 479
480static int __devinit fealnx_init_one(struct pci_dev *pdev, 480static int fealnx_init_one(struct pci_dev *pdev,
481 const struct pci_device_id *ent) 481 const struct pci_device_id *ent)
482{ 482{
483 struct netdev_private *np; 483 struct netdev_private *np;
484 int i, option, err, irq; 484 int i, option, err, irq;
@@ -684,7 +684,7 @@ err_out_res:
684} 684}
685 685
686 686
687static void __devexit fealnx_remove_one(struct pci_dev *pdev) 687static void fealnx_remove_one(struct pci_dev *pdev)
688{ 688{
689 struct net_device *dev = pci_get_drvdata(pdev); 689 struct net_device *dev = pci_get_drvdata(pdev);
690 690
@@ -1950,7 +1950,7 @@ static struct pci_driver fealnx_driver = {
1950 .name = "fealnx", 1950 .name = "fealnx",
1951 .id_table = fealnx_pci_tbl, 1951 .id_table = fealnx_pci_tbl,
1952 .probe = fealnx_init_one, 1952 .probe = fealnx_init_one,
1953 .remove = __devexit_p(fealnx_remove_one), 1953 .remove = fealnx_remove_one,
1954}; 1954};
1955 1955
1956static int __init fealnx_init(void) 1956static int __init fealnx_init(void)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index feff51664dcf..5ba6e1cbd346 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -92,4 +92,13 @@ config GIANFAR
92 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, 92 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
93 and MPC86xx family of chips, and the FEC on the 8540. 93 and MPC86xx family of chips, and the FEC on the 8540.
94 94
95config FEC_PTP
96 bool "PTP Hardware Clock (PHC)"
97 depends on FEC && ARCH_MXC
98 select PTP_1588_CLOCK
99 default y if SOC_IMX6Q
100 --help---
101 Say Y here if you want to use PTP Hardware Clock (PHC) in the
102 driver. Only the basic clock operations have been implemented.
103
95endif # NET_VENDOR_FREESCALE 104endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 3d1839afff65..d4d19b3d00ae 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_FEC) += fec.o 5obj-$(CONFIG_FEC) += fec.o
6obj-$(CONFIG_FEC_PTP) += fec_ptp.o
6obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o 7obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
7ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) 8ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
8 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o 9 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index fffd20528b5d..0704bcab178a 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -140,21 +140,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
140#endif 140#endif
141#endif /* CONFIG_M5272 */ 141#endif /* CONFIG_M5272 */
142 142
143/* The number of Tx and Rx buffers. These are allocated from the page
144 * pool. The code may assume these are power of two, so it it best
145 * to keep them that size.
146 * We don't need to allocate pages for the transmitter. We just use
147 * the skbuffer directly.
148 */
149#define FEC_ENET_RX_PAGES 8
150#define FEC_ENET_RX_FRSIZE 2048
151#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
152#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
153#define FEC_ENET_TX_FRSIZE 2048
154#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
155#define TX_RING_SIZE 16 /* Must be power of two */
156#define TX_RING_MOD_MASK 15 /* for this to work */
157
158#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE) 143#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
159#error "FEC: descriptor ring size constants too large" 144#error "FEC: descriptor ring size constants too large"
160#endif 145#endif
@@ -179,9 +164,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
179#define PKT_MINBUF_SIZE 64 164#define PKT_MINBUF_SIZE 64
180#define PKT_MAXBLR_SIZE 1520 165#define PKT_MAXBLR_SIZE 1520
181 166
182/* This device has up to three irqs on some platforms */
183#define FEC_IRQ_NUM 3
184
185/* 167/*
186 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 168 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
187 * size bits. Other FEC hardware does not, so we need to take that into 169 * size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
194#define OPT_FRAME_SIZE 0 176#define OPT_FRAME_SIZE 0
195#endif 177#endif
196 178
197/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
198 * tx_bd_base always point to the base of the buffer descriptors. The
199 * cur_rx and cur_tx point to the currently available buffer.
200 * The dirty_tx tracks the current buffer that is being sent by the
201 * controller. The cur_tx and dirty_tx are equal under both completely
202 * empty and completely full conditions. The empty/ready indicator in
203 * the buffer descriptor determines the actual condition.
204 */
205struct fec_enet_private {
206 /* Hardware registers of the FEC device */
207 void __iomem *hwp;
208
209 struct net_device *netdev;
210
211 struct clk *clk_ipg;
212 struct clk *clk_ahb;
213
214 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
215 unsigned char *tx_bounce[TX_RING_SIZE];
216 struct sk_buff* tx_skbuff[TX_RING_SIZE];
217 struct sk_buff* rx_skbuff[RX_RING_SIZE];
218 ushort skb_cur;
219 ushort skb_dirty;
220
221 /* CPM dual port RAM relative addresses */
222 dma_addr_t bd_dma;
223 /* Address of Rx and Tx buffers */
224 struct bufdesc *rx_bd_base;
225 struct bufdesc *tx_bd_base;
226 /* The next free ring entry */
227 struct bufdesc *cur_rx, *cur_tx;
228 /* The ring entries to be free()ed */
229 struct bufdesc *dirty_tx;
230
231 uint tx_full;
232 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
233 spinlock_t hw_lock;
234
235 struct platform_device *pdev;
236
237 int opened;
238 int dev_id;
239
240 /* Phylib and MDIO interface */
241 struct mii_bus *mii_bus;
242 struct phy_device *phy_dev;
243 int mii_timeout;
244 uint phy_speed;
245 phy_interface_t phy_interface;
246 int link;
247 int full_duplex;
248 struct completion mdio_done;
249 int irq[FEC_IRQ_NUM];
250};
251
252/* FEC MII MMFR bits definition */ 179/* FEC MII MMFR bits definition */
253#define FEC_MMFR_ST (1 << 30) 180#define FEC_MMFR_ST (1 << 30)
254#define FEC_MMFR_OP_READ (2 << 28) 181#define FEC_MMFR_OP_READ (2 << 28)
@@ -353,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
353 | BD_ENET_TX_LAST | BD_ENET_TX_TC); 280 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
354 bdp->cbd_sc = status; 281 bdp->cbd_sc = status;
355 282
283#ifdef CONFIG_FEC_PTP
284 bdp->cbd_bdu = 0;
285 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
286 fep->hwts_tx_en)) {
287 bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
288 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
289 } else {
290
291 bdp->cbd_esc = BD_ENET_TX_INT;
292 }
293#endif
356 /* Trigger transmission start */ 294 /* Trigger transmission start */
357 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 295 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
358 296
@@ -510,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
510 writel(1 << 8, fep->hwp + FEC_X_WMRK); 448 writel(1 << 8, fep->hwp + FEC_X_WMRK);
511 } 449 }
512 450
451#ifdef CONFIG_FEC_PTP
452 ecntl |= (1 << 4);
453#endif
454
513 /* And last, enable the transmit and receive processing */ 455 /* And last, enable the transmit and receive processing */
514 writel(ecntl, fep->hwp + FEC_ECNTRL); 456 writel(ecntl, fep->hwp + FEC_ECNTRL);
515 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 457 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
516 458
459#ifdef CONFIG_FEC_PTP
460 fec_ptp_start_cyclecounter(ndev);
461#endif
517 /* Enable interrupts we wish to service */ 462 /* Enable interrupts we wish to service */
518 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 463 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
519} 464}
@@ -599,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
599 ndev->stats.tx_packets++; 544 ndev->stats.tx_packets++;
600 } 545 }
601 546
547#ifdef CONFIG_FEC_PTP
548 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
549 struct skb_shared_hwtstamps shhwtstamps;
550 unsigned long flags;
551
552 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
553 spin_lock_irqsave(&fep->tmreg_lock, flags);
554 shhwtstamps.hwtstamp = ns_to_ktime(
555 timecounter_cyc2time(&fep->tc, bdp->ts));
556 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
557 skb_tstamp_tx(skb, &shhwtstamps);
558 }
559#endif
602 if (status & BD_ENET_TX_READY) 560 if (status & BD_ENET_TX_READY)
603 printk("HEY! Enet xmit interrupt and TX_READY.\n"); 561 printk("HEY! Enet xmit interrupt and TX_READY.\n");
604 562
@@ -725,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
725 skb_put(skb, pkt_len - 4); /* Make room */ 683 skb_put(skb, pkt_len - 4); /* Make room */
726 skb_copy_to_linear_data(skb, data, pkt_len - 4); 684 skb_copy_to_linear_data(skb, data, pkt_len - 4);
727 skb->protocol = eth_type_trans(skb, ndev); 685 skb->protocol = eth_type_trans(skb, ndev);
686#ifdef CONFIG_FEC_PTP
687 /* Get receive timestamp from the skb */
688 if (fep->hwts_rx_en) {
689 struct skb_shared_hwtstamps *shhwtstamps =
690 skb_hwtstamps(skb);
691 unsigned long flags;
692
693 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
694
695 spin_lock_irqsave(&fep->tmreg_lock, flags);
696 shhwtstamps->hwtstamp = ns_to_ktime(
697 timecounter_cyc2time(&fep->tc, bdp->ts));
698 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
699 }
700#endif
728 if (!skb_defer_rx_timestamp(skb)) 701 if (!skb_defer_rx_timestamp(skb))
729 netif_rx(skb); 702 netif_rx(skb);
730 } 703 }
@@ -739,6 +712,12 @@ rx_processing_done:
739 status |= BD_ENET_RX_EMPTY; 712 status |= BD_ENET_RX_EMPTY;
740 bdp->cbd_sc = status; 713 bdp->cbd_sc = status;
741 714
715#ifdef CONFIG_FEC_PTP
716 bdp->cbd_esc = BD_ENET_RX_INT;
717 bdp->cbd_prot = 0;
718 bdp->cbd_bdu = 0;
719#endif
720
742 /* Update BD pointer to next entry */ 721 /* Update BD pointer to next entry */
743 if (status & BD_ENET_RX_WRAP) 722 if (status & BD_ENET_RX_WRAP)
744 bdp = fep->rx_bd_base; 723 bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1178 if (!phydev) 1157 if (!phydev)
1179 return -ENODEV; 1158 return -ENODEV;
1180 1159
1160#ifdef CONFIG_FEC_PTP
1161 if (cmd == SIOCSHWTSTAMP)
1162 return fec_ptp_ioctl(ndev, rq, cmd);
1163#endif
1181 return phy_mii_ioctl(phydev, rq, cmd); 1164 return phy_mii_ioctl(phydev, rq, cmd);
1182} 1165}
1183 1166
@@ -1224,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1224 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, 1207 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
1225 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); 1208 FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
1226 bdp->cbd_sc = BD_ENET_RX_EMPTY; 1209 bdp->cbd_sc = BD_ENET_RX_EMPTY;
1210#ifdef CONFIG_FEC_PTP
1211 bdp->cbd_esc = BD_ENET_RX_INT;
1212#endif
1227 bdp++; 1213 bdp++;
1228 } 1214 }
1229 1215
@@ -1237,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1237 1223
1238 bdp->cbd_sc = 0; 1224 bdp->cbd_sc = 0;
1239 bdp->cbd_bufaddr = 0; 1225 bdp->cbd_bufaddr = 0;
1226
1227#ifdef CONFIG_FEC_PTP
1228 bdp->cbd_esc = BD_ENET_RX_INT;
1229#endif
1240 bdp++; 1230 bdp++;
1241 } 1231 }
1242 1232
@@ -1494,7 +1484,7 @@ static int fec_enet_init(struct net_device *ndev)
1494} 1484}
1495 1485
1496#ifdef CONFIG_OF 1486#ifdef CONFIG_OF
1497static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev) 1487static int fec_get_phy_mode_dt(struct platform_device *pdev)
1498{ 1488{
1499 struct device_node *np = pdev->dev.of_node; 1489 struct device_node *np = pdev->dev.of_node;
1500 1490
@@ -1504,7 +1494,7 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
1504 return -ENODEV; 1494 return -ENODEV;
1505} 1495}
1506 1496
1507static void __devinit fec_reset_phy(struct platform_device *pdev) 1497static void fec_reset_phy(struct platform_device *pdev)
1508{ 1498{
1509 int err, phy_reset; 1499 int err, phy_reset;
1510 int msec = 1; 1500 int msec = 1;
@@ -1543,7 +1533,7 @@ static inline void fec_reset_phy(struct platform_device *pdev)
1543} 1533}
1544#endif /* CONFIG_OF */ 1534#endif /* CONFIG_OF */
1545 1535
1546static int __devinit 1536static int
1547fec_probe(struct platform_device *pdev) 1537fec_probe(struct platform_device *pdev)
1548{ 1538{
1549 struct fec_enet_private *fep; 1539 struct fec_enet_private *fep;
@@ -1638,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
1638 goto failed_clk; 1628 goto failed_clk;
1639 } 1629 }
1640 1630
1631#ifdef CONFIG_FEC_PTP
1632 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
1633 if (IS_ERR(fep->clk_ptp)) {
1634 ret = PTR_ERR(fep->clk_ptp);
1635 goto failed_clk;
1636 }
1637#endif
1638
1641 clk_prepare_enable(fep->clk_ahb); 1639 clk_prepare_enable(fep->clk_ahb);
1642 clk_prepare_enable(fep->clk_ipg); 1640 clk_prepare_enable(fep->clk_ipg);
1643 1641#ifdef CONFIG_FEC_PTP
1642 clk_prepare_enable(fep->clk_ptp);
1643#endif
1644 reg_phy = devm_regulator_get(&pdev->dev, "phy"); 1644 reg_phy = devm_regulator_get(&pdev->dev, "phy");
1645 if (!IS_ERR(reg_phy)) { 1645 if (!IS_ERR(reg_phy)) {
1646 ret = regulator_enable(reg_phy); 1646 ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
1668 if (ret) 1668 if (ret)
1669 goto failed_register; 1669 goto failed_register;
1670 1670
1671#ifdef CONFIG_FEC_PTP
1672 fec_ptp_init(ndev, pdev);
1673#endif
1674
1671 return 0; 1675 return 0;
1672 1676
1673failed_register: 1677failed_register:
@@ -1677,6 +1681,9 @@ failed_init:
1677failed_regulator: 1681failed_regulator:
1678 clk_disable_unprepare(fep->clk_ahb); 1682 clk_disable_unprepare(fep->clk_ahb);
1679 clk_disable_unprepare(fep->clk_ipg); 1683 clk_disable_unprepare(fep->clk_ipg);
1684#ifdef CONFIG_FEC_PTP
1685 clk_disable_unprepare(fep->clk_ptp);
1686#endif
1680failed_pin: 1687failed_pin:
1681failed_clk: 1688failed_clk:
1682 for (i = 0; i < FEC_IRQ_NUM; i++) { 1689 for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1694,7 +1701,7 @@ failed_alloc_etherdev:
1694 return ret; 1701 return ret;
1695} 1702}
1696 1703
1697static int __devexit 1704static int
1698fec_drv_remove(struct platform_device *pdev) 1705fec_drv_remove(struct platform_device *pdev)
1699{ 1706{
1700 struct net_device *ndev = platform_get_drvdata(pdev); 1707 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1709,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
1709 if (irq > 0) 1716 if (irq > 0)
1710 free_irq(irq, ndev); 1717 free_irq(irq, ndev);
1711 } 1718 }
1719#ifdef CONFIG_FEC_PTP
1720 del_timer_sync(&fep->time_keep);
1721 clk_disable_unprepare(fep->clk_ptp);
1722 if (fep->ptp_clock)
1723 ptp_clock_unregister(fep->ptp_clock);
1724#endif
1712 clk_disable_unprepare(fep->clk_ahb); 1725 clk_disable_unprepare(fep->clk_ahb);
1713 clk_disable_unprepare(fep->clk_ipg); 1726 clk_disable_unprepare(fep->clk_ipg);
1714 iounmap(fep->hwp); 1727 iounmap(fep->hwp);
@@ -1777,7 +1790,7 @@ static struct platform_driver fec_driver = {
1777 }, 1790 },
1778 .id_table = fec_devtype, 1791 .id_table = fec_devtype,
1779 .probe = fec_probe, 1792 .probe = fec_probe,
1780 .remove = __devexit_p(fec_drv_remove), 1793 .remove = fec_drv_remove,
1781}; 1794};
1782 1795
1783module_platform_driver(fec_driver); 1796module_platform_driver(fec_driver);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8408c627b195..c5a3bc1475c7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,6 +13,12 @@
13#define FEC_H 13#define FEC_H
14/****************************************************************************/ 14/****************************************************************************/
15 15
16#ifdef CONFIG_FEC_PTP
17#include <linux/clocksource.h>
18#include <linux/net_tstamp.h>
19#include <linux/ptp_clock_kernel.h>
20#endif
21
16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 22#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 23 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
18 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 24 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@ struct bufdesc {
88 unsigned short cbd_datlen; /* Data length */ 94 unsigned short cbd_datlen; /* Data length */
89 unsigned short cbd_sc; /* Control and status info */ 95 unsigned short cbd_sc; /* Control and status info */
90 unsigned long cbd_bufaddr; /* Buffer address */ 96 unsigned long cbd_bufaddr; /* Buffer address */
97#ifdef CONFIG_FEC_PTP
98 unsigned long cbd_esc;
99 unsigned long cbd_prot;
100 unsigned long cbd_bdu;
101 unsigned long ts;
102 unsigned short res0[4];
103#endif
91}; 104};
92#else 105#else
93struct bufdesc { 106struct bufdesc {
@@ -147,6 +160,112 @@ struct bufdesc {
147#define BD_ENET_TX_CSL ((ushort)0x0001) 160#define BD_ENET_TX_CSL ((ushort)0x0001)
148#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ 161#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
149 162
163/*enhanced buffer desciptor control/status used by Ethernet transmit*/
164#define BD_ENET_TX_INT 0x40000000
165#define BD_ENET_TX_TS 0x20000000
166
167
168/* This device has up to three irqs on some platforms */
169#define FEC_IRQ_NUM 3
170
171/* The number of Tx and Rx buffers. These are allocated from the page
172 * pool. The code may assume these are power of two, so it it best
173 * to keep them that size.
174 * We don't need to allocate pages for the transmitter. We just use
175 * the skbuffer directly.
176 */
177
178#define FEC_ENET_RX_PAGES 8
179#define FEC_ENET_RX_FRSIZE 2048
180#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
181#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
182#define FEC_ENET_TX_FRSIZE 2048
183#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
184#define TX_RING_SIZE 16 /* Must be power of two */
185#define TX_RING_MOD_MASK 15 /* for this to work */
186
187#define BD_ENET_RX_INT 0x00800000
188#define BD_ENET_RX_PTP ((ushort)0x0400)
189
190/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
191 * tx_bd_base always point to the base of the buffer descriptors. The
192 * cur_rx and cur_tx point to the currently available buffer.
193 * The dirty_tx tracks the current buffer that is being sent by the
194 * controller. The cur_tx and dirty_tx are equal under both completely
195 * empty and completely full conditions. The empty/ready indicator in
196 * the buffer descriptor determines the actual condition.
197 */
198struct fec_enet_private {
199 /* Hardware registers of the FEC device */
200 void __iomem *hwp;
201
202 struct net_device *netdev;
203
204 struct clk *clk_ipg;
205 struct clk *clk_ahb;
206#ifdef CONFIG_FEC_PTP
207 struct clk *clk_ptp;
208#endif
209
210 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
211 unsigned char *tx_bounce[TX_RING_SIZE];
212 struct sk_buff *tx_skbuff[TX_RING_SIZE];
213 struct sk_buff *rx_skbuff[RX_RING_SIZE];
214 ushort skb_cur;
215 ushort skb_dirty;
216
217 /* CPM dual port RAM relative addresses */
218 dma_addr_t bd_dma;
219 /* Address of Rx and Tx buffers */
220 struct bufdesc *rx_bd_base;
221 struct bufdesc *tx_bd_base;
222 /* The next free ring entry */
223 struct bufdesc *cur_rx, *cur_tx;
224 /* The ring entries to be free()ed */
225 struct bufdesc *dirty_tx;
226
227 uint tx_full;
228 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
229 spinlock_t hw_lock;
230
231 struct platform_device *pdev;
232
233 int opened;
234 int dev_id;
235
236 /* Phylib and MDIO interface */
237 struct mii_bus *mii_bus;
238 struct phy_device *phy_dev;
239 int mii_timeout;
240 uint phy_speed;
241 phy_interface_t phy_interface;
242 int link;
243 int full_duplex;
244 struct completion mdio_done;
245 int irq[FEC_IRQ_NUM];
246
247#ifdef CONFIG_FEC_PTP
248 struct ptp_clock *ptp_clock;
249 struct ptp_clock_info ptp_caps;
250 unsigned long last_overflow_check;
251 spinlock_t tmreg_lock;
252 struct cyclecounter cc;
253 struct timecounter tc;
254 int rx_hwtstamp_filter;
255 u32 base_incval;
256 u32 cycle_speed;
257 int hwts_rx_en;
258 int hwts_tx_en;
259 struct timer_list time_keep;
260#endif
261
262};
263
264#ifdef CONFIG_FEC_PTP
265void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
266void fec_ptp_start_cyclecounter(struct net_device *ndev);
267int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
268#endif
150 269
151/****************************************************************************/ 270/****************************************************************************/
152#endif /* FEC_H */ 271#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 2933d08b036e..817d081d2cd8 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -845,7 +845,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = {
845/* OF Driver */ 845/* OF Driver */
846/* ======================================================================== */ 846/* ======================================================================== */
847 847
848static int __devinit mpc52xx_fec_probe(struct platform_device *op) 848static int mpc52xx_fec_probe(struct platform_device *op)
849{ 849{
850 int rv; 850 int rv;
851 struct net_device *ndev; 851 struct net_device *ndev;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644
index 000000000000..c40526c78c20
--- /dev/null
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -0,0 +1,383 @@
1/*
2 * Fast Ethernet Controller (ENET) PTP driver for MX6x.
3 *
4 * Copyright (C) 2012 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/string.h>
23#include <linux/ptrace.h>
24#include <linux/errno.h>
25#include <linux/ioport.h>
26#include <linux/slab.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/spinlock.h>
35#include <linux/workqueue.h>
36#include <linux/bitops.h>
37#include <linux/io.h>
38#include <linux/irq.h>
39#include <linux/clk.h>
40#include <linux/platform_device.h>
41#include <linux/phy.h>
42#include <linux/fec.h>
43#include <linux/of.h>
44#include <linux/of_device.h>
45#include <linux/of_gpio.h>
46#include <linux/of_net.h>
47
48#include "fec.h"
49
50/* FEC 1588 register bits */
51#define FEC_T_CTRL_SLAVE 0x00002000
52#define FEC_T_CTRL_CAPTURE 0x00000800
53#define FEC_T_CTRL_RESTART 0x00000200
54#define FEC_T_CTRL_PERIOD_RST 0x00000030
55#define FEC_T_CTRL_PERIOD_EN 0x00000010
56#define FEC_T_CTRL_ENABLE 0x00000001
57
58#define FEC_T_INC_MASK 0x0000007f
59#define FEC_T_INC_OFFSET 0
60#define FEC_T_INC_CORR_MASK 0x00007f00
61#define FEC_T_INC_CORR_OFFSET 8
62
63#define FEC_ATIME_CTRL 0x400
64#define FEC_ATIME 0x404
65#define FEC_ATIME_EVT_OFFSET 0x408
66#define FEC_ATIME_EVT_PERIOD 0x40c
67#define FEC_ATIME_CORR 0x410
68#define FEC_ATIME_INC 0x414
69#define FEC_TS_TIMESTAMP 0x418
70
71#define FEC_CC_MULT (1 << 31)
72/**
73 * fec_ptp_read - read raw cycle counter (to be used by time counter)
74 * @cc: the cyclecounter structure
75 *
76 * this function reads the cyclecounter registers and is called by the
77 * cyclecounter structure used to construct a ns counter from the
78 * arbitrary fixed point registers
79 */
80static cycle_t fec_ptp_read(const struct cyclecounter *cc)
81{
82 struct fec_enet_private *fep =
83 container_of(cc, struct fec_enet_private, cc);
84 u32 tempval;
85
86 tempval = readl(fep->hwp + FEC_ATIME_CTRL);
87 tempval |= FEC_T_CTRL_CAPTURE;
88 writel(tempval, fep->hwp + FEC_ATIME_CTRL);
89
90 return readl(fep->hwp + FEC_ATIME);
91}
92
93/**
94 * fec_ptp_start_cyclecounter - create the cycle counter from hw
95 * @ndev: network device
96 *
97 * this function initializes the timecounter and cyclecounter
98 * structures for use in generated a ns counter from the arbitrary
99 * fixed point cycles registers in the hardware.
100 */
101void fec_ptp_start_cyclecounter(struct net_device *ndev)
102{
103 struct fec_enet_private *fep = netdev_priv(ndev);
104 unsigned long flags;
105 int inc;
106
107 inc = 1000000000 / clk_get_rate(fep->clk_ptp);
108
109 /* grab the ptp lock */
110 spin_lock_irqsave(&fep->tmreg_lock, flags);
111
112 /* 1ns counter */
113 writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
114
115 /* use free running count */
116 writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
117
118 writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
119
120 memset(&fep->cc, 0, sizeof(fep->cc));
121 fep->cc.read = fec_ptp_read;
122 fep->cc.mask = CLOCKSOURCE_MASK(32);
123 fep->cc.shift = 31;
124 fep->cc.mult = FEC_CC_MULT;
125
126 /* reset the ns time counter */
127 timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
128
129 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
130}
131
132/**
133 * fec_ptp_adjfreq - adjust ptp cycle frequency
134 * @ptp: the ptp clock structure
135 * @ppb: parts per billion adjustment from base
136 *
137 * Adjust the frequency of the ptp cycle counter by the
138 * indicated ppb from the base frequency.
139 *
140 * Because ENET hardware frequency adjust is complex,
141 * using software method to do that.
142 */
143static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
144{
145 u64 diff;
146 unsigned long flags;
147 int neg_adj = 0;
148 u32 mult = FEC_CC_MULT;
149
150 struct fec_enet_private *fep =
151 container_of(ptp, struct fec_enet_private, ptp_caps);
152
153 if (ppb < 0) {
154 ppb = -ppb;
155 neg_adj = 1;
156 }
157
158 diff = mult;
159 diff *= ppb;
160 diff = div_u64(diff, 1000000000ULL);
161
162 spin_lock_irqsave(&fep->tmreg_lock, flags);
163 /*
164 * dummy read to set cycle_last in tc to now.
165 * So use adjusted mult to calculate when next call
166 * timercounter_read.
167 */
168 timecounter_read(&fep->tc);
169
170 fep->cc.mult = neg_adj ? mult - diff : mult + diff;
171
172 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
173
174 return 0;
175}
176
177/**
178 * fec_ptp_adjtime
179 * @ptp: the ptp clock structure
180 * @delta: offset to adjust the cycle counter by
181 *
182 * adjust the timer by resetting the timecounter structure.
183 */
184static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
185{
186 struct fec_enet_private *fep =
187 container_of(ptp, struct fec_enet_private, ptp_caps);
188 unsigned long flags;
189 u64 now;
190
191 spin_lock_irqsave(&fep->tmreg_lock, flags);
192
193 now = timecounter_read(&fep->tc);
194 now += delta;
195
196 /* reset the timecounter */
197 timecounter_init(&fep->tc, &fep->cc, now);
198
199 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
200
201 return 0;
202}
203
204/**
205 * fec_ptp_gettime
206 * @ptp: the ptp clock structure
207 * @ts: timespec structure to hold the current time value
208 *
209 * read the timecounter and return the correct value on ns,
210 * after converting it into a struct timespec.
211 */
212static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
213{
214 struct fec_enet_private *adapter =
215 container_of(ptp, struct fec_enet_private, ptp_caps);
216 u64 ns;
217 u32 remainder;
218 unsigned long flags;
219
220 spin_lock_irqsave(&adapter->tmreg_lock, flags);
221 ns = timecounter_read(&adapter->tc);
222 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
223
224 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
225 ts->tv_nsec = remainder;
226
227 return 0;
228}
229
230/**
231 * fec_ptp_settime
232 * @ptp: the ptp clock structure
233 * @ts: the timespec containing the new time for the cycle counter
234 *
235 * reset the timecounter to use a new base value instead of the kernel
236 * wall timer value.
237 */
238static int fec_ptp_settime(struct ptp_clock_info *ptp,
239 const struct timespec *ts)
240{
241 struct fec_enet_private *fep =
242 container_of(ptp, struct fec_enet_private, ptp_caps);
243
244 u64 ns;
245 unsigned long flags;
246
247 ns = ts->tv_sec * 1000000000ULL;
248 ns += ts->tv_nsec;
249
250 spin_lock_irqsave(&fep->tmreg_lock, flags);
251 timecounter_init(&fep->tc, &fep->cc, ns);
252 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
253 return 0;
254}
255
256/**
257 * fec_ptp_enable
258 * @ptp: the ptp clock structure
259 * @rq: the requested feature to change
260 * @on: whether to enable or disable the feature
261 *
262 */
263static int fec_ptp_enable(struct ptp_clock_info *ptp,
264 struct ptp_clock_request *rq, int on)
265{
266 return -EOPNOTSUPP;
267}
268
269/**
270 * fec_ptp_hwtstamp_ioctl - control hardware time stamping
271 * @ndev: pointer to net_device
272 * @ifreq: ioctl data
273 * @cmd: particular ioctl requested
274 */
275int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
276{
277 struct fec_enet_private *fep = netdev_priv(ndev);
278
279 struct hwtstamp_config config;
280
281 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
282 return -EFAULT;
283
284 /* reserved for future extensions */
285 if (config.flags)
286 return -EINVAL;
287
288 switch (config.tx_type) {
289 case HWTSTAMP_TX_OFF:
290 fep->hwts_tx_en = 0;
291 break;
292 case HWTSTAMP_TX_ON:
293 fep->hwts_tx_en = 1;
294 break;
295 default:
296 return -ERANGE;
297 }
298
299 switch (config.rx_filter) {
300 case HWTSTAMP_FILTER_NONE:
301 if (fep->hwts_rx_en)
302 fep->hwts_rx_en = 0;
303 config.rx_filter = HWTSTAMP_FILTER_NONE;
304 break;
305
306 default:
307 /*
308 * register RXMTRL must be set in order to do V1 packets,
309 * therefore it is not possible to time stamp both V1 Sync and
310 * Delay_Req messages and hardware does not support
311 * timestamping all packets => return error
312 */
313 fep->hwts_rx_en = 1;
314 config.rx_filter = HWTSTAMP_FILTER_ALL;
315 break;
316 }
317
318 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
319 -EFAULT : 0;
320}
321
322/**
323 * fec_time_keep - call timecounter_read every second to avoid timer overrun
324 * because ENET just support 32bit counter, will timeout in 4s
325 */
326static void fec_time_keep(unsigned long _data)
327{
328 struct fec_enet_private *fep = (struct fec_enet_private *)_data;
329 u64 ns;
330 unsigned long flags;
331
332 spin_lock_irqsave(&fep->tmreg_lock, flags);
333 ns = timecounter_read(&fep->tc);
334 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
335
336 mod_timer(&fep->time_keep, jiffies + HZ);
337}
338
339/**
340 * fec_ptp_init
341 * @ndev: The FEC network adapter
342 *
343 * This function performs the required steps for enabling ptp
344 * support. If ptp support has already been loaded it simply calls the
345 * cyclecounter init routine and exits.
346 */
347
348void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
349{
350 struct fec_enet_private *fep = netdev_priv(ndev);
351
352 fep->ptp_caps.owner = THIS_MODULE;
353 snprintf(fep->ptp_caps.name, 16, "fec ptp");
354
355 fep->ptp_caps.max_adj = 250000000;
356 fep->ptp_caps.n_alarm = 0;
357 fep->ptp_caps.n_ext_ts = 0;
358 fep->ptp_caps.n_per_out = 0;
359 fep->ptp_caps.pps = 0;
360 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
361 fep->ptp_caps.adjtime = fec_ptp_adjtime;
362 fep->ptp_caps.gettime = fec_ptp_gettime;
363 fep->ptp_caps.settime = fec_ptp_settime;
364 fep->ptp_caps.enable = fec_ptp_enable;
365
366 spin_lock_init(&fep->tmreg_lock);
367
368 fec_ptp_start_cyclecounter(ndev);
369
370 init_timer(&fep->time_keep);
371 fep->time_keep.data = (unsigned long)fep;
372 fep->time_keep.function = fec_time_keep;
373 fep->time_keep.expires = jiffies + HZ;
374 add_timer(&fep->time_keep);
375
376 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
377 if (IS_ERR(fep->ptp_clock)) {
378 fep->ptp_clock = NULL;
379 pr_err("ptp_clock_register failed\n");
380 } else {
381 pr_info("registered PHC device on %s\n", ndev->name);
382 }
383}
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2b7633f766d9..e9879c5af7ba 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1004,7 +1004,7 @@ static const struct net_device_ops fs_enet_netdev_ops = {
1004}; 1004};
1005 1005
1006static struct of_device_id fs_enet_match[]; 1006static struct of_device_id fs_enet_match[];
1007static int __devinit fs_enet_probe(struct platform_device *ofdev) 1007static int fs_enet_probe(struct platform_device *ofdev)
1008{ 1008{
1009 const struct of_device_id *match; 1009 const struct of_device_id *match;
1010 struct net_device *ndev; 1010 struct net_device *ndev;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index 151453309401..2bafbd37c247 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -108,8 +108,7 @@ static struct mdiobb_ops bb_ops = {
108 .get_mdio_data = mdio_read, 108 .get_mdio_data = mdio_read,
109}; 109};
110 110
111static int __devinit fs_mii_bitbang_init(struct mii_bus *bus, 111static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np)
112 struct device_node *np)
113{ 112{
114 struct resource res; 113 struct resource res;
115 const u32 *data; 114 const u32 *data;
@@ -150,7 +149,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
150 return 0; 149 return 0;
151} 150}
152 151
153static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) 152static int fs_enet_mdio_probe(struct platform_device *ofdev)
154{ 153{
155 struct mii_bus *new_bus; 154 struct mii_bus *new_bus;
156 struct bb_info *bitbang; 155 struct bb_info *bitbang;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index cdf702a59485..18e8ef203736 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
102} 102}
103 103
104static struct of_device_id fs_enet_mdio_fec_match[]; 104static struct of_device_id fs_enet_mdio_fec_match[];
105static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) 105static int fs_enet_mdio_probe(struct platform_device *ofdev)
106{ 106{
107 const struct of_device_id *match; 107 const struct of_device_id *match;
108 struct resource res; 108 struct resource res;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 19ac096cb07b..bffb2edd6858 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -210,7 +210,7 @@ static int gfar_init_bds(struct net_device *ndev)
210 skb = gfar_new_skb(ndev); 210 skb = gfar_new_skb(ndev);
211 if (!skb) { 211 if (!skb) {
212 netdev_err(ndev, "Can't allocate RX buffers\n"); 212 netdev_err(ndev, "Can't allocate RX buffers\n");
213 goto err_rxalloc_fail; 213 return -ENOMEM;
214 } 214 }
215 rx_queue->rx_skbuff[j] = skb; 215 rx_queue->rx_skbuff[j] = skb;
216 216
@@ -223,10 +223,6 @@ static int gfar_init_bds(struct net_device *ndev)
223 } 223 }
224 224
225 return 0; 225 return 0;
226
227err_rxalloc_fail:
228 free_skb_resources(priv);
229 return -ENOMEM;
230} 226}
231 227
232static int gfar_alloc_skb_resources(struct net_device *ndev) 228static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -1359,7 +1355,11 @@ static int gfar_restore(struct device *dev)
1359 return 0; 1355 return 0;
1360 } 1356 }
1361 1357
1362 gfar_init_bds(ndev); 1358 if (gfar_init_bds(ndev)) {
1359 free_skb_resources(priv);
1360 return -ENOMEM;
1361 }
1362
1363 init_registers(ndev); 1363 init_registers(ndev);
1364 gfar_set_mac_address(ndev); 1364 gfar_set_mac_address(ndev);
1365 gfar_init_mac(ndev); 1365 gfar_init_mac(ndev);
@@ -1712,6 +1712,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
1712 tx_queue->tx_skbuff[i] = NULL; 1712 tx_queue->tx_skbuff[i] = NULL;
1713 } 1713 }
1714 kfree(tx_queue->tx_skbuff); 1714 kfree(tx_queue->tx_skbuff);
1715 tx_queue->tx_skbuff = NULL;
1715} 1716}
1716 1717
1717static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) 1718static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
@@ -1735,6 +1736,7 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
1735 rxbdp++; 1736 rxbdp++;
1736 } 1737 }
1737 kfree(rx_queue->rx_skbuff); 1738 kfree(rx_queue->rx_skbuff);
1739 rx_queue->rx_skbuff = NULL;
1738} 1740}
1739 1741
1740/* If there are any tx skbs or rx skbs still around, free them. 1742/* If there are any tx skbs or rx skbs still around, free them.
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 1afb5ea2a984..418068b941b1 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -189,7 +189,7 @@ static int xgmac_mdio_reset(struct mii_bus *bus)
189 return ret; 189 return ret;
190} 190}
191 191
192static int __devinit xgmac_mdio_probe(struct platform_device *pdev) 192static int xgmac_mdio_probe(struct platform_device *pdev)
193{ 193{
194 struct device_node *np = pdev->dev.of_node; 194 struct device_node *np = pdev->dev.of_node;
195 struct mii_bus *bus; 195 struct mii_bus *bus;
@@ -240,7 +240,7 @@ err_ioremap:
240 return ret; 240 return ret;
241} 241}
242 242
243static int __devexit xgmac_mdio_remove(struct platform_device *pdev) 243static int xgmac_mdio_remove(struct platform_device *pdev)
244{ 244{
245 struct mii_bus *bus = dev_get_drvdata(&pdev->dev); 245 struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
246 246
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3f4391bede81..e3c7c697fc45 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -308,7 +308,7 @@ static void wait(void)
308 * Read board id and convert to string. 308 * Read board id and convert to string.
309 * Effectively same code as decode_eisa_sig 309 * Effectively same code as decode_eisa_sig
310 */ 310 */
311static __devinit const char *hp100_read_id(int ioaddr) 311static const char *hp100_read_id(int ioaddr)
312{ 312{
313 int i; 313 int i;
314 static char str[HP100_SIG_LEN]; 314 static char str[HP100_SIG_LEN];
@@ -447,8 +447,8 @@ static const struct net_device_ops hp100_netdev_ops = {
447 .ndo_validate_addr = eth_validate_addr, 447 .ndo_validate_addr = eth_validate_addr,
448}; 448};
449 449
450static int __devinit hp100_probe1(struct net_device *dev, int ioaddr, 450static int hp100_probe1(struct net_device *dev, int ioaddr, u_char bus,
451 u_char bus, struct pci_dev *pci_dev) 451 struct pci_dev *pci_dev)
452{ 452{
453 int i; 453 int i;
454 int err = -ENODEV; 454 int err = -ENODEV;
@@ -2866,7 +2866,7 @@ static int __init hp100_eisa_probe (struct device *gendev)
2866 return err; 2866 return err;
2867} 2867}
2868 2868
2869static int __devexit hp100_eisa_remove (struct device *gendev) 2869static int hp100_eisa_remove(struct device *gendev)
2870{ 2870{
2871 struct net_device *dev = dev_get_drvdata(gendev); 2871 struct net_device *dev = dev_get_drvdata(gendev);
2872 cleanup_dev(dev); 2872 cleanup_dev(dev);
@@ -2878,14 +2878,14 @@ static struct eisa_driver hp100_eisa_driver = {
2878 .driver = { 2878 .driver = {
2879 .name = "hp100", 2879 .name = "hp100",
2880 .probe = hp100_eisa_probe, 2880 .probe = hp100_eisa_probe,
2881 .remove = __devexit_p (hp100_eisa_remove), 2881 .remove = hp100_eisa_remove,
2882 } 2882 }
2883}; 2883};
2884#endif 2884#endif
2885 2885
2886#ifdef CONFIG_PCI 2886#ifdef CONFIG_PCI
2887static int __devinit hp100_pci_probe (struct pci_dev *pdev, 2887static int hp100_pci_probe(struct pci_dev *pdev,
2888 const struct pci_device_id *ent) 2888 const struct pci_device_id *ent)
2889{ 2889{
2890 struct net_device *dev; 2890 struct net_device *dev;
2891 int ioaddr; 2891 int ioaddr;
@@ -2937,7 +2937,7 @@ static int __devinit hp100_pci_probe (struct pci_dev *pdev,
2937 return err; 2937 return err;
2938} 2938}
2939 2939
2940static void __devexit hp100_pci_remove (struct pci_dev *pdev) 2940static void hp100_pci_remove(struct pci_dev *pdev)
2941{ 2941{
2942 struct net_device *dev = pci_get_drvdata(pdev); 2942 struct net_device *dev = pci_get_drvdata(pdev);
2943 2943
@@ -2950,7 +2950,7 @@ static struct pci_driver hp100_pci_driver = {
2950 .name = "hp100", 2950 .name = "hp100",
2951 .id_table = hp100_pci_tbl, 2951 .id_table = hp100_pci_tbl,
2952 .probe = hp100_pci_probe, 2952 .probe = hp100_pci_probe,
2953 .remove = __devexit_p(hp100_pci_remove), 2953 .remove = hp100_pci_remove,
2954}; 2954};
2955#endif 2955#endif
2956 2956
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index 067db3f13e91..5d353c660068 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -72,7 +72,7 @@ static void ether1_timeout(struct net_device *dev);
72 72
73/* ------------------------------------------------------------------------- */ 73/* ------------------------------------------------------------------------- */
74 74
75static char version[] __devinitdata = "ether1 ethernet driver (c) 2000 Russell King v1.07\n"; 75static char version[] = "ether1 ethernet driver (c) 2000 Russell King v1.07\n";
76 76
77#define BUS_16 16 77#define BUS_16 16
78#define BUS_8 8 78#define BUS_8 8
@@ -250,7 +250,7 @@ ether1_readbuffer (struct net_device *dev, void *data, unsigned int start, unsig
250 } while (thislen); 250 } while (thislen);
251} 251}
252 252
253static int __devinit 253static int
254ether1_ramtest(struct net_device *dev, unsigned char byte) 254ether1_ramtest(struct net_device *dev, unsigned char byte)
255{ 255{
256 unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL); 256 unsigned char *buffer = kmalloc (BUFFER_SIZE, GFP_KERNEL);
@@ -304,7 +304,7 @@ ether1_reset (struct net_device *dev)
304 return BUS_16; 304 return BUS_16;
305} 305}
306 306
307static int __devinit 307static int
308ether1_init_2(struct net_device *dev) 308ether1_init_2(struct net_device *dev)
309{ 309{
310 int i; 310 int i;
@@ -638,12 +638,6 @@ ether1_txalloc (struct net_device *dev, int size)
638static int 638static int
639ether1_open (struct net_device *dev) 639ether1_open (struct net_device *dev)
640{ 640{
641 if (!is_valid_ether_addr(dev->dev_addr)) {
642 printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
643 dev->name);
644 return -EINVAL;
645 }
646
647 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev)) 641 if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
648 return -EAGAIN; 642 return -EAGAIN;
649 643
@@ -972,7 +966,7 @@ ether1_setmulticastlist (struct net_device *dev)
972 966
973/* ------------------------------------------------------------------------- */ 967/* ------------------------------------------------------------------------- */
974 968
975static void __devinit ether1_banner(void) 969static void ether1_banner(void)
976{ 970{
977 static unsigned int version_printed = 0; 971 static unsigned int version_printed = 0;
978 972
@@ -991,7 +985,7 @@ static const struct net_device_ops ether1_netdev_ops = {
991 .ndo_set_mac_address = eth_mac_addr, 985 .ndo_set_mac_address = eth_mac_addr,
992}; 986};
993 987
994static int __devinit 988static int
995ether1_probe(struct expansion_card *ec, const struct ecard_id *id) 989ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
996{ 990{
997 struct net_device *dev; 991 struct net_device *dev;
@@ -1052,7 +1046,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
1052 return ret; 1046 return ret;
1053} 1047}
1054 1048
1055static void __devexit ether1_remove(struct expansion_card *ec) 1049static void ether1_remove(struct expansion_card *ec)
1056{ 1050{
1057 struct net_device *dev = ecard_get_drvdata(ec); 1051 struct net_device *dev = ecard_get_drvdata(ec);
1058 1052
@@ -1070,7 +1064,7 @@ static const struct ecard_id ether1_ids[] = {
1070 1064
1071static struct ecard_driver ether1_driver = { 1065static struct ecard_driver ether1_driver = {
1072 .probe = ether1_probe, 1066 .probe = ether1_probe,
1073 .remove = __devexit_p(ether1_remove), 1067 .remove = ether1_remove,
1074 .id_table = ether1_ids, 1068 .id_table = ether1_ids,
1075 .drv = { 1069 .drv = {
1076 .name = "ether1", 1070 .name = "ether1",
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 6eba352c52e0..f42f1b707733 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -150,7 +150,7 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
150 150
151#define LAN_PROM_ADDR 0xF0810000 151#define LAN_PROM_ADDR 0xF0810000
152 152
153static int __devinit 153static int
154lan_init_chip(struct parisc_device *dev) 154lan_init_chip(struct parisc_device *dev)
155{ 155{
156 struct net_device *netdevice; 156 struct net_device *netdevice;
@@ -195,7 +195,7 @@ lan_init_chip(struct parisc_device *dev)
195 return retval; 195 return retval;
196} 196}
197 197
198static int __devexit lan_remove_chip (struct parisc_device *pdev) 198static int lan_remove_chip(struct parisc_device *pdev)
199{ 199{
200 struct net_device *dev = parisc_get_drvdata(pdev); 200 struct net_device *dev = parisc_get_drvdata(pdev);
201 struct i596_private *lp = netdev_priv(dev); 201 struct i596_private *lp = netdev_priv(dev);
@@ -219,10 +219,10 @@ static struct parisc_driver lan_driver = {
219 .name = "lasi_82596", 219 .name = "lasi_82596",
220 .id_table = lan_tbl, 220 .id_table = lan_tbl,
221 .probe = lan_init_chip, 221 .probe = lan_init_chip,
222 .remove = __devexit_p(lan_remove_chip), 222 .remove = lan_remove_chip,
223}; 223};
224 224
225static int __devinit lasi_82596_init(void) 225static int lasi_82596_init(void)
226{ 226{
227 printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n"); 227 printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
228 return register_parisc_driver(&lan_driver); 228 return register_parisc_driver(&lan_driver);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 3efbd8dbb63d..f045ea4dc514 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -1048,7 +1048,7 @@ static const struct net_device_ops i596_netdev_ops = {
1048#endif 1048#endif
1049}; 1049};
1050 1050
1051static int __devinit i82596_probe(struct net_device *dev) 1051static int i82596_probe(struct net_device *dev)
1052{ 1052{
1053 int i; 1053 int i;
1054 struct i596_private *lp = netdev_priv(dev); 1054 struct i596_private *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 6b2a88817473..4ceae9a30274 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -75,7 +75,7 @@ static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
75} 75}
76 76
77 77
78static int __devinit sni_82596_probe(struct platform_device *dev) 78static int sni_82596_probe(struct platform_device *dev)
79{ 79{
80 struct net_device *netdevice; 80 struct net_device *netdevice;
81 struct i596_private *lp; 81 struct i596_private *lp;
@@ -147,7 +147,7 @@ probe_failed_free_mpu:
147 return retval; 147 return retval;
148} 148}
149 149
150static int __devexit sni_82596_driver_remove(struct platform_device *pdev) 150static int sni_82596_driver_remove(struct platform_device *pdev)
151{ 151{
152 struct net_device *dev = platform_get_drvdata(pdev); 152 struct net_device *dev = platform_get_drvdata(pdev);
153 struct i596_private *lp = netdev_priv(dev); 153 struct i596_private *lp = netdev_priv(dev);
@@ -163,14 +163,14 @@ static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
163 163
164static struct platform_driver sni_82596_driver = { 164static struct platform_driver sni_82596_driver = {
165 .probe = sni_82596_probe, 165 .probe = sni_82596_probe,
166 .remove = __devexit_p(sni_82596_driver_remove), 166 .remove = sni_82596_driver_remove,
167 .driver = { 167 .driver = {
168 .name = sni_82596_string, 168 .name = sni_82596_string,
169 .owner = THIS_MODULE, 169 .owner = THIS_MODULE,
170 }, 170 },
171}; 171};
172 172
173static int __devinit sni_82596_init(void) 173static int sni_82596_init(void)
174{ 174{
175 printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n"); 175 printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
176 return platform_driver_register(&sni_82596_driver); 176 return platform_driver_register(&sni_82596_driver);
diff --git a/drivers/net/ethernet/ibm/Kconfig b/drivers/net/ethernet/ibm/Kconfig
index b9773d229192..6529d31595a7 100644
--- a/drivers/net/ethernet/ibm/Kconfig
+++ b/drivers/net/ethernet/ibm/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_IBM
6 bool "IBM devices" 6 bool "IBM devices"
7 default y 7 default y
8 depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \ 8 depends on MCA || PPC_PSERIES || PPC_PSERIES || PPC_DCR || \
9 (IBMEBUS && INET && SPARSEMEM) 9 (IBMEBUS && SPARSEMEM)
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 11 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 12 and read the Ethernet-HOWTO, available from
@@ -33,8 +33,7 @@ source "drivers/net/ethernet/ibm/emac/Kconfig"
33 33
34config EHEA 34config EHEA
35 tristate "eHEA Ethernet support" 35 tristate "eHEA Ethernet support"
36 depends on IBMEBUS && INET && SPARSEMEM 36 depends on IBMEBUS && SPARSEMEM
37 select INET_LRO
38 ---help--- 37 ---help---
39 This driver supports the IBM pSeries eHEA ethernet adapter. 38 This driver supports the IBM pSeries eHEA ethernet adapter.
40 39
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index f4d2da0db1b1..19b64de7124b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -98,10 +98,10 @@ static struct ehea_fw_handle_array ehea_fw_handles;
98static struct ehea_bcmc_reg_array ehea_bcmc_regs; 98static struct ehea_bcmc_reg_array ehea_bcmc_regs;
99 99
100 100
101static int __devinit ehea_probe_adapter(struct platform_device *dev, 101static int ehea_probe_adapter(struct platform_device *dev,
102 const struct of_device_id *id); 102 const struct of_device_id *id);
103 103
104static int __devexit ehea_remove(struct platform_device *dev); 104static int ehea_remove(struct platform_device *dev);
105 105
106static struct of_device_id ehea_device_table[] = { 106static struct of_device_id ehea_device_table[] = {
107 { 107 {
@@ -2909,7 +2909,7 @@ static ssize_t ehea_show_port_id(struct device *dev,
2909static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id, 2909static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2910 NULL); 2910 NULL);
2911 2911
2912static void __devinit logical_port_release(struct device *dev) 2912static void logical_port_release(struct device *dev)
2913{ 2913{
2914 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev); 2914 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2915 of_node_put(port->ofdev.dev.of_node); 2915 of_node_put(port->ofdev.dev.of_node);
@@ -3028,7 +3028,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3028 ehea_set_ethtool_ops(dev); 3028 ehea_set_ethtool_ops(dev);
3029 3029
3030 dev->hw_features = NETIF_F_SG | NETIF_F_TSO 3030 dev->hw_features = NETIF_F_SG | NETIF_F_TSO
3031 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO; 3031 | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX;
3032 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO 3032 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3033 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX 3033 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3034 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER 3034 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
@@ -3257,8 +3257,8 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3257 device_remove_file(&dev->dev, &dev_attr_remove_port); 3257 device_remove_file(&dev->dev, &dev_attr_remove_port);
3258} 3258}
3259 3259
3260static int __devinit ehea_probe_adapter(struct platform_device *dev, 3260static int ehea_probe_adapter(struct platform_device *dev,
3261 const struct of_device_id *id) 3261 const struct of_device_id *id)
3262{ 3262{
3263 struct ehea_adapter *adapter; 3263 struct ehea_adapter *adapter;
3264 const u64 *adapter_handle; 3264 const u64 *adapter_handle;
@@ -3364,7 +3364,7 @@ out:
3364 return ret; 3364 return ret;
3365} 3365}
3366 3366
3367static int __devexit ehea_remove(struct platform_device *dev) 3367static int ehea_remove(struct platform_device *dev)
3368{ 3368{
3369 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev); 3369 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3370 int i; 3370 int i;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index a0fe6e3fce61..256bdb8e1994 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2261,8 +2261,8 @@ struct emac_depentry {
2261#define EMAC_DEP_PREV_IDX 5 2261#define EMAC_DEP_PREV_IDX 5
2262#define EMAC_DEP_COUNT 6 2262#define EMAC_DEP_COUNT 6
2263 2263
2264static int __devinit emac_check_deps(struct emac_instance *dev, 2264static int emac_check_deps(struct emac_instance *dev,
2265 struct emac_depentry *deps) 2265 struct emac_depentry *deps)
2266{ 2266{
2267 int i, there = 0; 2267 int i, there = 0;
2268 struct device_node *np; 2268 struct device_node *np;
@@ -2314,8 +2314,8 @@ static void emac_put_deps(struct emac_instance *dev)
2314 of_dev_put(dev->tah_dev); 2314 of_dev_put(dev->tah_dev);
2315} 2315}
2316 2316
2317static int __devinit emac_of_bus_notify(struct notifier_block *nb, 2317static int emac_of_bus_notify(struct notifier_block *nb, unsigned long action,
2318 unsigned long action, void *data) 2318 void *data)
2319{ 2319{
2320 /* We are only intereted in device addition */ 2320 /* We are only intereted in device addition */
2321 if (action == BUS_NOTIFY_BOUND_DRIVER) 2321 if (action == BUS_NOTIFY_BOUND_DRIVER)
@@ -2323,11 +2323,11 @@ static int __devinit emac_of_bus_notify(struct notifier_block *nb,
2323 return 0; 2323 return 0;
2324} 2324}
2325 2325
2326static struct notifier_block emac_of_bus_notifier __devinitdata = { 2326static struct notifier_block emac_of_bus_notifier = {
2327 .notifier_call = emac_of_bus_notify 2327 .notifier_call = emac_of_bus_notify
2328}; 2328};
2329 2329
2330static int __devinit emac_wait_deps(struct emac_instance *dev) 2330static int emac_wait_deps(struct emac_instance *dev)
2331{ 2331{
2332 struct emac_depentry deps[EMAC_DEP_COUNT]; 2332 struct emac_depentry deps[EMAC_DEP_COUNT];
2333 int i, err; 2333 int i, err;
@@ -2367,8 +2367,8 @@ static int __devinit emac_wait_deps(struct emac_instance *dev)
2367 return err; 2367 return err;
2368} 2368}
2369 2369
2370static int __devinit emac_read_uint_prop(struct device_node *np, const char *name, 2370static int emac_read_uint_prop(struct device_node *np, const char *name,
2371 u32 *val, int fatal) 2371 u32 *val, int fatal)
2372{ 2372{
2373 int len; 2373 int len;
2374 const u32 *prop = of_get_property(np, name, &len); 2374 const u32 *prop = of_get_property(np, name, &len);
@@ -2382,7 +2382,7 @@ static int __devinit emac_read_uint_prop(struct device_node *np, const char *nam
2382 return 0; 2382 return 0;
2383} 2383}
2384 2384
2385static int __devinit emac_init_phy(struct emac_instance *dev) 2385static int emac_init_phy(struct emac_instance *dev)
2386{ 2386{
2387 struct device_node *np = dev->ofdev->dev.of_node; 2387 struct device_node *np = dev->ofdev->dev.of_node;
2388 struct net_device *ndev = dev->ndev; 2388 struct net_device *ndev = dev->ndev;
@@ -2518,7 +2518,7 @@ static int __devinit emac_init_phy(struct emac_instance *dev)
2518 return 0; 2518 return 0;
2519} 2519}
2520 2520
2521static int __devinit emac_init_config(struct emac_instance *dev) 2521static int emac_init_config(struct emac_instance *dev)
2522{ 2522{
2523 struct device_node *np = dev->ofdev->dev.of_node; 2523 struct device_node *np = dev->ofdev->dev.of_node;
2524 const void *p; 2524 const void *p;
@@ -2703,7 +2703,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
2703 .ndo_change_mtu = emac_change_mtu, 2703 .ndo_change_mtu = emac_change_mtu,
2704}; 2704};
2705 2705
2706static int __devinit emac_probe(struct platform_device *ofdev) 2706static int emac_probe(struct platform_device *ofdev)
2707{ 2707{
2708 struct net_device *ndev; 2708 struct net_device *ndev;
2709 struct emac_instance *dev; 2709 struct emac_instance *dev;
@@ -2930,7 +2930,7 @@ static int __devinit emac_probe(struct platform_device *ofdev)
2930 return err; 2930 return err;
2931} 2931}
2932 2932
2933static int __devexit emac_remove(struct platform_device *ofdev) 2933static int emac_remove(struct platform_device *ofdev)
2934{ 2934{
2935 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev); 2935 struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
2936 2936
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 479e43e2f1ef..50ea12bfb579 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -33,8 +33,7 @@
33 33
34static int mal_count; 34static int mal_count;
35 35
36int __devinit mal_register_commac(struct mal_instance *mal, 36int mal_register_commac(struct mal_instance *mal, struct mal_commac *commac)
37 struct mal_commac *commac)
38{ 37{
39 unsigned long flags; 38 unsigned long flags;
40 39
@@ -517,7 +516,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
517 return regs + 1; 516 return regs + 1;
518} 517}
519 518
520static int __devinit mal_probe(struct platform_device *ofdev) 519static int mal_probe(struct platform_device *ofdev)
521{ 520{
522 struct mal_instance *mal; 521 struct mal_instance *mal;
523 int err = 0, i, bd_size; 522 int err = 0, i, bd_size;
@@ -729,7 +728,7 @@ static int __devinit mal_probe(struct platform_device *ofdev)
729 return err; 728 return err;
730} 729}
731 730
732static int __devexit mal_remove(struct platform_device *ofdev) 731static int mal_remove(struct platform_device *ofdev)
733{ 732{
734 struct mal_instance *mal = dev_get_drvdata(&ofdev->dev); 733 struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
735 734
@@ -738,13 +737,11 @@ static int __devexit mal_remove(struct platform_device *ofdev)
738 /* Synchronize with scheduled polling */ 737 /* Synchronize with scheduled polling */
739 napi_disable(&mal->napi); 738 napi_disable(&mal->napi);
740 739
741 if (!list_empty(&mal->list)) { 740 if (!list_empty(&mal->list))
742 /* This is *very* bad */ 741 /* This is *very* bad */
743 printk(KERN_EMERG 742 WARN(1, KERN_EMERG
744 "mal%d: commac list is not empty on remove!\n", 743 "mal%d: commac list is not empty on remove!\n",
745 mal->index); 744 mal->index);
746 WARN_ON(1);
747 }
748 745
749 dev_set_drvdata(&ofdev->dev, NULL); 746 dev_set_drvdata(&ofdev->dev, NULL);
750 747
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index d3123282e18e..39251765b55d 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -93,7 +93,7 @@ static inline u32 rgmii_mode_mask(int mode, int input)
93 } 93 }
94} 94}
95 95
96int __devinit rgmii_attach(struct platform_device *ofdev, int input, int mode) 96int rgmii_attach(struct platform_device *ofdev, int input, int mode)
97{ 97{
98 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 98 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
99 struct rgmii_regs __iomem *p = dev->base; 99 struct rgmii_regs __iomem *p = dev->base;
@@ -228,7 +228,7 @@ void *rgmii_dump_regs(struct platform_device *ofdev, void *buf)
228} 228}
229 229
230 230
231static int __devinit rgmii_probe(struct platform_device *ofdev) 231static int rgmii_probe(struct platform_device *ofdev)
232{ 232{
233 struct device_node *np = ofdev->dev.of_node; 233 struct device_node *np = ofdev->dev.of_node;
234 struct rgmii_instance *dev; 234 struct rgmii_instance *dev;
@@ -289,7 +289,7 @@ static int __devinit rgmii_probe(struct platform_device *ofdev)
289 return rc; 289 return rc;
290} 290}
291 291
292static int __devexit rgmii_remove(struct platform_device *ofdev) 292static int rgmii_remove(struct platform_device *ofdev)
293{ 293{
294 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); 294 struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev);
295 295
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index 872912ef518d..795f1393e2b6 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -23,7 +23,7 @@
23#include "emac.h" 23#include "emac.h"
24#include "core.h" 24#include "core.h"
25 25
26int __devinit tah_attach(struct platform_device *ofdev, int channel) 26int tah_attach(struct platform_device *ofdev, int channel)
27{ 27{
28 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 28 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
29 29
@@ -87,7 +87,7 @@ void *tah_dump_regs(struct platform_device *ofdev, void *buf)
87 return regs + 1; 87 return regs + 1;
88} 88}
89 89
90static int __devinit tah_probe(struct platform_device *ofdev) 90static int tah_probe(struct platform_device *ofdev)
91{ 91{
92 struct device_node *np = ofdev->dev.of_node; 92 struct device_node *np = ofdev->dev.of_node;
93 struct tah_instance *dev; 93 struct tah_instance *dev;
@@ -135,7 +135,7 @@ static int __devinit tah_probe(struct platform_device *ofdev)
135 return rc; 135 return rc;
136} 136}
137 137
138static int __devexit tah_remove(struct platform_device *ofdev) 138static int tah_remove(struct platform_device *ofdev)
139{ 139{
140 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); 140 struct tah_instance *dev = dev_get_drvdata(&ofdev->dev);
141 141
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 415e9b4d5408..f91202f42125 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -82,7 +82,7 @@ static inline u32 zmii_mode_mask(int mode, int input)
82 } 82 }
83} 83}
84 84
85int __devinit zmii_attach(struct platform_device *ofdev, int input, int *mode) 85int zmii_attach(struct platform_device *ofdev, int input, int *mode)
86{ 86{
87 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 87 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
88 struct zmii_regs __iomem *p = dev->base; 88 struct zmii_regs __iomem *p = dev->base;
@@ -231,7 +231,7 @@ void *zmii_dump_regs(struct platform_device *ofdev, void *buf)
231 return regs + 1; 231 return regs + 1;
232} 232}
233 233
234static int __devinit zmii_probe(struct platform_device *ofdev) 234static int zmii_probe(struct platform_device *ofdev)
235{ 235{
236 struct device_node *np = ofdev->dev.of_node; 236 struct device_node *np = ofdev->dev.of_node;
237 struct zmii_instance *dev; 237 struct zmii_instance *dev;
@@ -282,7 +282,7 @@ static int __devinit zmii_probe(struct platform_device *ofdev)
282 return rc; 282 return rc;
283} 283}
284 284
285static int __devexit zmii_remove(struct platform_device *ofdev) 285static int zmii_remove(struct platform_device *ofdev)
286{ 286{
287 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); 287 struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev);
288 288
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b68d28a130e6..f2fdbb79837e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1324,8 +1324,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1324#endif 1324#endif
1325}; 1325};
1326 1326
1327static int __devinit ibmveth_probe(struct vio_dev *dev, 1327static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1328 const struct vio_device_id *id)
1329{ 1328{
1330 int rc, i; 1329 int rc, i;
1331 struct net_device *netdev; 1330 struct net_device *netdev;
@@ -1426,7 +1425,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
1426 return 0; 1425 return 0;
1427} 1426}
1428 1427
1429static int __devexit ibmveth_remove(struct vio_dev *dev) 1428static int ibmveth_remove(struct vio_dev *dev)
1430{ 1429{
1431 struct net_device *netdev = dev_get_drvdata(&dev->dev); 1430 struct net_device *netdev = dev_get_drvdata(&dev->dev);
1432 struct ibmveth_adapter *adapter = netdev_priv(netdev); 1431 struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -1593,7 +1592,7 @@ static int ibmveth_resume(struct device *dev)
1593 return 0; 1592 return 0;
1594} 1593}
1595 1594
1596static struct vio_device_id ibmveth_device_table[] __devinitdata = { 1595static struct vio_device_id ibmveth_device_table[] = {
1597 { "network", "IBM,l-lan"}, 1596 { "network", "IBM,l-lan"},
1598 { "", "" } 1597 { "", "" }
1599}; 1598};
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1b563bb959c2..068d78151658 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2167,7 +2167,7 @@ static const struct ethtool_ops ipg_ethtool_ops = {
2167 .nway_reset = ipg_nway_reset, 2167 .nway_reset = ipg_nway_reset,
2168}; 2168};
2169 2169
2170static void __devexit ipg_remove(struct pci_dev *pdev) 2170static void ipg_remove(struct pci_dev *pdev)
2171{ 2171{
2172 struct net_device *dev = pci_get_drvdata(pdev); 2172 struct net_device *dev = pci_get_drvdata(pdev);
2173 struct ipg_nic_private *sp = netdev_priv(dev); 2173 struct ipg_nic_private *sp = netdev_priv(dev);
@@ -2199,8 +2199,7 @@ static const struct net_device_ops ipg_netdev_ops = {
2199 .ndo_validate_addr = eth_validate_addr, 2199 .ndo_validate_addr = eth_validate_addr,
2200}; 2200};
2201 2201
2202static int __devinit ipg_probe(struct pci_dev *pdev, 2202static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2203 const struct pci_device_id *id)
2204{ 2203{
2205 unsigned int i = id->driver_data; 2204 unsigned int i = id->driver_data;
2206 struct ipg_nic_private *sp; 2205 struct ipg_nic_private *sp;
@@ -2296,7 +2295,7 @@ static struct pci_driver ipg_pci_driver = {
2296 .name = IPG_DRIVER_NAME, 2295 .name = IPG_DRIVER_NAME,
2297 .id_table = ipg_pci_tbl, 2296 .id_table = ipg_pci_tbl,
2298 .probe = ipg_probe, 2297 .probe = ipg_probe,
2299 .remove = __devexit_p(ipg_remove), 2298 .remove = ipg_remove,
2300}; 2299};
2301 2300
2302static int __init ipg_init_module(void) 2301static int __init ipg_init_module(void)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4fe9406..ddee4060948a 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@ config E1000E
93config IGB 93config IGB
94 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" 94 tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
95 depends on PCI 95 depends on PCI
96 select PTP_1588_CLOCK
96 ---help--- 97 ---help---
97 This driver supports Intel(R) 82575/82576 gigabit ethernet family of 98 This driver supports Intel(R) 82575/82576 gigabit ethernet family of
98 adapters. For more information on how to identify your adapter, go 99 adapters. For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
120 driver. DCA is a method for warming the CPU cache before data 121 driver. DCA is a method for warming the CPU cache before data
121 is used, with the intent of lessening the impact of cache misses. 122 is used, with the intent of lessening the impact of cache misses.
122 123
123config IGB_PTP
124 bool "PTP Hardware Clock (PHC)"
125 default n
126 depends on IGB && EXPERIMENTAL
127 select PPS
128 select PTP_1588_CLOCK
129 ---help---
130 Say Y here if you want to use PTP Hardware Clock (PHC) in the
131 driver. Only the basic clock operations have been implemented.
132
133 Every timestamp and clock read operations must consult the
134 overflow counter to form a correct time value.
135
136config IGBVF 124config IGBVF
137 tristate "Intel(R) 82576 Virtual Function Ethernet support" 125 tristate "Intel(R) 82576 Virtual Function Ethernet support"
138 depends on PCI 126 depends on PCI
@@ -178,8 +166,9 @@ config IXGB
178 166
179config IXGBE 167config IXGBE
180 tristate "Intel(R) 10GbE PCI Express adapters support" 168 tristate "Intel(R) 10GbE PCI Express adapters support"
181 depends on PCI && INET 169 depends on PCI
182 select MDIO 170 select MDIO
171 select PTP_1588_CLOCK
183 ---help--- 172 ---help---
184 This driver supports Intel(R) 10GbE PCI Express family of 173 This driver supports Intel(R) 10GbE PCI Express family of
185 adapters. For more information on how to identify your adapter, go 174 adapters. For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
222 211
223 If unsure, say N. 212 If unsure, say N.
224 213
225config IXGBE_PTP
226 bool "PTP Clock Support"
227 default n
228 depends on IXGBE && EXPERIMENTAL
229 select PPS
230 select PTP_1588_CLOCK
231 ---help---
232 Say Y here if you want support for 1588 Timestamping with a
233 PHC device, using the PTP 1588 Clock support. This is
234 required to enable timestamping support for the device.
235
236 If unsure, say N.
237
238config IXGBEVF 214config IXGBEVF
239 tristate "Intel(R) 82599 Virtual Function Ethernet support" 215 tristate "Intel(R) 82599 Virtual Function Ethernet support"
240 depends on PCI_MSI 216 depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 29ce9bd27f94..a59f0779e1c3 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2829,8 +2829,7 @@ static const struct net_device_ops e100_netdev_ops = {
2829 .ndo_set_features = e100_set_features, 2829 .ndo_set_features = e100_set_features,
2830}; 2830};
2831 2831
2832static int __devinit e100_probe(struct pci_dev *pdev, 2832static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2833 const struct pci_device_id *ent)
2834{ 2833{
2835 struct net_device *netdev; 2834 struct net_device *netdev;
2836 struct nic *nic; 2835 struct nic *nic;
@@ -2981,7 +2980,7 @@ err_out_free_dev:
2981 return err; 2980 return err;
2982} 2981}
2983 2982
2984static void __devexit e100_remove(struct pci_dev *pdev) 2983static void e100_remove(struct pci_dev *pdev)
2985{ 2984{
2986 struct net_device *netdev = pci_get_drvdata(pdev); 2985 struct net_device *netdev = pci_get_drvdata(pdev);
2987 2986
@@ -3167,7 +3166,7 @@ static struct pci_driver e100_driver = {
3167 .name = DRV_NAME, 3166 .name = DRV_NAME,
3168 .id_table = e100_id_table, 3167 .id_table = e100_id_table,
3169 .probe = e100_probe, 3168 .probe = e100_probe,
3170 .remove = __devexit_p(e100_remove), 3169 .remove = e100_remove,
3171#ifdef CONFIG_PM 3170#ifdef CONFIG_PM
3172 /* Power Management hooks */ 3171 /* Power Management hooks */
3173 .suspend = e100_suspend, 3172 .suspend = e100_suspend,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d6839528761..8fedd2451538 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
107}; 107};
108 108
109static DEFINE_SPINLOCK(e1000_eeprom_lock); 109static DEFINE_SPINLOCK(e1000_eeprom_lock);
110static DEFINE_SPINLOCK(e1000_phy_lock);
110 111
111/** 112/**
112 * e1000_set_phy_type - Set the phy type member in the hw struct. 113 * e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
2830s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) 2831s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
2831{ 2832{
2832 u32 ret_val; 2833 u32 ret_val;
2834 unsigned long flags;
2833 2835
2834 e_dbg("e1000_read_phy_reg"); 2836 e_dbg("e1000_read_phy_reg");
2835 2837
2838 spin_lock_irqsave(&e1000_phy_lock, flags);
2839
2836 if ((hw->phy_type == e1000_phy_igp) && 2840 if ((hw->phy_type == e1000_phy_igp) &&
2837 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2841 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2838 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2842 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2839 (u16) reg_addr); 2843 (u16) reg_addr);
2840 if (ret_val) 2844 if (ret_val) {
2845 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2841 return ret_val; 2846 return ret_val;
2847 }
2842 } 2848 }
2843 2849
2844 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 2850 ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2845 phy_data); 2851 phy_data);
2852 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2846 2853
2847 return ret_val; 2854 return ret_val;
2848} 2855}
@@ -2965,19 +2972,25 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2965s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) 2972s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
2966{ 2973{
2967 u32 ret_val; 2974 u32 ret_val;
2975 unsigned long flags;
2968 2976
2969 e_dbg("e1000_write_phy_reg"); 2977 e_dbg("e1000_write_phy_reg");
2970 2978
2979 spin_lock_irqsave(&e1000_phy_lock, flags);
2980
2971 if ((hw->phy_type == e1000_phy_igp) && 2981 if ((hw->phy_type == e1000_phy_igp) &&
2972 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2982 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
2973 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 2983 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
2974 (u16) reg_addr); 2984 (u16) reg_addr);
2975 if (ret_val) 2985 if (ret_val) {
2986 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2976 return ret_val; 2987 return ret_val;
2988 }
2977 } 2989 }
2978 2990
2979 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, 2991 ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
2980 phy_data); 2992 phy_data);
2993 spin_unlock_irqrestore(&e1000_phy_lock, flags);
2981 2994
2982 return ret_val; 2995 return ret_val;
2983} 2996}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 222bfaff4622..294da56b824c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -111,7 +111,7 @@ void e1000_update_stats(struct e1000_adapter *adapter);
111static int e1000_init_module(void); 111static int e1000_init_module(void);
112static void e1000_exit_module(void); 112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114static void __devexit e1000_remove(struct pci_dev *pdev); 114static void e1000_remove(struct pci_dev *pdev);
115static int e1000_alloc_queues(struct e1000_adapter *adapter); 115static int e1000_alloc_queues(struct e1000_adapter *adapter);
116static int e1000_sw_init(struct e1000_adapter *adapter); 116static int e1000_sw_init(struct e1000_adapter *adapter);
117static int e1000_open(struct net_device *netdev); 117static int e1000_open(struct net_device *netdev);
@@ -202,7 +202,7 @@ static struct pci_driver e1000_driver = {
202 .name = e1000_driver_name, 202 .name = e1000_driver_name,
203 .id_table = e1000_pci_tbl, 203 .id_table = e1000_pci_tbl,
204 .probe = e1000_probe, 204 .probe = e1000_probe,
205 .remove = __devexit_p(e1000_remove), 205 .remove = e1000_remove,
206#ifdef CONFIG_PM 206#ifdef CONFIG_PM
207 /* Power Management Hooks */ 207 /* Power Management Hooks */
208 .suspend = e1000_suspend, 208 .suspend = e1000_suspend,
@@ -938,8 +938,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
938 * The OS initialization, configuring of the adapter private structure, 938 * The OS initialization, configuring of the adapter private structure,
939 * and a hardware reset occur. 939 * and a hardware reset occur.
940 **/ 940 **/
941static int __devinit e1000_probe(struct pci_dev *pdev, 941static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
942 const struct pci_device_id *ent)
943{ 942{
944 struct net_device *netdev; 943 struct net_device *netdev;
945 struct e1000_adapter *adapter; 944 struct e1000_adapter *adapter;
@@ -1273,7 +1272,7 @@ err_pci_reg:
1273 * memory. 1272 * memory.
1274 **/ 1273 **/
1275 1274
1276static void __devexit e1000_remove(struct pci_dev *pdev) 1275static void e1000_remove(struct pci_dev *pdev)
1277{ 1276{
1278 struct net_device *netdev = pci_get_drvdata(pdev); 1277 struct net_device *netdev = pci_get_drvdata(pdev);
1279 struct e1000_adapter *adapter = netdev_priv(netdev); 1278 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1309,7 +1308,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
1309 * e1000_init_hw_struct MUST be called before this function 1308 * e1000_init_hw_struct MUST be called before this function
1310 **/ 1309 **/
1311 1310
1312static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 1311static int e1000_sw_init(struct e1000_adapter *adapter)
1313{ 1312{
1314 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1313 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1315 1314
@@ -1340,7 +1339,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1340 * number of queues at compile-time. 1339 * number of queues at compile-time.
1341 **/ 1340 **/
1342 1341
1343static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 1342static int e1000_alloc_queues(struct e1000_adapter *adapter)
1344{ 1343{
1345 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1344 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1346 sizeof(struct e1000_tx_ring), GFP_KERNEL); 1345 sizeof(struct e1000_tx_ring), GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 1301eba8b57a..750fc0194f37 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -45,7 +45,7 @@
45 45
46#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } 46#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
47#define E1000_PARAM(X, desc) \ 47#define E1000_PARAM(X, desc) \
48 static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ 48 static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
49 static unsigned int num_##X; \ 49 static unsigned int num_##X; \
50 module_param_array_named(X, X, int, &num_##X, 0); \ 50 module_param_array_named(X, X, int, &num_##X, 0); \
51 MODULE_PARM_DESC(X, desc); 51 MODULE_PARM_DESC(X, desc);
@@ -205,9 +205,9 @@ struct e1000_option {
205 } arg; 205 } arg;
206}; 206};
207 207
208static int __devinit e1000_validate_option(unsigned int *value, 208static int e1000_validate_option(unsigned int *value,
209 const struct e1000_option *opt, 209 const struct e1000_option *opt,
210 struct e1000_adapter *adapter) 210 struct e1000_adapter *adapter)
211{ 211{
212 if (*value == OPTION_UNSET) { 212 if (*value == OPTION_UNSET) {
213 *value = opt->def; 213 *value = opt->def;
@@ -268,7 +268,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
268 * in a variable in the adapter structure. 268 * in a variable in the adapter structure.
269 **/ 269 **/
270 270
271void __devinit e1000_check_options(struct e1000_adapter *adapter) 271void e1000_check_options(struct e1000_adapter *adapter)
272{ 272{
273 struct e1000_option opt; 273 struct e1000_option opt;
274 int bd = adapter->bd_number; 274 int bd = adapter->bd_number;
@@ -534,7 +534,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
534 * Handles speed and duplex options on fiber adapters 534 * Handles speed and duplex options on fiber adapters
535 **/ 535 **/
536 536
537static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter) 537static void e1000_check_fiber_options(struct e1000_adapter *adapter)
538{ 538{
539 int bd = adapter->bd_number; 539 int bd = adapter->bd_number;
540 if (num_Speed > bd) { 540 if (num_Speed > bd) {
@@ -560,7 +560,7 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
560 * Handles speed and duplex options on copper adapters 560 * Handles speed and duplex options on copper adapters
561 **/ 561 **/
562 562
563static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter) 563static void e1000_check_copper_options(struct e1000_adapter *adapter)
564{ 564{
565 struct e1000_option opt; 565 struct e1000_option opt;
566 unsigned int speed, dplx, an; 566 unsigned int speed, dplx, an;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4dd18a1f45d2..e73c2c355993 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -26,8 +26,7 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29/* 29/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
30 * 80003ES2LAN Gigabit Ethernet Controller (Copper)
31 * 80003ES2LAN Gigabit Ethernet Controller (Serdes) 30 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
32 */ 31 */
33 32
@@ -80,7 +79,8 @@
80 1 = 50-80M 79 1 = 50-80M
81 2 = 80-110M 80 2 = 80-110M
82 3 = 110-140M 81 3 = 110-140M
83 4 = >140M */ 82 4 = >140M
83 */
84 84
85/* Kumeran Mode Control Register (Page 193, Register 16) */ 85/* Kumeran Mode Control Register (Page 193, Register 16) */
86#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 86#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
@@ -95,8 +95,7 @@
95/* In-Band Control Register (Page 194, Register 18) */ 95/* In-Band Control Register (Page 194, Register 18) */
96#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ 96#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
97 97
98/* 98/* A table for the GG82563 cable length where the range is defined
99 * A table for the GG82563 cable length where the range is defined
100 * with a lower bound at "index" and the upper bound at 99 * with a lower bound at "index" and the upper bound at
101 * "index + 5". 100 * "index + 5".
102 */ 101 */
@@ -183,8 +182,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
183 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 182 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
184 E1000_EECD_SIZE_EX_SHIFT); 183 E1000_EECD_SIZE_EX_SHIFT);
185 184
186 /* 185 /* Added to a constant, "size" becomes the left-shift value
187 * Added to a constant, "size" becomes the left-shift value
188 * for setting word_size. 186 * for setting word_size.
189 */ 187 */
190 size += NVM_WORD_SIZE_BASE_SHIFT; 188 size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -375,8 +373,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
375 if (!(swfw_sync & (fwmask | swmask))) 373 if (!(swfw_sync & (fwmask | swmask)))
376 break; 374 break;
377 375
378 /* 376 /* Firmware currently using resource (fwmask)
379 * Firmware currently using resource (fwmask)
380 * or other software thread using resource (swmask) 377 * or other software thread using resource (swmask)
381 */ 378 */
382 e1000e_put_hw_semaphore(hw); 379 e1000e_put_hw_semaphore(hw);
@@ -442,8 +439,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
442 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 439 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
443 page_select = GG82563_PHY_PAGE_SELECT; 440 page_select = GG82563_PHY_PAGE_SELECT;
444 } else { 441 } else {
445 /* 442 /* Use Alternative Page Select register to access
446 * Use Alternative Page Select register to access
447 * registers 30 and 31 443 * registers 30 and 31
448 */ 444 */
449 page_select = GG82563_PHY_PAGE_SELECT_ALT; 445 page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -457,8 +453,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
457 } 453 }
458 454
459 if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { 455 if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
460 /* 456 /* The "ready" bit in the MDIC register may be incorrectly set
461 * The "ready" bit in the MDIC register may be incorrectly set
462 * before the device has completed the "Page Select" MDI 457 * before the device has completed the "Page Select" MDI
463 * transaction. So we wait 200us after each MDI command... 458 * transaction. So we wait 200us after each MDI command...
464 */ 459 */
@@ -513,8 +508,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
513 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 508 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
514 page_select = GG82563_PHY_PAGE_SELECT; 509 page_select = GG82563_PHY_PAGE_SELECT;
515 } else { 510 } else {
516 /* 511 /* Use Alternative Page Select register to access
517 * Use Alternative Page Select register to access
518 * registers 30 and 31 512 * registers 30 and 31
519 */ 513 */
520 page_select = GG82563_PHY_PAGE_SELECT_ALT; 514 page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -528,8 +522,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
528 } 522 }
529 523
530 if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { 524 if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
531 /* 525 /* The "ready" bit in the MDIC register may be incorrectly set
532 * The "ready" bit in the MDIC register may be incorrectly set
533 * before the device has completed the "Page Select" MDI 526 * before the device has completed the "Page Select" MDI
534 * transaction. So we wait 200us after each MDI command... 527 * transaction. So we wait 200us after each MDI command...
535 */ 528 */
@@ -618,8 +611,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
618 u16 phy_data; 611 u16 phy_data;
619 bool link; 612 bool link;
620 613
621 /* 614 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
622 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
623 * forced whenever speed and duplex are forced. 615 * forced whenever speed and duplex are forced.
624 */ 616 */
625 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 617 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -657,8 +649,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
657 return ret_val; 649 return ret_val;
658 650
659 if (!link) { 651 if (!link) {
660 /* 652 /* We didn't get link.
661 * We didn't get link.
662 * Reset the DSP and cross our fingers. 653 * Reset the DSP and cross our fingers.
663 */ 654 */
664 ret_val = e1000e_phy_reset_dsp(hw); 655 ret_val = e1000e_phy_reset_dsp(hw);
@@ -677,8 +668,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
677 if (ret_val) 668 if (ret_val)
678 return ret_val; 669 return ret_val;
679 670
680 /* 671 /* Resetting the phy means we need to verify the TX_CLK corresponds
681 * Resetting the phy means we need to verify the TX_CLK corresponds
682 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. 672 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
683 */ 673 */
684 phy_data &= ~GG82563_MSCR_TX_CLK_MASK; 674 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -687,8 +677,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
687 else 677 else
688 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; 678 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
689 679
690 /* 680 /* In addition, we must re-enable CRS on Tx for both half and full
691 * In addition, we must re-enable CRS on Tx for both half and full
692 * duplex. 681 * duplex.
693 */ 682 */
694 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; 683 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -766,8 +755,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
766 s32 ret_val; 755 s32 ret_val;
767 u16 kum_reg_data; 756 u16 kum_reg_data;
768 757
769 /* 758 /* Prevent the PCI-E bus from sticking if there is no TLP connection
770 * Prevent the PCI-E bus from sticking if there is no TLP connection
771 * on the last TLP read/write transaction when MAC is reset. 759 * on the last TLP read/write transaction when MAC is reset.
772 */ 760 */
773 ret_val = e1000e_disable_pcie_master(hw); 761 ret_val = e1000e_disable_pcie_master(hw);
@@ -899,8 +887,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
899 hw->dev_spec.e80003es2lan.mdic_wa_enable = false; 887 hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
900 } 888 }
901 889
902 /* 890 /* Clear all of the statistics registers (clear on read). It is
903 * Clear all of the statistics registers (clear on read). It is
904 * important that we do this after we have tried to establish link 891 * important that we do this after we have tried to establish link
905 * because the symbol error count will increment wildly if there 892 * because the symbol error count will increment wildly if there
906 * is no link. 893 * is no link.
@@ -945,8 +932,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
945 reg |= (1 << 28); 932 reg |= (1 << 28);
946 ew32(TARC(1), reg); 933 ew32(TARC(1), reg);
947 934
948 /* 935 /* Disable IPv6 extension header parsing because some malformed
949 * Disable IPv6 extension header parsing because some malformed
950 * IPv6 headers can hang the Rx. 936 * IPv6 headers can hang the Rx.
951 */ 937 */
952 reg = er32(RFCTL); 938 reg = er32(RFCTL);
@@ -979,8 +965,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
979 if (ret_val) 965 if (ret_val)
980 return ret_val; 966 return ret_val;
981 967
982 /* 968 /* Options:
983 * Options:
984 * MDI/MDI-X = 0 (default) 969 * MDI/MDI-X = 0 (default)
985 * 0 - Auto for all speeds 970 * 0 - Auto for all speeds
986 * 1 - MDI mode 971 * 1 - MDI mode
@@ -1006,8 +991,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
1006 break; 991 break;
1007 } 992 }
1008 993
1009 /* 994 /* Options:
1010 * Options:
1011 * disable_polarity_correction = 0 (default) 995 * disable_polarity_correction = 0 (default)
1012 * Automatic Correction for Reversed Cable Polarity 996 * Automatic Correction for Reversed Cable Polarity
1013 * 0 - Disabled 997 * 0 - Disabled
@@ -1065,8 +1049,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
1065 if (ret_val) 1049 if (ret_val)
1066 return ret_val; 1050 return ret_val;
1067 1051
1068 /* 1052 /* Do not init these registers when the HW is in IAMT mode, since the
1069 * Do not init these registers when the HW is in IAMT mode, since the
1070 * firmware will have already initialized them. We only initialize 1053 * firmware will have already initialized them. We only initialize
1071 * them if the HW is not in IAMT mode. 1054 * them if the HW is not in IAMT mode.
1072 */ 1055 */
@@ -1087,8 +1070,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
1087 return ret_val; 1070 return ret_val;
1088 } 1071 }
1089 1072
1090 /* 1073 /* Workaround: Disable padding in Kumeran interface in the MAC
1091 * Workaround: Disable padding in Kumeran interface in the MAC
1092 * and in the PHY to avoid CRC errors. 1074 * and in the PHY to avoid CRC errors.
1093 */ 1075 */
1094 ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); 1076 ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1121,8 +1103,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1121 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1103 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1122 ew32(CTRL, ctrl); 1104 ew32(CTRL, ctrl);
1123 1105
1124 /* 1106 /* Set the mac to wait the maximum time between each
1125 * Set the mac to wait the maximum time between each
1126 * iteration and increase the max iterations when 1107 * iteration and increase the max iterations when
1127 * polling the phy; this fixes erroneous timeouts at 10Mbps. 1108 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1128 */ 1109 */
@@ -1352,8 +1333,7 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
1352{ 1333{
1353 s32 ret_val = 0; 1334 s32 ret_val = 0;
1354 1335
1355 /* 1336 /* If there's an alternate MAC address place it in RAR0
1356 * If there's an alternate MAC address place it in RAR0
1357 * so that it will override the Si installed default perm 1337 * so that it will override the Si installed default perm
1358 * address. 1338 * address.
1359 */ 1339 */
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c98586408005..c77d010d5c59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -26,8 +26,7 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29/* 29/* 82571EB Gigabit Ethernet Controller
30 * 82571EB Gigabit Ethernet Controller
31 * 82571EB Gigabit Ethernet Controller (Copper) 30 * 82571EB Gigabit Ethernet Controller (Copper)
32 * 82571EB Gigabit Ethernet Controller (Fiber) 31 * 82571EB Gigabit Ethernet Controller (Fiber)
33 * 82571EB Dual Port Gigabit Mezzanine Adapter 32 * 82571EB Dual Port Gigabit Mezzanine Adapter
@@ -191,8 +190,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
191 if (((eecd >> 15) & 0x3) == 0x3) { 190 if (((eecd >> 15) & 0x3) == 0x3) {
192 nvm->type = e1000_nvm_flash_hw; 191 nvm->type = e1000_nvm_flash_hw;
193 nvm->word_size = 2048; 192 nvm->word_size = 2048;
194 /* 193 /* Autonomous Flash update bit must be cleared due
195 * Autonomous Flash update bit must be cleared due
196 * to Flash update issue. 194 * to Flash update issue.
197 */ 195 */
198 eecd &= ~E1000_EECD_AUPDEN; 196 eecd &= ~E1000_EECD_AUPDEN;
@@ -204,8 +202,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
204 nvm->type = e1000_nvm_eeprom_spi; 202 nvm->type = e1000_nvm_eeprom_spi;
205 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 203 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
206 E1000_EECD_SIZE_EX_SHIFT); 204 E1000_EECD_SIZE_EX_SHIFT);
207 /* 205 /* Added to a constant, "size" becomes the left-shift value
208 * Added to a constant, "size" becomes the left-shift value
209 * for setting word_size. 206 * for setting word_size.
210 */ 207 */
211 size += NVM_WORD_SIZE_BASE_SHIFT; 208 size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -291,8 +288,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
291 288
292 /* FWSM register */ 289 /* FWSM register */
293 mac->has_fwsm = true; 290 mac->has_fwsm = true;
294 /* 291 /* ARC supported; valid only if manageability features are
295 * ARC supported; valid only if manageability features are
296 * enabled. 292 * enabled.
297 */ 293 */
298 mac->arc_subsystem_valid = !!(er32(FWSM) & 294 mac->arc_subsystem_valid = !!(er32(FWSM) &
@@ -314,8 +310,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
314 break; 310 break;
315 } 311 }
316 312
317 /* 313 /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
318 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
319 * first NVM or PHY access. This should be done for single-port 314 * first NVM or PHY access. This should be done for single-port
320 * devices, and for one port only on dual-port devices so that 315 * devices, and for one port only on dual-port devices so that
321 * for those devices we can still use the SMBI lock to synchronize 316 * for those devices we can still use the SMBI lock to synchronize
@@ -352,11 +347,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
352 ew32(SWSM, swsm & ~E1000_SWSM_SMBI); 347 ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
353 } 348 }
354 349
355 /* 350 /* Initialize device specific counter of SMBI acquisition timeouts. */
356 * Initialize device specific counter of SMBI acquisition 351 hw->dev_spec.e82571.smb_counter = 0;
357 * timeouts.
358 */
359 hw->dev_spec.e82571.smb_counter = 0;
360 352
361 return 0; 353 return 0;
362} 354}
@@ -445,8 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
445 switch (hw->mac.type) { 437 switch (hw->mac.type) {
446 case e1000_82571: 438 case e1000_82571:
447 case e1000_82572: 439 case e1000_82572:
448 /* 440 /* The 82571 firmware may still be configuring the PHY.
449 * The 82571 firmware may still be configuring the PHY.
450 * In this case, we cannot access the PHY until the 441 * In this case, we cannot access the PHY until the
451 * configuration is done. So we explicitly set the 442 * configuration is done. So we explicitly set the
452 * PHY ID. 443 * PHY ID.
@@ -492,8 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
492 s32 fw_timeout = hw->nvm.word_size + 1; 483 s32 fw_timeout = hw->nvm.word_size + 1;
493 s32 i = 0; 484 s32 i = 0;
494 485
495 /* 486 /* If we have timedout 3 times on trying to acquire
496 * If we have timedout 3 times on trying to acquire
497 * the inter-port SMBI semaphore, there is old code 487 * the inter-port SMBI semaphore, there is old code
498 * operating on the other port, and it is not 488 * operating on the other port, and it is not
499 * releasing SMBI. Modify the number of times that 489 * releasing SMBI. Modify the number of times that
@@ -787,8 +777,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
787 if (ret_val) 777 if (ret_val)
788 return ret_val; 778 return ret_val;
789 779
790 /* 780 /* If our nvm is an EEPROM, then we're done
791 * If our nvm is an EEPROM, then we're done
792 * otherwise, commit the checksum to the flash NVM. 781 * otherwise, commit the checksum to the flash NVM.
793 */ 782 */
794 if (hw->nvm.type != e1000_nvm_flash_hw) 783 if (hw->nvm.type != e1000_nvm_flash_hw)
@@ -806,8 +795,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
806 795
807 /* Reset the firmware if using STM opcode. */ 796 /* Reset the firmware if using STM opcode. */
808 if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { 797 if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
809 /* 798 /* The enabling of and the actual reset must be done
810 * The enabling of and the actual reset must be done
811 * in two write cycles. 799 * in two write cycles.
812 */ 800 */
813 ew32(HICR, E1000_HICR_FW_RESET_ENABLE); 801 ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -867,8 +855,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
867 u32 i, eewr = 0; 855 u32 i, eewr = 0;
868 s32 ret_val = 0; 856 s32 ret_val = 0;
869 857
870 /* 858 /* A check for invalid values: offset too large, too many words,
871 * A check for invalid values: offset too large, too many words,
872 * and not enough words. 859 * and not enough words.
873 */ 860 */
874 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 861 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -957,8 +944,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
957 } else { 944 } else {
958 data &= ~IGP02E1000_PM_D0_LPLU; 945 data &= ~IGP02E1000_PM_D0_LPLU;
959 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); 946 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
960 /* 947 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
961 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
962 * during Dx states where the power conservation is most 948 * during Dx states where the power conservation is most
963 * important. During driver activity we should enable 949 * important. During driver activity we should enable
964 * SmartSpeed, so performance is maintained. 950 * SmartSpeed, so performance is maintained.
@@ -1002,8 +988,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1002 u32 ctrl, ctrl_ext, eecd, tctl; 988 u32 ctrl, ctrl_ext, eecd, tctl;
1003 s32 ret_val; 989 s32 ret_val;
1004 990
1005 /* 991 /* Prevent the PCI-E bus from sticking if there is no TLP connection
1006 * Prevent the PCI-E bus from sticking if there is no TLP connection
1007 * on the last TLP read/write transaction when MAC is reset. 992 * on the last TLP read/write transaction when MAC is reset.
1008 */ 993 */
1009 ret_val = e1000e_disable_pcie_master(hw); 994 ret_val = e1000e_disable_pcie_master(hw);
@@ -1021,8 +1006,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1021 1006
1022 usleep_range(10000, 20000); 1007 usleep_range(10000, 20000);
1023 1008
1024 /* 1009 /* Must acquire the MDIO ownership before MAC reset.
1025 * Must acquire the MDIO ownership before MAC reset.
1026 * Ownership defaults to firmware after a reset. 1010 * Ownership defaults to firmware after a reset.
1027 */ 1011 */
1028 switch (hw->mac.type) { 1012 switch (hw->mac.type) {
@@ -1067,8 +1051,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1067 /* We don't want to continue accessing MAC registers. */ 1051 /* We don't want to continue accessing MAC registers. */
1068 return ret_val; 1052 return ret_val;
1069 1053
1070 /* 1054 /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
1071 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
1072 * Need to wait for Phy configuration completion before accessing 1055 * Need to wait for Phy configuration completion before accessing
1073 * NVM and Phy. 1056 * NVM and Phy.
1074 */ 1057 */
@@ -1076,8 +1059,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1076 switch (hw->mac.type) { 1059 switch (hw->mac.type) {
1077 case e1000_82571: 1060 case e1000_82571:
1078 case e1000_82572: 1061 case e1000_82572:
1079 /* 1062 /* REQ and GNT bits need to be cleared when using AUTO_RD
1080 * REQ and GNT bits need to be cleared when using AUTO_RD
1081 * to access the EEPROM. 1063 * to access the EEPROM.
1082 */ 1064 */
1083 eecd = er32(EECD); 1065 eecd = er32(EECD);
@@ -1138,8 +1120,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1138 e_dbg("Initializing the IEEE VLAN\n"); 1120 e_dbg("Initializing the IEEE VLAN\n");
1139 mac->ops.clear_vfta(hw); 1121 mac->ops.clear_vfta(hw);
1140 1122
1141 /* Setup the receive address. */ 1123 /* Setup the receive address.
1142 /*
1143 * If, however, a locally administered address was assigned to the 1124 * If, however, a locally administered address was assigned to the
1144 * 82571, we must reserve a RAR for it to work around an issue where 1125 * 82571, we must reserve a RAR for it to work around an issue where
1145 * resetting one port will reload the MAC on the other port. 1126 * resetting one port will reload the MAC on the other port.
@@ -1183,8 +1164,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1183 break; 1164 break;
1184 } 1165 }
1185 1166
1186 /* 1167 /* Clear all of the statistics registers (clear on read). It is
1187 * Clear all of the statistics registers (clear on read). It is
1188 * important that we do this after we have tried to establish link 1168 * important that we do this after we have tried to establish link
1189 * because the symbol error count will increment wildly if there 1169 * because the symbol error count will increment wildly if there
1190 * is no link. 1170 * is no link.
@@ -1281,8 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1281 ew32(PBA_ECC, reg); 1261 ew32(PBA_ECC, reg);
1282 } 1262 }
1283 1263
1284 /* 1264 /* Workaround for hardware errata.
1285 * Workaround for hardware errata.
1286 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 1265 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
1287 */ 1266 */
1288 if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { 1267 if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
@@ -1291,8 +1270,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1291 ew32(CTRL_EXT, reg); 1270 ew32(CTRL_EXT, reg);
1292 } 1271 }
1293 1272
1294 /* 1273 /* Disable IPv6 extension header parsing because some malformed
1295 * Disable IPv6 extension header parsing because some malformed
1296 * IPv6 headers can hang the Rx. 1274 * IPv6 headers can hang the Rx.
1297 */ 1275 */
1298 if (hw->mac.type <= e1000_82573) { 1276 if (hw->mac.type <= e1000_82573) {
@@ -1309,8 +1287,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1309 reg |= (1 << 22); 1287 reg |= (1 << 22);
1310 ew32(GCR, reg); 1288 ew32(GCR, reg);
1311 1289
1312 /* 1290 /* Workaround for hardware errata.
1313 * Workaround for hardware errata.
1314 * apply workaround for hardware errata documented in errata 1291 * apply workaround for hardware errata documented in errata
1315 * docs Fixes issue where some error prone or unreliable PCIe 1292 * docs Fixes issue where some error prone or unreliable PCIe
1316 * completions are occurring, particularly with ASPM enabled. 1293 * completions are occurring, particularly with ASPM enabled.
@@ -1344,8 +1321,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
1344 case e1000_82574: 1321 case e1000_82574:
1345 case e1000_82583: 1322 case e1000_82583:
1346 if (hw->mng_cookie.vlan_id != 0) { 1323 if (hw->mng_cookie.vlan_id != 0) {
1347 /* 1324 /* The VFTA is a 4096b bit-field, each identifying
1348 * The VFTA is a 4096b bit-field, each identifying
1349 * a single VLAN ID. The following operations 1325 * a single VLAN ID. The following operations
1350 * determine which 32b entry (i.e. offset) into the 1326 * determine which 32b entry (i.e. offset) into the
1351 * array we want to set the VLAN ID (i.e. bit) of 1327 * array we want to set the VLAN ID (i.e. bit) of
@@ -1362,8 +1338,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
1362 break; 1338 break;
1363 } 1339 }
1364 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 1340 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1365 /* 1341 /* If the offset we want to clear is the same offset of the
1366 * If the offset we want to clear is the same offset of the
1367 * manageability VLAN ID, then clear all bits except that of 1342 * manageability VLAN ID, then clear all bits except that of
1368 * the manageability unit. 1343 * the manageability unit.
1369 */ 1344 */
@@ -1401,8 +1376,7 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
1401 1376
1402 ctrl = hw->mac.ledctl_mode2; 1377 ctrl = hw->mac.ledctl_mode2;
1403 if (!(E1000_STATUS_LU & er32(STATUS))) { 1378 if (!(E1000_STATUS_LU & er32(STATUS))) {
1404 /* 1379 /* If no link, then turn LED on by setting the invert bit
1405 * If no link, then turn LED on by setting the invert bit
1406 * for each LED that's "on" (0x0E) in ledctl_mode2. 1380 * for each LED that's "on" (0x0E) in ledctl_mode2.
1407 */ 1381 */
1408 for (i = 0; i < 4; i++) 1382 for (i = 0; i < 4; i++)
@@ -1427,8 +1401,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
1427 u16 receive_errors = 0; 1401 u16 receive_errors = 0;
1428 s32 ret_val = 0; 1402 s32 ret_val = 0;
1429 1403
1430 /* 1404 /* Read PHY Receive Error counter first, if its is max - all F's then
1431 * Read PHY Receive Error counter first, if its is max - all F's then
1432 * read the Base1000T status register If both are max then PHY is hung. 1405 * read the Base1000T status register If both are max then PHY is hung.
1433 */ 1406 */
1434 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); 1407 ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
@@ -1458,8 +1431,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
1458 **/ 1431 **/
1459static s32 e1000_setup_link_82571(struct e1000_hw *hw) 1432static s32 e1000_setup_link_82571(struct e1000_hw *hw)
1460{ 1433{
1461 /* 1434 /* 82573 does not have a word in the NVM to determine
1462 * 82573 does not have a word in the NVM to determine
1463 * the default flow control setting, so we explicitly 1435 * the default flow control setting, so we explicitly
1464 * set it to full. 1436 * set it to full.
1465 */ 1437 */
@@ -1526,8 +1498,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1526 switch (hw->mac.type) { 1498 switch (hw->mac.type) {
1527 case e1000_82571: 1499 case e1000_82571:
1528 case e1000_82572: 1500 case e1000_82572:
1529 /* 1501 /* If SerDes loopback mode is entered, there is no form
1530 * If SerDes loopback mode is entered, there is no form
1531 * of reset to take the adapter out of that mode. So we 1502 * of reset to take the adapter out of that mode. So we
1532 * have to explicitly take the adapter out of loopback 1503 * have to explicitly take the adapter out of loopback
1533 * mode. This prevents drivers from twiddling their thumbs 1504 * mode. This prevents drivers from twiddling their thumbs
@@ -1584,8 +1555,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1584 switch (mac->serdes_link_state) { 1555 switch (mac->serdes_link_state) {
1585 case e1000_serdes_link_autoneg_complete: 1556 case e1000_serdes_link_autoneg_complete:
1586 if (!(status & E1000_STATUS_LU)) { 1557 if (!(status & E1000_STATUS_LU)) {
1587 /* 1558 /* We have lost link, retry autoneg before
1588 * We have lost link, retry autoneg before
1589 * reporting link failure 1559 * reporting link failure
1590 */ 1560 */
1591 mac->serdes_link_state = 1561 mac->serdes_link_state =
@@ -1598,8 +1568,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1598 break; 1568 break;
1599 1569
1600 case e1000_serdes_link_forced_up: 1570 case e1000_serdes_link_forced_up:
1601 /* 1571 /* If we are receiving /C/ ordered sets, re-enable
1602 * If we are receiving /C/ ordered sets, re-enable
1603 * auto-negotiation in the TXCW register and disable 1572 * auto-negotiation in the TXCW register and disable
1604 * forced link in the Device Control register in an 1573 * forced link in the Device Control register in an
1605 * attempt to auto-negotiate with our link partner. 1574 * attempt to auto-negotiate with our link partner.
@@ -1619,8 +1588,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1619 1588
1620 case e1000_serdes_link_autoneg_progress: 1589 case e1000_serdes_link_autoneg_progress:
1621 if (rxcw & E1000_RXCW_C) { 1590 if (rxcw & E1000_RXCW_C) {
1622 /* 1591 /* We received /C/ ordered sets, meaning the
1623 * We received /C/ ordered sets, meaning the
1624 * link partner has autonegotiated, and we can 1592 * link partner has autonegotiated, and we can
1625 * trust the Link Up (LU) status bit. 1593 * trust the Link Up (LU) status bit.
1626 */ 1594 */
@@ -1636,8 +1604,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1636 e_dbg("AN_PROG -> DOWN\n"); 1604 e_dbg("AN_PROG -> DOWN\n");
1637 } 1605 }
1638 } else { 1606 } else {
1639 /* 1607 /* The link partner did not autoneg.
1640 * The link partner did not autoneg.
1641 * Force link up and full duplex, and change 1608 * Force link up and full duplex, and change
1642 * state to forced. 1609 * state to forced.
1643 */ 1610 */
@@ -1660,8 +1627,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1660 1627
1661 case e1000_serdes_link_down: 1628 case e1000_serdes_link_down:
1662 default: 1629 default:
1663 /* 1630 /* The link was down but the receiver has now gained
1664 * The link was down but the receiver has now gained
1665 * valid sync, so lets see if we can bring the link 1631 * valid sync, so lets see if we can bring the link
1666 * up. 1632 * up.
1667 */ 1633 */
@@ -1679,8 +1645,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1679 mac->serdes_link_state = e1000_serdes_link_down; 1645 mac->serdes_link_state = e1000_serdes_link_down;
1680 e_dbg("ANYSTATE -> DOWN\n"); 1646 e_dbg("ANYSTATE -> DOWN\n");
1681 } else { 1647 } else {
1682 /* 1648 /* Check several times, if SYNCH bit and CONFIG
1683 * Check several times, if SYNCH bit and CONFIG
1684 * bit both are consistently 1 then simply ignore 1649 * bit both are consistently 1 then simply ignore
1685 * the IV bit and restart Autoneg 1650 * the IV bit and restart Autoneg
1686 */ 1651 */
@@ -1780,8 +1745,7 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1780 1745
1781 /* If workaround is activated... */ 1746 /* If workaround is activated... */
1782 if (state) 1747 if (state)
1783 /* 1748 /* Hold a copy of the LAA in RAR[14] This is done so that
1784 * Hold a copy of the LAA in RAR[14] This is done so that
1785 * between the time RAR[0] gets clobbered and the time it 1749 * between the time RAR[0] gets clobbered and the time it
1786 * gets fixed, the actual LAA is in one of the RARs and no 1750 * gets fixed, the actual LAA is in one of the RARs and no
1787 * incoming packets directed to this port are dropped. 1751 * incoming packets directed to this port are dropped.
@@ -1810,8 +1774,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1810 if (nvm->type != e1000_nvm_flash_hw) 1774 if (nvm->type != e1000_nvm_flash_hw)
1811 return 0; 1775 return 0;
1812 1776
1813 /* 1777 /* Check bit 4 of word 10h. If it is 0, firmware is done updating
1814 * Check bit 4 of word 10h. If it is 0, firmware is done updating
1815 * 10h-12h. Checksum may need to be fixed. 1778 * 10h-12h. Checksum may need to be fixed.
1816 */ 1779 */
1817 ret_val = e1000_read_nvm(hw, 0x10, 1, &data); 1780 ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1819,8 +1782,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1819 return ret_val; 1782 return ret_val;
1820 1783
1821 if (!(data & 0x10)) { 1784 if (!(data & 0x10)) {
1822 /* 1785 /* Read 0x23 and check bit 15. This bit is a 1
1823 * Read 0x23 and check bit 15. This bit is a 1
1824 * when the checksum has already been fixed. If 1786 * when the checksum has already been fixed. If
1825 * the checksum is still wrong and this bit is a 1787 * the checksum is still wrong and this bit is a
1826 * 1, we need to return bad checksum. Otherwise, 1788 * 1, we need to return bad checksum. Otherwise,
@@ -1852,8 +1814,7 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
1852 if (hw->mac.type == e1000_82571) { 1814 if (hw->mac.type == e1000_82571) {
1853 s32 ret_val = 0; 1815 s32 ret_val = 0;
1854 1816
1855 /* 1817 /* If there's an alternate MAC address place it in RAR0
1856 * If there's an alternate MAC address place it in RAR0
1857 * so that it will override the Si installed default perm 1818 * so that it will override the Si installed default perm
1858 * address. 1819 * address.
1859 */ 1820 */
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 76edbc1be33b..02a12b69555f 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -185,8 +185,7 @@
185#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 185#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
186#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 186#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
187 187
188/* 188/* Use byte values for the following shift parameters
189 * Use byte values for the following shift parameters
190 * Usage: 189 * Usage:
191 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 190 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
192 * E1000_PSRCTL_BSIZE0_MASK) | 191 * E1000_PSRCTL_BSIZE0_MASK) |
@@ -242,8 +241,7 @@
242#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 241#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
243#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 242#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
244 243
245/* 244/* Bit definitions for the Management Data IO (MDIO) and Management Data
246 * Bit definitions for the Management Data IO (MDIO) and Management Data
247 * Clock (MDC) pins in the Device Control Register. 245 * Clock (MDC) pins in the Device Control Register.
248 */ 246 */
249 247
@@ -424,8 +422,7 @@
424#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ 422#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
425#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ 423#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
426 424
427/* 425/* This defines the bits that are set in the Interrupt Mask
428 * This defines the bits that are set in the Interrupt Mask
429 * Set/Read Register. Each bit is documented below: 426 * Set/Read Register. Each bit is documented below:
430 * o RXT0 = Receiver Timer Interrupt (ring 0) 427 * o RXT0 = Receiver Timer Interrupt (ring 0)
431 * o TXDW = Transmit Descriptor Written Back 428 * o TXDW = Transmit Descriptor Written Back
@@ -475,8 +472,7 @@
475/* 802.1q VLAN Packet Size */ 472/* 802.1q VLAN Packet Size */
476#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 473#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
477 474
478/* Receive Address */ 475/* Receive Address
479/*
480 * Number of high/low register pairs in the RAR. The RAR (Receive Address 476 * Number of high/low register pairs in the RAR. The RAR (Receive Address
481 * Registers) holds the directed and multicast addresses that we monitor. 477 * Registers) holds the directed and multicast addresses that we monitor.
482 * Technically, we have 16 spots. However, we reserve one of these spots 478 * Technically, we have 16 spots. However, we reserve one of these spots
@@ -723,8 +719,7 @@
723#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ 719#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
724#define MAX_PHY_MULTI_PAGE_REG 0xF 720#define MAX_PHY_MULTI_PAGE_REG 0xF
725 721
726/* Bit definitions for valid PHY IDs. */ 722/* Bit definitions for valid PHY IDs.
727/*
728 * I = Integrated 723 * I = Integrated
729 * E = External 724 * E = External
730 */ 725 */
@@ -762,8 +757,7 @@
762#define M88E1000_PSCR_AUTO_X_1000T 0x0040 757#define M88E1000_PSCR_AUTO_X_1000T 0x0040
763/* Auto crossover enabled all speeds */ 758/* Auto crossover enabled all speeds */
764#define M88E1000_PSCR_AUTO_X_MODE 0x0060 759#define M88E1000_PSCR_AUTO_X_MODE 0x0060
765/* 760/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
766 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
767 * 0=Normal 10BASE-T Rx Threshold 761 * 0=Normal 10BASE-T Rx Threshold
768 */ 762 */
769#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ 763#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
@@ -779,14 +773,12 @@
779 773
780#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 774#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
781 775
782/* 776/* Number of times we will attempt to autonegotiate before downshifting if we
783 * Number of times we will attempt to autonegotiate before downshifting if we
784 * are the master 777 * are the master
785 */ 778 */
786#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 779#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
787#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 780#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
788/* 781/* Number of times we will attempt to autonegotiate before downshifting if we
789 * Number of times we will attempt to autonegotiate before downshifting if we
790 * are the slave 782 * are the slave
791 */ 783 */
792#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 784#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -808,8 +800,7 @@
808#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ 800#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
809 ((reg) & MAX_PHY_REG_ADDRESS)) 801 ((reg) & MAX_PHY_REG_ADDRESS))
810 802
811/* 803/* Bits...
812 * Bits...
813 * 15-5: page 804 * 15-5: page
814 * 4-0: register offset 805 * 4-0: register offset
815 */ 806 */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 04668b47a1df..6782a2eea1bc 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,8 +161,7 @@ struct e1000_info;
161/* Time to wait before putting the device into D3 if there's no link (in ms). */ 161/* Time to wait before putting the device into D3 if there's no link (in ms). */
162#define LINK_TIMEOUT 100 162#define LINK_TIMEOUT 100
163 163
164/* 164/* Count for polling __E1000_RESET condition every 10-20msec.
165 * Count for polling __E1000_RESET condition every 10-20msec.
166 * Experimentation has shown the reset can take approximately 210msec. 165 * Experimentation has shown the reset can take approximately 210msec.
167 */ 166 */
168#define E1000_CHECK_RESET_COUNT 25 167#define E1000_CHECK_RESET_COUNT 25
@@ -172,8 +171,7 @@ struct e1000_info;
172#define BURST_RDTR 0x20 171#define BURST_RDTR 0x20
173#define BURST_RADV 0x20 172#define BURST_RADV 0x20
174 173
175/* 174/* in the case of WTHRESH, it appears at least the 82571/2 hardware
176 * in the case of WTHRESH, it appears at least the 82571/2 hardware
177 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when 175 * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
178 * WTHRESH=4, so a setting of 5 gives the most efficient bus 176 * WTHRESH=4, so a setting of 5 gives the most efficient bus
179 * utilization but to avoid possible Tx stalls, set it to 1 177 * utilization but to avoid possible Tx stalls, set it to 1
@@ -214,8 +212,7 @@ struct e1000_ps_page {
214 u64 dma; /* must be u64 - written to hw */ 212 u64 dma; /* must be u64 - written to hw */
215}; 213};
216 214
217/* 215/* wrappers around a pointer to a socket buffer,
218 * wrappers around a pointer to a socket buffer,
219 * so a DMA handle can be stored along with the buffer 216 * so a DMA handle can be stored along with the buffer
220 */ 217 */
221struct e1000_buffer { 218struct e1000_buffer {
@@ -305,9 +302,7 @@ struct e1000_adapter {
305 u16 tx_itr; 302 u16 tx_itr;
306 u16 rx_itr; 303 u16 rx_itr;
307 304
308 /* 305 /* Tx */
309 * Tx
310 */
311 struct e1000_ring *tx_ring /* One per active queue */ 306 struct e1000_ring *tx_ring /* One per active queue */
312 ____cacheline_aligned_in_smp; 307 ____cacheline_aligned_in_smp;
313 u32 tx_fifo_limit; 308 u32 tx_fifo_limit;
@@ -340,9 +335,7 @@ struct e1000_adapter {
340 u32 tx_fifo_size; 335 u32 tx_fifo_size;
341 u32 tx_dma_failed; 336 u32 tx_dma_failed;
342 337
343 /* 338 /* Rx */
344 * Rx
345 */
346 bool (*clean_rx) (struct e1000_ring *ring, int *work_done, 339 bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
347 int work_to_do) ____cacheline_aligned_in_smp; 340 int work_to_do) ____cacheline_aligned_in_smp;
348 void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, 341 void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c11ac2756667..f95bc6ee1c22 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -214,7 +214,8 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
214 mac->autoneg = 0; 214 mac->autoneg = 0;
215 215
216 /* Make sure dplx is at most 1 bit and lsb of speed is not set 216 /* Make sure dplx is at most 1 bit and lsb of speed is not set
217 * for the switch() below to work */ 217 * for the switch() below to work
218 */
218 if ((spd & 1) || (dplx & ~1)) 219 if ((spd & 1) || (dplx & ~1))
219 goto err_inval; 220 goto err_inval;
220 221
@@ -263,8 +264,7 @@ static int e1000_set_settings(struct net_device *netdev,
263 struct e1000_adapter *adapter = netdev_priv(netdev); 264 struct e1000_adapter *adapter = netdev_priv(netdev);
264 struct e1000_hw *hw = &adapter->hw; 265 struct e1000_hw *hw = &adapter->hw;
265 266
266 /* 267 /* When SoL/IDER sessions are active, autoneg/speed/duplex
267 * When SoL/IDER sessions are active, autoneg/speed/duplex
268 * cannot be changed 268 * cannot be changed
269 */ 269 */
270 if (hw->phy.ops.check_reset_block && 270 if (hw->phy.ops.check_reset_block &&
@@ -273,8 +273,7 @@ static int e1000_set_settings(struct net_device *netdev,
273 return -EINVAL; 273 return -EINVAL;
274 } 274 }
275 275
276 /* 276 /* MDI setting is only allowed when autoneg enabled because
277 * MDI setting is only allowed when autoneg enabled because
278 * some hardware doesn't allow MDI setting when speed or 277 * some hardware doesn't allow MDI setting when speed or
279 * duplex is forced. 278 * duplex is forced.
280 */ 279 */
@@ -316,8 +315,7 @@ static int e1000_set_settings(struct net_device *netdev,
316 315
317 /* MDI-X => 2; MDI => 1; Auto => 3 */ 316 /* MDI-X => 2; MDI => 1; Auto => 3 */
318 if (ecmd->eth_tp_mdix_ctrl) { 317 if (ecmd->eth_tp_mdix_ctrl) {
319 /* 318 /* fix up the value for auto (3 => 0) as zero is mapped
320 * fix up the value for auto (3 => 0) as zero is mapped
321 * internally to auto 319 * internally to auto
322 */ 320 */
323 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) 321 if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -454,8 +452,8 @@ static void e1000_get_regs(struct net_device *netdev,
454 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 452 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
455 453
456 /* ethtool doesn't use anything past this point, so all this 454 /* ethtool doesn't use anything past this point, so all this
457 * code is likely legacy junk for apps that may or may not 455 * code is likely legacy junk for apps that may or may not exist
458 * exist */ 456 */
459 if (hw->phy.type == e1000_phy_m88) { 457 if (hw->phy.type == e1000_phy_m88) {
460 e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 458 e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
461 regs_buff[13] = (u32)phy_data; /* cable length */ 459 regs_buff[13] = (u32)phy_data; /* cable length */
@@ -598,8 +596,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
598 if (ret_val) 596 if (ret_val)
599 goto out; 597 goto out;
600 598
601 /* 599 /* Update the checksum over the first part of the EEPROM if needed
602 * Update the checksum over the first part of the EEPROM if needed
603 * and flush shadow RAM for applicable controllers 600 * and flush shadow RAM for applicable controllers
604 */ 601 */
605 if ((first_word <= NVM_CHECKSUM_REG) || 602 if ((first_word <= NVM_CHECKSUM_REG) ||
@@ -623,8 +620,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
623 strlcpy(drvinfo->version, e1000e_driver_version, 620 strlcpy(drvinfo->version, e1000e_driver_version,
624 sizeof(drvinfo->version)); 621 sizeof(drvinfo->version));
625 622
626 /* 623 /* EEPROM image version # is reported as firmware version # for
627 * EEPROM image version # is reported as firmware version # for
628 * PCI-E controllers 624 * PCI-E controllers
629 */ 625 */
630 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 626 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -708,8 +704,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
708 704
709 e1000e_down(adapter); 705 e1000e_down(adapter);
710 706
711 /* 707 /* We can't just free everything and then setup again, because the
712 * We can't just free everything and then setup again, because the
713 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring 708 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
714 * structs. First, attempt to allocate new resources... 709 * structs. First, attempt to allocate new resources...
715 */ 710 */
@@ -813,8 +808,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 u32 mask; 808 u32 mask;
814 u32 wlock_mac = 0; 809 u32 wlock_mac = 0;
815 810
816 /* 811 /* The status register is Read Only, so a write should fail.
817 * The status register is Read Only, so a write should fail.
818 * Some bits that get toggled are ignored. 812 * Some bits that get toggled are ignored.
819 */ 813 */
820 switch (mac->type) { 814 switch (mac->type) {
@@ -996,8 +990,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
996 } 990 }
997 991
998 if (!shared_int) { 992 if (!shared_int) {
999 /* 993 /* Disable the interrupt to be reported in
1000 * Disable the interrupt to be reported in
1001 * the cause register and then force the same 994 * the cause register and then force the same
1002 * interrupt and see if one gets posted. If 995 * interrupt and see if one gets posted. If
1003 * an interrupt was posted to the bus, the 996 * an interrupt was posted to the bus, the
@@ -1015,8 +1008,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1015 } 1008 }
1016 } 1009 }
1017 1010
1018 /* 1011 /* Enable the interrupt to be reported in
1019 * Enable the interrupt to be reported in
1020 * the cause register and then force the same 1012 * the cause register and then force the same
1021 * interrupt and see if one gets posted. If 1013 * interrupt and see if one gets posted. If
1022 * an interrupt was not posted to the bus, the 1014 * an interrupt was not posted to the bus, the
@@ -1034,8 +1026,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
1034 } 1026 }
1035 1027
1036 if (!shared_int) { 1028 if (!shared_int) {
1037 /* 1029 /* Disable the other interrupts to be reported in
1038 * Disable the other interrupts to be reported in
1039 * the cause register and then force the other 1030 * the cause register and then force the other
1040 * interrupts and see if any get posted. If 1031 * interrupts and see if any get posted. If
1041 * an interrupt was posted to the bus, the 1032 * an interrupt was posted to the bus, the
@@ -1378,8 +1369,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1378 hw->phy.type == e1000_phy_m88) { 1369 hw->phy.type == e1000_phy_m88) {
1379 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1370 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1380 } else { 1371 } else {
1381 /* 1372 /* Set the ILOS bit on the fiber Nic if half duplex link is
1382 * Set the ILOS bit on the fiber Nic if half duplex link is
1383 * detected. 1373 * detected.
1384 */ 1374 */
1385 if ((er32(STATUS) & E1000_STATUS_FD) == 0) 1375 if ((er32(STATUS) & E1000_STATUS_FD) == 0)
@@ -1388,8 +1378,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1388 1378
1389 ew32(CTRL, ctrl_reg); 1379 ew32(CTRL, ctrl_reg);
1390 1380
1391 /* 1381 /* Disable the receiver on the PHY so when a cable is plugged in, the
1392 * Disable the receiver on the PHY so when a cable is plugged in, the
1393 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1382 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1394 */ 1383 */
1395 if (hw->phy.type == e1000_phy_m88) 1384 if (hw->phy.type == e1000_phy_m88)
@@ -1408,8 +1397,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1408 1397
1409 /* special requirements for 82571/82572 fiber adapters */ 1398 /* special requirements for 82571/82572 fiber adapters */
1410 1399
1411 /* 1400 /* jump through hoops to make sure link is up because serdes
1412 * jump through hoops to make sure link is up because serdes
1413 * link is hardwired up 1401 * link is hardwired up
1414 */ 1402 */
1415 ctrl |= E1000_CTRL_SLU; 1403 ctrl |= E1000_CTRL_SLU;
@@ -1429,8 +1417,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1429 ew32(CTRL, ctrl); 1417 ew32(CTRL, ctrl);
1430 } 1418 }
1431 1419
1432 /* 1420 /* special write to serdes control register to enable SerDes analog
1433 * special write to serdes control register to enable SerDes analog
1434 * loopback 1421 * loopback
1435 */ 1422 */
1436#define E1000_SERDES_LB_ON 0x410 1423#define E1000_SERDES_LB_ON 0x410
@@ -1448,8 +1435,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1448 u32 ctrlext = er32(CTRL_EXT); 1435 u32 ctrlext = er32(CTRL_EXT);
1449 u32 ctrl = er32(CTRL); 1436 u32 ctrl = er32(CTRL);
1450 1437
1451 /* 1438 /* save CTRL_EXT to restore later, reuse an empty variable (unused
1452 * save CTRL_EXT to restore later, reuse an empty variable (unused
1453 * on mac_type 80003es2lan) 1439 * on mac_type 80003es2lan)
1454 */ 1440 */
1455 adapter->tx_fifo_head = ctrlext; 1441 adapter->tx_fifo_head = ctrlext;
@@ -1585,8 +1571,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1585 1571
1586 ew32(RDT(0), rx_ring->count - 1); 1572 ew32(RDT(0), rx_ring->count - 1);
1587 1573
1588 /* 1574 /* Calculate the loop count based on the largest descriptor ring
1589 * Calculate the loop count based on the largest descriptor ring
1590 * The idea is to wrap the largest ring a number of times using 64 1575 * The idea is to wrap the largest ring a number of times using 64
1591 * send/receive pairs during each loop 1576 * send/receive pairs during each loop
1592 */ 1577 */
@@ -1627,8 +1612,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1627 l++; 1612 l++;
1628 if (l == rx_ring->count) 1613 if (l == rx_ring->count)
1629 l = 0; 1614 l = 0;
1630 /* 1615 /* time + 20 msecs (200 msecs on 2.4) is more than
1631 * time + 20 msecs (200 msecs on 2.4) is more than
1632 * enough time to complete the receives, if it's 1616 * enough time to complete the receives, if it's
1633 * exceeded, break and error off 1617 * exceeded, break and error off
1634 */ 1618 */
@@ -1649,10 +1633,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1649{ 1633{
1650 struct e1000_hw *hw = &adapter->hw; 1634 struct e1000_hw *hw = &adapter->hw;
1651 1635
1652 /* 1636 /* PHY loopback cannot be performed if SoL/IDER sessions are active */
1653 * PHY loopback cannot be performed if SoL/IDER
1654 * sessions are active
1655 */
1656 if (hw->phy.ops.check_reset_block && 1637 if (hw->phy.ops.check_reset_block &&
1657 hw->phy.ops.check_reset_block(hw)) { 1638 hw->phy.ops.check_reset_block(hw)) {
1658 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); 1639 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
@@ -1686,8 +1667,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1686 int i = 0; 1667 int i = 0;
1687 hw->mac.serdes_has_link = false; 1668 hw->mac.serdes_has_link = false;
1688 1669
1689 /* 1670 /* On some blade server designs, link establishment
1690 * On some blade server designs, link establishment
1691 * could take as long as 2-3 minutes 1671 * could take as long as 2-3 minutes
1692 */ 1672 */
1693 do { 1673 do {
@@ -1701,8 +1681,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1701 } else { 1681 } else {
1702 hw->mac.ops.check_for_link(hw); 1682 hw->mac.ops.check_for_link(hw);
1703 if (hw->mac.autoneg) 1683 if (hw->mac.autoneg)
1704 /* 1684 /* On some Phy/switch combinations, link establishment
1705 * On some Phy/switch combinations, link establishment
1706 * can take a few seconds more than expected. 1685 * can take a few seconds more than expected.
1707 */ 1686 */
1708 msleep(5000); 1687 msleep(5000);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index d37bfd96c987..cf217777586c 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -85,8 +85,7 @@ enum e1e_registers {
85 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 85 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
86 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 86 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
87 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 87 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
88/* 88/* Convenience macros
89 * Convenience macros
90 * 89 *
91 * Note: "_n" is the queue number of the register to be written to. 90 * Note: "_n" is the queue number of the register to be written to.
92 * 91 *
@@ -800,8 +799,7 @@ struct e1000_mac_operations {
800 s32 (*read_mac_addr)(struct e1000_hw *); 799 s32 (*read_mac_addr)(struct e1000_hw *);
801}; 800};
802 801
803/* 802/* When to use various PHY register access functions:
804 * When to use various PHY register access functions:
805 * 803 *
806 * Func Caller 804 * Func Caller
807 * Function Does Does When to use 805 * Function Does Does When to use
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e3a7b07df629..976336547607 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -26,8 +26,7 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29/* 29/* 82562G 10/100 Network Connection
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection 30 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection 31 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection 32 * 82562GT-2 10/100 Network Connection
@@ -354,8 +353,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
354 return true; 353 return true;
355 } 354 }
356 355
357 /* 356 /* In case the PHY needs to be in mdio slow mode,
358 * In case the PHY needs to be in mdio slow mode,
359 * set slow mode and try to get the PHY id again. 357 * set slow mode and try to get the PHY id again.
360 */ 358 */
361 hw->phy.ops.release(hw); 359 hw->phy.ops.release(hw);
@@ -386,8 +384,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
386 return ret_val; 384 return ret_val;
387 } 385 }
388 386
389 /* 387 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
390 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
391 * inaccessible and resetting the PHY is not blocked, toggle the 388 * inaccessible and resetting the PHY is not blocked, toggle the
392 * LANPHYPC Value bit to force the interconnect to PCIe mode. 389 * LANPHYPC Value bit to force the interconnect to PCIe mode.
393 */ 390 */
@@ -396,8 +393,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
396 if (e1000_phy_is_accessible_pchlan(hw)) 393 if (e1000_phy_is_accessible_pchlan(hw))
397 break; 394 break;
398 395
399 /* 396 /* Before toggling LANPHYPC, see if PHY is accessible by
400 * Before toggling LANPHYPC, see if PHY is accessible by
401 * forcing MAC to SMBus mode first. 397 * forcing MAC to SMBus mode first.
402 */ 398 */
403 mac_reg = er32(CTRL_EXT); 399 mac_reg = er32(CTRL_EXT);
@@ -406,8 +402,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
406 402
407 /* fall-through */ 403 /* fall-through */
408 case e1000_pch2lan: 404 case e1000_pch2lan:
409 /* 405 /* Gate automatic PHY configuration by hardware on
410 * Gate automatic PHY configuration by hardware on
411 * non-managed 82579 406 * non-managed 82579
412 */ 407 */
413 if ((hw->mac.type == e1000_pch2lan) && 408 if ((hw->mac.type == e1000_pch2lan) &&
@@ -474,8 +469,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
474 469
475 hw->phy.ops.release(hw); 470 hw->phy.ops.release(hw);
476 471
477 /* 472 /* Reset the PHY before any access to it. Doing so, ensures
478 * Reset the PHY before any access to it. Doing so, ensures
479 * that the PHY is in a known good state before we read/write 473 * that the PHY is in a known good state before we read/write
480 * PHY registers. The generic reset is sufficient here, 474 * PHY registers. The generic reset is sufficient here,
481 * because we haven't determined the PHY type yet. 475 * because we haven't determined the PHY type yet.
@@ -536,8 +530,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
536 /* fall-through */ 530 /* fall-through */
537 case e1000_pch2lan: 531 case e1000_pch2lan:
538 case e1000_pch_lpt: 532 case e1000_pch_lpt:
539 /* 533 /* In case the PHY needs to be in mdio slow mode,
540 * In case the PHY needs to be in mdio slow mode,
541 * set slow mode and try to get the PHY id again. 534 * set slow mode and try to get the PHY id again.
542 */ 535 */
543 ret_val = e1000_set_mdio_slow_mode_hv(hw); 536 ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -593,8 +586,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
593 phy->ops.power_up = e1000_power_up_phy_copper; 586 phy->ops.power_up = e1000_power_up_phy_copper;
594 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 587 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
595 588
596 /* 589 /* We may need to do this twice - once for IGP and if that fails,
597 * We may need to do this twice - once for IGP and if that fails,
598 * we'll set BM func pointers and try again 590 * we'll set BM func pointers and try again
599 */ 591 */
600 ret_val = e1000e_determine_phy_address(hw); 592 ret_val = e1000e_determine_phy_address(hw);
@@ -679,8 +671,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
679 671
680 gfpreg = er32flash(ICH_FLASH_GFPREG); 672 gfpreg = er32flash(ICH_FLASH_GFPREG);
681 673
682 /* 674 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
683 * sector_X_addr is a "sector"-aligned address (4096 bytes)
684 * Add 1 to sector_end_addr since this sector is included in 675 * Add 1 to sector_end_addr since this sector is included in
685 * the overall size. 676 * the overall size.
686 */ 677 */
@@ -690,8 +681,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
690 /* flash_base_addr is byte-aligned */ 681 /* flash_base_addr is byte-aligned */
691 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 682 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
692 683
693 /* 684 /* find total size of the NVM, then cut in half since the total
694 * find total size of the NVM, then cut in half since the total
695 * size represents two separate NVM banks. 685 * size represents two separate NVM banks.
696 */ 686 */
697 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 687 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
@@ -788,8 +778,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
788 if (mac->type == e1000_ich8lan) 778 if (mac->type == e1000_ich8lan)
789 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 779 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
790 780
791 /* 781 /* Gate automatic PHY configuration by hardware on managed
792 * Gate automatic PHY configuration by hardware on managed
793 * 82579 and i217 782 * 82579 and i217
794 */ 783 */
795 if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) && 784 if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
@@ -840,8 +829,7 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
840 goto release; 829 goto release;
841 e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability); 830 e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
842 831
843 /* 832 /* EEE is not supported in 100Half, so ignore partner's EEE
844 * EEE is not supported in 100Half, so ignore partner's EEE
845 * in 100 ability if full-duplex is not advertised. 833 * in 100 ability if full-duplex is not advertised.
846 */ 834 */
847 e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg); 835 e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
@@ -869,8 +857,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
869 bool link; 857 bool link;
870 u16 phy_reg; 858 u16 phy_reg;
871 859
872 /* 860 /* We only want to go out to the PHY registers to see if Auto-Neg
873 * We only want to go out to the PHY registers to see if Auto-Neg
874 * has completed and/or if our link status has changed. The 861 * has completed and/or if our link status has changed. The
875 * get_link_status flag is set upon receiving a Link Status 862 * get_link_status flag is set upon receiving a Link Status
876 * Change or Rx Sequence Error interrupt. 863 * Change or Rx Sequence Error interrupt.
@@ -878,8 +865,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
878 if (!mac->get_link_status) 865 if (!mac->get_link_status)
879 return 0; 866 return 0;
880 867
881 /* 868 /* First we want to see if the MII Status Register reports
882 * First we want to see if the MII Status Register reports
883 * link. If so, then we want to get the current speed/duplex 869 * link. If so, then we want to get the current speed/duplex
884 * of the PHY. 870 * of the PHY.
885 */ 871 */
@@ -914,8 +900,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
914 return ret_val; 900 return ret_val;
915 } 901 }
916 902
917 /* 903 /* Workaround for PCHx parts in half-duplex:
918 * Workaround for PCHx parts in half-duplex:
919 * Set the number of preambles removed from the packet 904 * Set the number of preambles removed from the packet
920 * when it is passed from the PHY to the MAC to prevent 905 * when it is passed from the PHY to the MAC to prevent
921 * the MAC from misinterpreting the packet type. 906 * the MAC from misinterpreting the packet type.
@@ -932,8 +917,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
932 break; 917 break;
933 } 918 }
934 919
935 /* 920 /* Check if there was DownShift, must be checked
936 * Check if there was DownShift, must be checked
937 * immediately after link-up 921 * immediately after link-up
938 */ 922 */
939 e1000e_check_downshift(hw); 923 e1000e_check_downshift(hw);
@@ -943,22 +927,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
943 if (ret_val) 927 if (ret_val)
944 return ret_val; 928 return ret_val;
945 929
946 /* 930 /* If we are forcing speed/duplex, then we simply return since
947 * If we are forcing speed/duplex, then we simply return since
948 * we have already determined whether we have link or not. 931 * we have already determined whether we have link or not.
949 */ 932 */
950 if (!mac->autoneg) 933 if (!mac->autoneg)
951 return -E1000_ERR_CONFIG; 934 return -E1000_ERR_CONFIG;
952 935
953 /* 936 /* Auto-Neg is enabled. Auto Speed Detection takes care
954 * Auto-Neg is enabled. Auto Speed Detection takes care
955 * of MAC speed/duplex configuration. So we only need to 937 * of MAC speed/duplex configuration. So we only need to
956 * configure Collision Distance in the MAC. 938 * configure Collision Distance in the MAC.
957 */ 939 */
958 mac->ops.config_collision_dist(hw); 940 mac->ops.config_collision_dist(hw);
959 941
960 /* 942 /* Configure Flow Control now that Auto-Neg has completed.
961 * Configure Flow Control now that Auto-Neg has completed.
962 * First, we need to restore the desired flow control 943 * First, we need to restore the desired flow control
963 * settings because we may have had to re-autoneg with a 944 * settings because we may have had to re-autoneg with a
964 * different link partner. 945 * different link partner.
@@ -1000,8 +981,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1000 if (rc) 981 if (rc)
1001 return rc; 982 return rc;
1002 983
1003 /* 984 /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1004 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1005 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). 985 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1006 */ 986 */
1007 if ((adapter->hw.phy.type == e1000_phy_ife) || 987 if ((adapter->hw.phy.type == e1000_phy_ife) ||
@@ -1191,8 +1171,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1191{ 1171{
1192 u32 rar_low, rar_high; 1172 u32 rar_low, rar_high;
1193 1173
1194 /* 1174 /* HW expects these in little endian so we reverse the byte order
1195 * HW expects these in little endian so we reverse the byte order
1196 * from network order (big endian) to little endian 1175 * from network order (big endian) to little endian
1197 */ 1176 */
1198 rar_low = ((u32)addr[0] | 1177 rar_low = ((u32)addr[0] |
@@ -1256,8 +1235,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1256 u32 rar_low, rar_high; 1235 u32 rar_low, rar_high;
1257 u32 wlock_mac; 1236 u32 wlock_mac;
1258 1237
1259 /* 1238 /* HW expects these in little endian so we reverse the byte order
1260 * HW expects these in little endian so we reverse the byte order
1261 * from network order (big endian) to little endian 1239 * from network order (big endian) to little endian
1262 */ 1240 */
1263 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 1241 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -1277,8 +1255,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1277 return; 1255 return;
1278 } 1256 }
1279 1257
1280 /* 1258 /* The manageability engine (ME) can lock certain SHRAR registers that
1281 * The manageability engine (ME) can lock certain SHRAR registers that
1282 * it is using - those registers are unavailable for use. 1259 * it is using - those registers are unavailable for use.
1283 */ 1260 */
1284 if (index < hw->mac.rar_entry_count) { 1261 if (index < hw->mac.rar_entry_count) {
@@ -1387,8 +1364,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1387 s32 ret_val = 0; 1364 s32 ret_val = 0;
1388 u16 word_addr, reg_data, reg_addr, phy_page = 0; 1365 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1389 1366
1390 /* 1367 /* Initialize the PHY from the NVM on ICH platforms. This
1391 * Initialize the PHY from the NVM on ICH platforms. This
1392 * is needed due to an issue where the NVM configuration is 1368 * is needed due to an issue where the NVM configuration is
1393 * not properly autoloaded after power transitions. 1369 * not properly autoloaded after power transitions.
1394 * Therefore, after each PHY reset, we will load the 1370 * Therefore, after each PHY reset, we will load the
@@ -1422,8 +1398,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1422 if (!(data & sw_cfg_mask)) 1398 if (!(data & sw_cfg_mask))
1423 goto release; 1399 goto release;
1424 1400
1425 /* 1401 /* Make sure HW does not configure LCD from PHY
1426 * Make sure HW does not configure LCD from PHY
1427 * extended configuration before SW configuration 1402 * extended configuration before SW configuration
1428 */ 1403 */
1429 data = er32(EXTCNF_CTRL); 1404 data = er32(EXTCNF_CTRL);
@@ -1443,8 +1418,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1443 if (((hw->mac.type == e1000_pchlan) && 1418 if (((hw->mac.type == e1000_pchlan) &&
1444 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || 1419 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1445 (hw->mac.type > e1000_pchlan)) { 1420 (hw->mac.type > e1000_pchlan)) {
1446 /* 1421 /* HW configures the SMBus address and LEDs when the
1447 * HW configures the SMBus address and LEDs when the
1448 * OEM and LCD Write Enable bits are set in the NVM. 1422 * OEM and LCD Write Enable bits are set in the NVM.
1449 * When both NVM bits are cleared, SW will configure 1423 * When both NVM bits are cleared, SW will configure
1450 * them instead. 1424 * them instead.
@@ -1748,8 +1722,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1748 } 1722 }
1749 1723
1750 if (hw->phy.type == e1000_phy_82578) { 1724 if (hw->phy.type == e1000_phy_82578) {
1751 /* 1725 /* Return registers to default by doing a soft reset then
1752 * Return registers to default by doing a soft reset then
1753 * writing 0x3140 to the control register. 1726 * writing 0x3140 to the control register.
1754 */ 1727 */
1755 if (hw->phy.revision < 2) { 1728 if (hw->phy.revision < 2) {
@@ -1769,8 +1742,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1769 if (ret_val) 1742 if (ret_val)
1770 return ret_val; 1743 return ret_val;
1771 1744
1772 /* 1745 /* Configure the K1 Si workaround during phy reset assuming there is
1773 * Configure the K1 Si workaround during phy reset assuming there is
1774 * link so that it disables K1 if link is in 1Gbps. 1746 * link so that it disables K1 if link is in 1Gbps.
1775 */ 1747 */
1776 ret_val = e1000_k1_gig_workaround_hv(hw, true); 1748 ret_val = e1000_k1_gig_workaround_hv(hw, true);
@@ -1853,8 +1825,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1853 return ret_val; 1825 return ret_val;
1854 1826
1855 if (enable) { 1827 if (enable) {
1856 /* 1828 /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
1857 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1858 * SHRAL/H) and initial CRC values to the MAC 1829 * SHRAL/H) and initial CRC values to the MAC
1859 */ 1830 */
1860 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { 1831 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
@@ -2131,8 +2102,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2131 udelay(100); 2102 udelay(100);
2132 } while ((!data) && --loop); 2103 } while ((!data) && --loop);
2133 2104
2134 /* 2105 /* If basic configuration is incomplete before the above loop
2135 * If basic configuration is incomplete before the above loop
2136 * count reaches 0, loading the configuration from NVM will 2106 * count reaches 0, loading the configuration from NVM will
2137 * leave the PHY in a bad state possibly resulting in no link. 2107 * leave the PHY in a bad state possibly resulting in no link.
2138 */ 2108 */
@@ -2299,8 +2269,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2299 if (phy->type != e1000_phy_igp_3) 2269 if (phy->type != e1000_phy_igp_3)
2300 return 0; 2270 return 0;
2301 2271
2302 /* 2272 /* Call gig speed drop workaround on LPLU before accessing
2303 * Call gig speed drop workaround on LPLU before accessing
2304 * any PHY registers 2273 * any PHY registers
2305 */ 2274 */
2306 if (hw->mac.type == e1000_ich8lan) 2275 if (hw->mac.type == e1000_ich8lan)
@@ -2319,8 +2288,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2319 if (phy->type != e1000_phy_igp_3) 2288 if (phy->type != e1000_phy_igp_3)
2320 return 0; 2289 return 0;
2321 2290
2322 /* 2291 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2323 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2324 * during Dx states where the power conservation is most 2292 * during Dx states where the power conservation is most
2325 * important. During driver activity we should enable 2293 * important. During driver activity we should enable
2326 * SmartSpeed, so performance is maintained. 2294 * SmartSpeed, so performance is maintained.
@@ -2382,8 +2350,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2382 if (phy->type != e1000_phy_igp_3) 2350 if (phy->type != e1000_phy_igp_3)
2383 return 0; 2351 return 0;
2384 2352
2385 /* 2353 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
2386 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2387 * during Dx states where the power conservation is most 2354 * during Dx states where the power conservation is most
2388 * important. During driver activity we should enable 2355 * important. During driver activity we should enable
2389 * SmartSpeed, so performance is maintained. 2356 * SmartSpeed, so performance is maintained.
@@ -2420,8 +2387,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2420 if (phy->type != e1000_phy_igp_3) 2387 if (phy->type != e1000_phy_igp_3)
2421 return 0; 2388 return 0;
2422 2389
2423 /* 2390 /* Call gig speed drop workaround on LPLU before accessing
2424 * Call gig speed drop workaround on LPLU before accessing
2425 * any PHY registers 2391 * any PHY registers
2426 */ 2392 */
2427 if (hw->mac.type == e1000_ich8lan) 2393 if (hw->mac.type == e1000_ich8lan)
@@ -2589,8 +2555,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2589 2555
2590 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2556 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2591 2557
2592 /* 2558 /* Either we should have a hardware SPI cycle in progress
2593 * Either we should have a hardware SPI cycle in progress
2594 * bit to check against, in order to start a new cycle or 2559 * bit to check against, in order to start a new cycle or
2595 * FDONE bit should be changed in the hardware so that it 2560 * FDONE bit should be changed in the hardware so that it
2596 * is 1 after hardware reset, which can then be used as an 2561 * is 1 after hardware reset, which can then be used as an
@@ -2599,8 +2564,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2599 */ 2564 */
2600 2565
2601 if (!hsfsts.hsf_status.flcinprog) { 2566 if (!hsfsts.hsf_status.flcinprog) {
2602 /* 2567 /* There is no cycle running at present,
2603 * There is no cycle running at present,
2604 * so we can start a cycle. 2568 * so we can start a cycle.
2605 * Begin by setting Flash Cycle Done. 2569 * Begin by setting Flash Cycle Done.
2606 */ 2570 */
@@ -2610,8 +2574,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2610 } else { 2574 } else {
2611 s32 i; 2575 s32 i;
2612 2576
2613 /* 2577 /* Otherwise poll for sometime so the current
2614 * Otherwise poll for sometime so the current
2615 * cycle has a chance to end before giving up. 2578 * cycle has a chance to end before giving up.
2616 */ 2579 */
2617 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 2580 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
@@ -2623,8 +2586,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2623 udelay(1); 2586 udelay(1);
2624 } 2587 }
2625 if (!ret_val) { 2588 if (!ret_val) {
2626 /* 2589 /* Successful in waiting for previous cycle to timeout,
2627 * Successful in waiting for previous cycle to timeout,
2628 * now set the Flash Cycle Done. 2590 * now set the Flash Cycle Done.
2629 */ 2591 */
2630 hsfsts.hsf_status.flcdone = 1; 2592 hsfsts.hsf_status.flcdone = 1;
@@ -2753,8 +2715,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2753 ret_val = e1000_flash_cycle_ich8lan(hw, 2715 ret_val = e1000_flash_cycle_ich8lan(hw,
2754 ICH_FLASH_READ_COMMAND_TIMEOUT); 2716 ICH_FLASH_READ_COMMAND_TIMEOUT);
2755 2717
2756 /* 2718 /* Check if FCERR is set to 1, if set to 1, clear it
2757 * Check if FCERR is set to 1, if set to 1, clear it
2758 * and try the whole sequence a few more times, else 2719 * and try the whole sequence a few more times, else
2759 * read in (shift in) the Flash Data0, the order is 2720 * read in (shift in) the Flash Data0, the order is
2760 * least significant byte first msb to lsb 2721 * least significant byte first msb to lsb
@@ -2767,8 +2728,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2767 *data = (u16)(flash_data & 0x0000FFFF); 2728 *data = (u16)(flash_data & 0x0000FFFF);
2768 break; 2729 break;
2769 } else { 2730 } else {
2770 /* 2731 /* If we've gotten here, then things are probably
2771 * If we've gotten here, then things are probably
2772 * completely hosed, but if the error condition is 2732 * completely hosed, but if the error condition is
2773 * detected, it won't hurt to give it another try... 2733 * detected, it won't hurt to give it another try...
2774 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 2734 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -2849,8 +2809,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2849 2809
2850 nvm->ops.acquire(hw); 2810 nvm->ops.acquire(hw);
2851 2811
2852 /* 2812 /* We're writing to the opposite bank so if we're on bank 1,
2853 * We're writing to the opposite bank so if we're on bank 1,
2854 * write to bank 0 etc. We also need to erase the segment that 2813 * write to bank 0 etc. We also need to erase the segment that
2855 * is going to be written 2814 * is going to be written
2856 */ 2815 */
@@ -2875,8 +2834,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2875 } 2834 }
2876 2835
2877 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 2836 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2878 /* 2837 /* Determine whether to write the value stored
2879 * Determine whether to write the value stored
2880 * in the other NVM bank or a modified value stored 2838 * in the other NVM bank or a modified value stored
2881 * in the shadow RAM 2839 * in the shadow RAM
2882 */ 2840 */
@@ -2890,8 +2848,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2890 break; 2848 break;
2891 } 2849 }
2892 2850
2893 /* 2851 /* If the word is 0x13, then make sure the signature bits
2894 * If the word is 0x13, then make sure the signature bits
2895 * (15:14) are 11b until the commit has completed. 2852 * (15:14) are 11b until the commit has completed.
2896 * This will allow us to write 10b which indicates the 2853 * This will allow us to write 10b which indicates the
2897 * signature is valid. We want to do this after the write 2854 * signature is valid. We want to do this after the write
@@ -2920,8 +2877,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2920 break; 2877 break;
2921 } 2878 }
2922 2879
2923 /* 2880 /* Don't bother writing the segment valid bits if sector
2924 * Don't bother writing the segment valid bits if sector
2925 * programming failed. 2881 * programming failed.
2926 */ 2882 */
2927 if (ret_val) { 2883 if (ret_val) {
@@ -2930,8 +2886,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2930 goto release; 2886 goto release;
2931 } 2887 }
2932 2888
2933 /* 2889 /* Finally validate the new segment by setting bit 15:14
2934 * Finally validate the new segment by setting bit 15:14
2935 * to 10b in word 0x13 , this can be done without an 2890 * to 10b in word 0x13 , this can be done without an
2936 * erase as well since these bits are 11 to start with 2891 * erase as well since these bits are 11 to start with
2937 * and we need to change bit 14 to 0b 2892 * and we need to change bit 14 to 0b
@@ -2948,8 +2903,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2948 if (ret_val) 2903 if (ret_val)
2949 goto release; 2904 goto release;
2950 2905
2951 /* 2906 /* And invalidate the previously valid segment by setting
2952 * And invalidate the previously valid segment by setting
2953 * its signature word (0x13) high_byte to 0b. This can be 2907 * its signature word (0x13) high_byte to 0b. This can be
2954 * done without an erase because flash erase sets all bits 2908 * done without an erase because flash erase sets all bits
2955 * to 1's. We can write 1's to 0's without an erase 2909 * to 1's. We can write 1's to 0's without an erase
@@ -2968,8 +2922,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2968release: 2922release:
2969 nvm->ops.release(hw); 2923 nvm->ops.release(hw);
2970 2924
2971 /* 2925 /* Reload the EEPROM, or else modifications will not appear
2972 * Reload the EEPROM, or else modifications will not appear
2973 * until after the next adapter reset. 2926 * until after the next adapter reset.
2974 */ 2927 */
2975 if (!ret_val) { 2928 if (!ret_val) {
@@ -2997,8 +2950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2997 s32 ret_val; 2950 s32 ret_val;
2998 u16 data; 2951 u16 data;
2999 2952
3000 /* 2953 /* Read 0x19 and check bit 6. If this bit is 0, the checksum
3001 * Read 0x19 and check bit 6. If this bit is 0, the checksum
3002 * needs to be fixed. This bit is an indication that the NVM 2954 * needs to be fixed. This bit is an indication that the NVM
3003 * was prepared by OEM software and did not calculate the 2955 * was prepared by OEM software and did not calculate the
3004 * checksum...a likely scenario. 2956 * checksum...a likely scenario.
@@ -3048,8 +3000,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
3048 pr0.range.wpe = true; 3000 pr0.range.wpe = true;
3049 ew32flash(ICH_FLASH_PR0, pr0.regval); 3001 ew32flash(ICH_FLASH_PR0, pr0.regval);
3050 3002
3051 /* 3003 /* Lock down a subset of GbE Flash Control Registers, e.g.
3052 * Lock down a subset of GbE Flash Control Registers, e.g.
3053 * PR0 to prevent the write-protection from being lifted. 3004 * PR0 to prevent the write-protection from being lifted.
3054 * Once FLOCKDN is set, the registers protected by it cannot 3005 * Once FLOCKDN is set, the registers protected by it cannot
3055 * be written until FLOCKDN is cleared by a hardware reset. 3006 * be written until FLOCKDN is cleared by a hardware reset.
@@ -3109,8 +3060,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3109 3060
3110 ew32flash(ICH_FLASH_FDATA0, flash_data); 3061 ew32flash(ICH_FLASH_FDATA0, flash_data);
3111 3062
3112 /* 3063 /* check if FCERR is set to 1 , if set to 1, clear it
3113 * check if FCERR is set to 1 , if set to 1, clear it
3114 * and try the whole sequence a few more times else done 3064 * and try the whole sequence a few more times else done
3115 */ 3065 */
3116 ret_val = e1000_flash_cycle_ich8lan(hw, 3066 ret_val = e1000_flash_cycle_ich8lan(hw,
@@ -3118,8 +3068,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3118 if (!ret_val) 3068 if (!ret_val)
3119 break; 3069 break;
3120 3070
3121 /* 3071 /* If we're here, then things are most likely
3122 * If we're here, then things are most likely
3123 * completely hosed, but if the error condition 3072 * completely hosed, but if the error condition
3124 * is detected, it won't hurt to give it another 3073 * is detected, it won't hurt to give it another
3125 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 3074 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -3207,8 +3156,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3207 3156
3208 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3157 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
3209 3158
3210 /* 3159 /* Determine HW Sector size: Read BERASE bits of hw flash status
3211 * Determine HW Sector size: Read BERASE bits of hw flash status
3212 * register 3160 * register
3213 * 00: The Hw sector is 256 bytes, hence we need to erase 16 3161 * 00: The Hw sector is 256 bytes, hence we need to erase 16
3214 * consecutive sectors. The start index for the nth Hw sector 3162 * consecutive sectors. The start index for the nth Hw sector
@@ -3253,16 +3201,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3253 if (ret_val) 3201 if (ret_val)
3254 return ret_val; 3202 return ret_val;
3255 3203
3256 /* 3204 /* Write a value 11 (block Erase) in Flash
3257 * Write a value 11 (block Erase) in Flash
3258 * Cycle field in hw flash control 3205 * Cycle field in hw flash control
3259 */ 3206 */
3260 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 3207 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
3261 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 3208 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
3262 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 3209 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
3263 3210
3264 /* 3211 /* Write the last 24 bits of an index within the
3265 * Write the last 24 bits of an index within the
3266 * block into Flash Linear address field in Flash 3212 * block into Flash Linear address field in Flash
3267 * Address. 3213 * Address.
3268 */ 3214 */
@@ -3274,8 +3220,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
3274 if (!ret_val) 3220 if (!ret_val)
3275 break; 3221 break;
3276 3222
3277 /* 3223 /* Check if FCERR is set to 1. If 1,
3278 * Check if FCERR is set to 1. If 1,
3279 * clear it and try the whole sequence 3224 * clear it and try the whole sequence
3280 * a few more times else Done 3225 * a few more times else Done
3281 */ 3226 */
@@ -3403,8 +3348,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3403 3348
3404 ret_val = e1000e_get_bus_info_pcie(hw); 3349 ret_val = e1000e_get_bus_info_pcie(hw);
3405 3350
3406 /* 3351 /* ICH devices are "PCI Express"-ish. They have
3407 * ICH devices are "PCI Express"-ish. They have
3408 * a configuration space, but do not contain 3352 * a configuration space, but do not contain
3409 * PCI Express Capability registers, so bus width 3353 * PCI Express Capability registers, so bus width
3410 * must be hardcoded. 3354 * must be hardcoded.
@@ -3429,8 +3373,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3429 u32 ctrl, reg; 3373 u32 ctrl, reg;
3430 s32 ret_val; 3374 s32 ret_val;
3431 3375
3432 /* 3376 /* Prevent the PCI-E bus from sticking if there is no TLP connection
3433 * Prevent the PCI-E bus from sticking if there is no TLP connection
3434 * on the last TLP read/write transaction when MAC is reset. 3377 * on the last TLP read/write transaction when MAC is reset.
3435 */ 3378 */
3436 ret_val = e1000e_disable_pcie_master(hw); 3379 ret_val = e1000e_disable_pcie_master(hw);
@@ -3440,8 +3383,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3440 e_dbg("Masking off all interrupts\n"); 3383 e_dbg("Masking off all interrupts\n");
3441 ew32(IMC, 0xffffffff); 3384 ew32(IMC, 0xffffffff);
3442 3385
3443 /* 3386 /* Disable the Transmit and Receive units. Then delay to allow
3444 * Disable the Transmit and Receive units. Then delay to allow
3445 * any pending transactions to complete before we hit the MAC 3387 * any pending transactions to complete before we hit the MAC
3446 * with the global reset. 3388 * with the global reset.
3447 */ 3389 */
@@ -3474,15 +3416,13 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3474 ctrl = er32(CTRL); 3416 ctrl = er32(CTRL);
3475 3417
3476 if (!hw->phy.ops.check_reset_block(hw)) { 3418 if (!hw->phy.ops.check_reset_block(hw)) {
3477 /* 3419 /* Full-chip reset requires MAC and PHY reset at the same
3478 * Full-chip reset requires MAC and PHY reset at the same
3479 * time to make sure the interface between MAC and the 3420 * time to make sure the interface between MAC and the
3480 * external PHY is reset. 3421 * external PHY is reset.
3481 */ 3422 */
3482 ctrl |= E1000_CTRL_PHY_RST; 3423 ctrl |= E1000_CTRL_PHY_RST;
3483 3424
3484 /* 3425 /* Gate automatic PHY configuration by hardware on
3485 * Gate automatic PHY configuration by hardware on
3486 * non-managed 82579 3426 * non-managed 82579
3487 */ 3427 */
3488 if ((hw->mac.type == e1000_pch2lan) && 3428 if ((hw->mac.type == e1000_pch2lan) &&
@@ -3516,8 +3456,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3516 return ret_val; 3456 return ret_val;
3517 } 3457 }
3518 3458
3519 /* 3459 /* For PCH, this write will make sure that any noise
3520 * For PCH, this write will make sure that any noise
3521 * will be detected as a CRC error and be dropped rather than show up 3460 * will be detected as a CRC error and be dropped rather than show up
3522 * as a bad packet to the DMA engine. 3461 * as a bad packet to the DMA engine.
3523 */ 3462 */
@@ -3569,8 +3508,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3569 for (i = 0; i < mac->mta_reg_count; i++) 3508 for (i = 0; i < mac->mta_reg_count; i++)
3570 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 3509 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3571 3510
3572 /* 3511 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
3573 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3574 * the ME. Disable wakeup by clearing the host wakeup bit. 3512 * the ME. Disable wakeup by clearing the host wakeup bit.
3575 * Reset the phy after disabling host wakeup to reset the Rx buffer. 3513 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3576 */ 3514 */
@@ -3600,8 +3538,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3600 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 3538 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3601 ew32(TXDCTL(1), txdctl); 3539 ew32(TXDCTL(1), txdctl);
3602 3540
3603 /* 3541 /* ICH8 has opposite polarity of no_snoop bits.
3604 * ICH8 has opposite polarity of no_snoop bits.
3605 * By default, we should use snoop behavior. 3542 * By default, we should use snoop behavior.
3606 */ 3543 */
3607 if (mac->type == e1000_ich8lan) 3544 if (mac->type == e1000_ich8lan)
@@ -3614,8 +3551,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3614 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 3551 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3615 ew32(CTRL_EXT, ctrl_ext); 3552 ew32(CTRL_EXT, ctrl_ext);
3616 3553
3617 /* 3554 /* Clear all of the statistics registers (clear on read). It is
3618 * Clear all of the statistics registers (clear on read). It is
3619 * important that we do this after we have tried to establish link 3555 * important that we do this after we have tried to establish link
3620 * because the symbol error count will increment wildly if there 3556 * because the symbol error count will increment wildly if there
3621 * is no link. 3557 * is no link.
@@ -3676,15 +3612,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3676 ew32(STATUS, reg); 3612 ew32(STATUS, reg);
3677 } 3613 }
3678 3614
3679 /* 3615 /* work-around descriptor data corruption issue during nfs v2 udp
3680 * work-around descriptor data corruption issue during nfs v2 udp
3681 * traffic, just disable the nfs filtering capability 3616 * traffic, just disable the nfs filtering capability
3682 */ 3617 */
3683 reg = er32(RFCTL); 3618 reg = er32(RFCTL);
3684 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 3619 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3685 3620
3686 /* 3621 /* Disable IPv6 extension header parsing because some malformed
3687 * Disable IPv6 extension header parsing because some malformed
3688 * IPv6 headers can hang the Rx. 3622 * IPv6 headers can hang the Rx.
3689 */ 3623 */
3690 if (hw->mac.type == e1000_ich8lan) 3624 if (hw->mac.type == e1000_ich8lan)
@@ -3709,8 +3643,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3709 if (hw->phy.ops.check_reset_block(hw)) 3643 if (hw->phy.ops.check_reset_block(hw))
3710 return 0; 3644 return 0;
3711 3645
3712 /* 3646 /* ICH parts do not have a word in the NVM to determine
3713 * ICH parts do not have a word in the NVM to determine
3714 * the default flow control setting, so we explicitly 3647 * the default flow control setting, so we explicitly
3715 * set it to full. 3648 * set it to full.
3716 */ 3649 */
@@ -3722,8 +3655,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3722 hw->fc.requested_mode = e1000_fc_full; 3655 hw->fc.requested_mode = e1000_fc_full;
3723 } 3656 }
3724 3657
3725 /* 3658 /* Save off the requested flow control mode for use later. Depending
3726 * Save off the requested flow control mode for use later. Depending
3727 * on the link partner's capabilities, we may or may not use this mode. 3659 * on the link partner's capabilities, we may or may not use this mode.
3728 */ 3660 */
3729 hw->fc.current_mode = hw->fc.requested_mode; 3661 hw->fc.current_mode = hw->fc.requested_mode;
@@ -3771,8 +3703,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3771 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 3703 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3772 ew32(CTRL, ctrl); 3704 ew32(CTRL, ctrl);
3773 3705
3774 /* 3706 /* Set the mac to wait the maximum time between each iteration
3775 * Set the mac to wait the maximum time between each iteration
3776 * and increase the max iterations when polling the phy; 3707 * and increase the max iterations when polling the phy;
3777 * this fixes erroneous timeouts at 10Mbps. 3708 * this fixes erroneous timeouts at 10Mbps.
3778 */ 3709 */
@@ -3892,8 +3823,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3892 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 3823 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3893 return 0; 3824 return 0;
3894 3825
3895 /* 3826 /* Make sure link is up before proceeding. If not just return.
3896 * Make sure link is up before proceeding. If not just return.
3897 * Attempting this while link is negotiating fouled up link 3827 * Attempting this while link is negotiating fouled up link
3898 * stability 3828 * stability
3899 */ 3829 */
@@ -3925,8 +3855,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3925 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 3855 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3926 ew32(PHY_CTRL, phy_ctrl); 3856 ew32(PHY_CTRL, phy_ctrl);
3927 3857
3928 /* 3858 /* Call gig speed drop workaround on Gig disable before accessing
3929 * Call gig speed drop workaround on Gig disable before accessing
3930 * any PHY registers 3859 * any PHY registers
3931 */ 3860 */
3932 e1000e_gig_downshift_workaround_ich8lan(hw); 3861 e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -3983,8 +3912,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3983 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 3912 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3984 ew32(PHY_CTRL, reg); 3913 ew32(PHY_CTRL, reg);
3985 3914
3986 /* 3915 /* Call gig speed drop workaround on Gig disable before
3987 * Call gig speed drop workaround on Gig disable before
3988 * accessing any PHY registers 3916 * accessing any PHY registers
3989 */ 3917 */
3990 if (hw->mac.type == e1000_ich8lan) 3918 if (hw->mac.type == e1000_ich8lan)
@@ -4078,8 +4006,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4078 goto release; 4006 goto release;
4079 e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert); 4007 e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
4080 4008
4081 /* 4009 /* Disable LPLU if both link partners support 100BaseT
4082 * Disable LPLU if both link partners support 100BaseT
4083 * EEE and 100Full is advertised on both ends of the 4010 * EEE and 100Full is advertised on both ends of the
4084 * link. 4011 * link.
4085 */ 4012 */
@@ -4091,8 +4018,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4091 E1000_PHY_CTRL_NOND0A_LPLU); 4018 E1000_PHY_CTRL_NOND0A_LPLU);
4092 } 4019 }
4093 4020
4094 /* 4021 /* For i217 Intel Rapid Start Technology support,
4095 * For i217 Intel Rapid Start Technology support,
4096 * when the system is going into Sx and no manageability engine 4022 * when the system is going into Sx and no manageability engine
4097 * is present, the driver must configure proxy to reset only on 4023 * is present, the driver must configure proxy to reset only on
4098 * power good. LPI (Low Power Idle) state must also reset only 4024 * power good. LPI (Low Power Idle) state must also reset only
@@ -4106,8 +4032,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4106 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; 4032 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4107 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); 4033 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4108 4034
4109 /* 4035 /* Set bit enable LPI (EEE) to reset only on
4110 * Set bit enable LPI (EEE) to reset only on
4111 * power good. 4036 * power good.
4112 */ 4037 */
4113 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 4038 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
@@ -4120,8 +4045,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4120 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4045 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4121 } 4046 }
4122 4047
4123 /* 4048 /* Enable MTA to reset for Intel Rapid Start Technology
4124 * Enable MTA to reset for Intel Rapid Start Technology
4125 * Support 4049 * Support
4126 */ 4050 */
4127 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4051 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
@@ -4175,8 +4099,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4175 return; 4099 return;
4176 } 4100 }
4177 4101
4178 /* 4102 /* For i217 Intel Rapid Start Technology support when the system
4179 * For i217 Intel Rapid Start Technology support when the system
4180 * is transitioning from Sx and no manageability engine is present 4103 * is transitioning from Sx and no manageability engine is present
4181 * configure SMBus to restore on reset, disable proxy, and enable 4104 * configure SMBus to restore on reset, disable proxy, and enable
4182 * the reset on MTA (Multicast table array). 4105 * the reset on MTA (Multicast table array).
@@ -4191,8 +4114,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4191 } 4114 }
4192 4115
4193 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 4116 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4194 /* 4117 /* Restore clear on SMB if no manageability engine
4195 * Restore clear on SMB if no manageability engine
4196 * is present 4118 * is present
4197 */ 4119 */
4198 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4120 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
@@ -4298,8 +4220,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
4298 u16 data = (u16)hw->mac.ledctl_mode2; 4220 u16 data = (u16)hw->mac.ledctl_mode2;
4299 u32 i, led; 4221 u32 i, led;
4300 4222
4301 /* 4223 /* If no link, then turn LED on by setting the invert bit
4302 * If no link, then turn LED on by setting the invert bit
4303 * for each LED that's mode is "link_up" in ledctl_mode2. 4224 * for each LED that's mode is "link_up" in ledctl_mode2.
4304 */ 4225 */
4305 if (!(er32(STATUS) & E1000_STATUS_LU)) { 4226 if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4329,8 +4250,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
4329 u16 data = (u16)hw->mac.ledctl_mode1; 4250 u16 data = (u16)hw->mac.ledctl_mode1;
4330 u32 i, led; 4251 u32 i, led;
4331 4252
4332 /* 4253 /* If no link, then turn LED off by clearing the invert bit
4333 * If no link, then turn LED off by clearing the invert bit
4334 * for each LED that's mode is "link_up" in ledctl_mode1. 4254 * for each LED that's mode is "link_up" in ledctl_mode1.
4335 */ 4255 */
4336 if (!(er32(STATUS) & E1000_STATUS_LU)) { 4256 if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4375,8 +4295,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
4375 } else { 4295 } else {
4376 ret_val = e1000e_get_auto_rd_done(hw); 4296 ret_val = e1000e_get_auto_rd_done(hw);
4377 if (ret_val) { 4297 if (ret_val) {
4378 /* 4298 /* When auto config read does not complete, do not
4379 * When auto config read does not complete, do not
4380 * return with an error. This can happen in situations 4299 * return with an error. This can happen in situations
4381 * where there is no eeprom and prevents getting link. 4300 * where there is no eeprom and prevents getting link.
4382 */ 4301 */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index a13439928488..54d9dafaf126 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -73,8 +73,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
73 struct e1000_bus_info *bus = &hw->bus; 73 struct e1000_bus_info *bus = &hw->bus;
74 u32 reg; 74 u32 reg;
75 75
76 /* 76 /* The status register reports the correct function number
77 * The status register reports the correct function number
78 * for the device regardless of function swap state. 77 * for the device regardless of function swap state.
79 */ 78 */
80 reg = er32(STATUS); 79 reg = er32(STATUS);
@@ -210,8 +209,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
210 return 0; 209 return 0;
211 } 210 }
212 211
213 /* 212 /* We have a valid alternate MAC address, and we want to treat it the
214 * We have a valid alternate MAC address, and we want to treat it the
215 * same as the normal permanent MAC address stored by the HW into the 213 * same as the normal permanent MAC address stored by the HW into the
216 * RAR. Do this by mapping this address into RAR0. 214 * RAR. Do this by mapping this address into RAR0.
217 */ 215 */
@@ -233,8 +231,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
233{ 231{
234 u32 rar_low, rar_high; 232 u32 rar_low, rar_high;
235 233
236 /* 234 /* HW expects these in little endian so we reverse the byte order
237 * HW expects these in little endian so we reverse the byte order
238 * from network order (big endian) to little endian 235 * from network order (big endian) to little endian
239 */ 236 */
240 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | 237 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -246,8 +243,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
246 if (rar_low || rar_high) 243 if (rar_low || rar_high)
247 rar_high |= E1000_RAH_AV; 244 rar_high |= E1000_RAH_AV;
248 245
249 /* 246 /* Some bridges will combine consecutive 32-bit writes into
250 * Some bridges will combine consecutive 32-bit writes into
251 * a single burst write, which will malfunction on some parts. 247 * a single burst write, which will malfunction on some parts.
252 * The flushes avoid this. 248 * The flushes avoid this.
253 */ 249 */
@@ -273,15 +269,13 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
273 /* Register count multiplied by bits per register */ 269 /* Register count multiplied by bits per register */
274 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 270 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
275 271
276 /* 272 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
277 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
278 * where 0xFF would still fall within the hash mask. 273 * where 0xFF would still fall within the hash mask.
279 */ 274 */
280 while (hash_mask >> bit_shift != 0xFF) 275 while (hash_mask >> bit_shift != 0xFF)
281 bit_shift++; 276 bit_shift++;
282 277
283 /* 278 /* The portion of the address that is used for the hash table
284 * The portion of the address that is used for the hash table
285 * is determined by the mc_filter_type setting. 279 * is determined by the mc_filter_type setting.
286 * The algorithm is such that there is a total of 8 bits of shifting. 280 * The algorithm is such that there is a total of 8 bits of shifting.
287 * The bit_shift for a mc_filter_type of 0 represents the number of 281 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -423,8 +417,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
423 s32 ret_val; 417 s32 ret_val;
424 bool link; 418 bool link;
425 419
426 /* 420 /* We only want to go out to the PHY registers to see if Auto-Neg
427 * We only want to go out to the PHY registers to see if Auto-Neg
428 * has completed and/or if our link status has changed. The 421 * has completed and/or if our link status has changed. The
429 * get_link_status flag is set upon receiving a Link Status 422 * get_link_status flag is set upon receiving a Link Status
430 * Change or Rx Sequence Error interrupt. 423 * Change or Rx Sequence Error interrupt.
@@ -432,8 +425,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
432 if (!mac->get_link_status) 425 if (!mac->get_link_status)
433 return 0; 426 return 0;
434 427
435 /* 428 /* First we want to see if the MII Status Register reports
436 * First we want to see if the MII Status Register reports
437 * link. If so, then we want to get the current speed/duplex 429 * link. If so, then we want to get the current speed/duplex
438 * of the PHY. 430 * of the PHY.
439 */ 431 */
@@ -446,28 +438,24 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
446 438
447 mac->get_link_status = false; 439 mac->get_link_status = false;
448 440
449 /* 441 /* Check if there was DownShift, must be checked
450 * Check if there was DownShift, must be checked
451 * immediately after link-up 442 * immediately after link-up
452 */ 443 */
453 e1000e_check_downshift(hw); 444 e1000e_check_downshift(hw);
454 445
455 /* 446 /* If we are forcing speed/duplex, then we simply return since
456 * If we are forcing speed/duplex, then we simply return since
457 * we have already determined whether we have link or not. 447 * we have already determined whether we have link or not.
458 */ 448 */
459 if (!mac->autoneg) 449 if (!mac->autoneg)
460 return -E1000_ERR_CONFIG; 450 return -E1000_ERR_CONFIG;
461 451
462 /* 452 /* Auto-Neg is enabled. Auto Speed Detection takes care
463 * Auto-Neg is enabled. Auto Speed Detection takes care
464 * of MAC speed/duplex configuration. So we only need to 453 * of MAC speed/duplex configuration. So we only need to
465 * configure Collision Distance in the MAC. 454 * configure Collision Distance in the MAC.
466 */ 455 */
467 mac->ops.config_collision_dist(hw); 456 mac->ops.config_collision_dist(hw);
468 457
469 /* 458 /* Configure Flow Control now that Auto-Neg has completed.
470 * Configure Flow Control now that Auto-Neg has completed.
471 * First, we need to restore the desired flow control 459 * First, we need to restore the desired flow control
472 * settings because we may have had to re-autoneg with a 460 * settings because we may have had to re-autoneg with a
473 * different link partner. 461 * different link partner.
@@ -498,8 +486,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
498 status = er32(STATUS); 486 status = er32(STATUS);
499 rxcw = er32(RXCW); 487 rxcw = er32(RXCW);
500 488
501 /* 489 /* If we don't have link (auto-negotiation failed or link partner
502 * If we don't have link (auto-negotiation failed or link partner
503 * cannot auto-negotiate), the cable is plugged in (we have signal), 490 * cannot auto-negotiate), the cable is plugged in (we have signal),
504 * and our link partner is not trying to auto-negotiate with us (we 491 * and our link partner is not trying to auto-negotiate with us (we
505 * are receiving idles or data), we need to force link up. We also 492 * are receiving idles or data), we need to force link up. We also
@@ -530,8 +517,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
530 return ret_val; 517 return ret_val;
531 } 518 }
532 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 519 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
533 /* 520 /* If we are forcing link and we are receiving /C/ ordered
534 * If we are forcing link and we are receiving /C/ ordered
535 * sets, re-enable auto-negotiation in the TXCW register 521 * sets, re-enable auto-negotiation in the TXCW register
536 * and disable forced link in the Device Control register 522 * and disable forced link in the Device Control register
537 * in an attempt to auto-negotiate with our link partner. 523 * in an attempt to auto-negotiate with our link partner.
@@ -565,8 +551,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
565 status = er32(STATUS); 551 status = er32(STATUS);
566 rxcw = er32(RXCW); 552 rxcw = er32(RXCW);
567 553
568 /* 554 /* If we don't have link (auto-negotiation failed or link partner
569 * If we don't have link (auto-negotiation failed or link partner
570 * cannot auto-negotiate), and our link partner is not trying to 555 * cannot auto-negotiate), and our link partner is not trying to
571 * auto-negotiate with us (we are receiving idles or data), 556 * auto-negotiate with us (we are receiving idles or data),
572 * we need to force link up. We also need to give auto-negotiation 557 * we need to force link up. We also need to give auto-negotiation
@@ -595,8 +580,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
595 return ret_val; 580 return ret_val;
596 } 581 }
597 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 582 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
598 /* 583 /* If we are forcing link and we are receiving /C/ ordered
599 * If we are forcing link and we are receiving /C/ ordered
600 * sets, re-enable auto-negotiation in the TXCW register 584 * sets, re-enable auto-negotiation in the TXCW register
601 * and disable forced link in the Device Control register 585 * and disable forced link in the Device Control register
602 * in an attempt to auto-negotiate with our link partner. 586 * in an attempt to auto-negotiate with our link partner.
@@ -607,8 +591,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
607 591
608 mac->serdes_has_link = true; 592 mac->serdes_has_link = true;
609 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 593 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
610 /* 594 /* If we force link for non-auto-negotiation switch, check
611 * If we force link for non-auto-negotiation switch, check
612 * link status based on MAC synchronization for internal 595 * link status based on MAC synchronization for internal
613 * serdes media type. 596 * serdes media type.
614 */ 597 */
@@ -665,8 +648,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
665 s32 ret_val; 648 s32 ret_val;
666 u16 nvm_data; 649 u16 nvm_data;
667 650
668 /* 651 /* Read and store word 0x0F of the EEPROM. This word contains bits
669 * Read and store word 0x0F of the EEPROM. This word contains bits
670 * that determine the hardware's default PAUSE (flow control) mode, 652 * that determine the hardware's default PAUSE (flow control) mode,
671 * a bit that determines whether the HW defaults to enabling or 653 * a bit that determines whether the HW defaults to enabling or
672 * disabling auto-negotiation, and the direction of the 654 * disabling auto-negotiation, and the direction of the
@@ -705,15 +687,13 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
705{ 687{
706 s32 ret_val; 688 s32 ret_val;
707 689
708 /* 690 /* In the case of the phy reset being blocked, we already have a link.
709 * In the case of the phy reset being blocked, we already have a link.
710 * We do not need to set it up again. 691 * We do not need to set it up again.
711 */ 692 */
712 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) 693 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
713 return 0; 694 return 0;
714 695
715 /* 696 /* If requested flow control is set to default, set flow control
716 * If requested flow control is set to default, set flow control
717 * based on the EEPROM flow control settings. 697 * based on the EEPROM flow control settings.
718 */ 698 */
719 if (hw->fc.requested_mode == e1000_fc_default) { 699 if (hw->fc.requested_mode == e1000_fc_default) {
@@ -722,8 +702,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
722 return ret_val; 702 return ret_val;
723 } 703 }
724 704
725 /* 705 /* Save off the requested flow control mode for use later. Depending
726 * Save off the requested flow control mode for use later. Depending
727 * on the link partner's capabilities, we may or may not use this mode. 706 * on the link partner's capabilities, we may or may not use this mode.
728 */ 707 */
729 hw->fc.current_mode = hw->fc.requested_mode; 708 hw->fc.current_mode = hw->fc.requested_mode;
@@ -735,8 +714,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
735 if (ret_val) 714 if (ret_val)
736 return ret_val; 715 return ret_val;
737 716
738 /* 717 /* Initialize the flow control address, type, and PAUSE timer
739 * Initialize the flow control address, type, and PAUSE timer
740 * registers to their default values. This is done even if flow 718 * registers to their default values. This is done even if flow
741 * control is disabled, because it does not hurt anything to 719 * control is disabled, because it does not hurt anything to
742 * initialize these registers. 720 * initialize these registers.
@@ -763,8 +741,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
763 struct e1000_mac_info *mac = &hw->mac; 741 struct e1000_mac_info *mac = &hw->mac;
764 u32 txcw; 742 u32 txcw;
765 743
766 /* 744 /* Check for a software override of the flow control settings, and
767 * Check for a software override of the flow control settings, and
768 * setup the device accordingly. If auto-negotiation is enabled, then 745 * setup the device accordingly. If auto-negotiation is enabled, then
769 * software will have to set the "PAUSE" bits to the correct value in 746 * software will have to set the "PAUSE" bits to the correct value in
770 * the Transmit Config Word Register (TXCW) and re-start auto- 747 * the Transmit Config Word Register (TXCW) and re-start auto-
@@ -786,8 +763,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
786 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 763 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
787 break; 764 break;
788 case e1000_fc_rx_pause: 765 case e1000_fc_rx_pause:
789 /* 766 /* Rx Flow control is enabled and Tx Flow control is disabled
790 * Rx Flow control is enabled and Tx Flow control is disabled
791 * by a software over-ride. Since there really isn't a way to 767 * by a software over-ride. Since there really isn't a way to
792 * advertise that we are capable of Rx Pause ONLY, we will 768 * advertise that we are capable of Rx Pause ONLY, we will
793 * advertise that we support both symmetric and asymmetric Rx 769 * advertise that we support both symmetric and asymmetric Rx
@@ -797,15 +773,13 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
797 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 773 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
798 break; 774 break;
799 case e1000_fc_tx_pause: 775 case e1000_fc_tx_pause:
800 /* 776 /* Tx Flow control is enabled, and Rx Flow control is disabled,
801 * Tx Flow control is enabled, and Rx Flow control is disabled,
802 * by a software over-ride. 777 * by a software over-ride.
803 */ 778 */
804 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 779 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
805 break; 780 break;
806 case e1000_fc_full: 781 case e1000_fc_full:
807 /* 782 /* Flow control (both Rx and Tx) is enabled by a software
808 * Flow control (both Rx and Tx) is enabled by a software
809 * over-ride. 783 * over-ride.
810 */ 784 */
811 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 785 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@@ -835,8 +809,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
835 u32 i, status; 809 u32 i, status;
836 s32 ret_val; 810 s32 ret_val;
837 811
838 /* 812 /* If we have a signal (the cable is plugged in, or assumed true for
839 * If we have a signal (the cable is plugged in, or assumed true for
840 * serdes media) then poll for a "Link-Up" indication in the Device 813 * serdes media) then poll for a "Link-Up" indication in the Device
841 * Status Register. Time-out if a link isn't seen in 500 milliseconds 814 * Status Register. Time-out if a link isn't seen in 500 milliseconds
842 * seconds (Auto-negotiation should complete in less than 500 815 * seconds (Auto-negotiation should complete in less than 500
@@ -851,8 +824,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
851 if (i == FIBER_LINK_UP_LIMIT) { 824 if (i == FIBER_LINK_UP_LIMIT) {
852 e_dbg("Never got a valid link from auto-neg!!!\n"); 825 e_dbg("Never got a valid link from auto-neg!!!\n");
853 mac->autoneg_failed = true; 826 mac->autoneg_failed = true;
854 /* 827 /* AutoNeg failed to achieve a link, so we'll call
855 * AutoNeg failed to achieve a link, so we'll call
856 * mac->check_for_link. This routine will force the 828 * mac->check_for_link. This routine will force the
857 * link up if we detect a signal. This will allow us to 829 * link up if we detect a signal. This will allow us to
858 * communicate with non-autonegotiating link partners. 830 * communicate with non-autonegotiating link partners.
@@ -894,8 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
894 if (ret_val) 866 if (ret_val)
895 return ret_val; 867 return ret_val;
896 868
897 /* 869 /* Since auto-negotiation is enabled, take the link out of reset (the
898 * Since auto-negotiation is enabled, take the link out of reset (the
899 * link will be in reset, because we previously reset the chip). This 870 * link will be in reset, because we previously reset the chip). This
900 * will restart auto-negotiation. If auto-negotiation is successful 871 * will restart auto-negotiation. If auto-negotiation is successful
901 * then the link-up status bit will be set and the flow control enable 872 * then the link-up status bit will be set and the flow control enable
@@ -907,8 +878,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
907 e1e_flush(); 878 e1e_flush();
908 usleep_range(1000, 2000); 879 usleep_range(1000, 2000);
909 880
910 /* 881 /* For these adapters, the SW definable pin 1 is set when the optics
911 * For these adapters, the SW definable pin 1 is set when the optics
912 * detect a signal. If we have a signal, then poll for a "Link-Up" 882 * detect a signal. If we have a signal, then poll for a "Link-Up"
913 * indication. 883 * indication.
914 */ 884 */
@@ -954,16 +924,14 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
954{ 924{
955 u32 fcrtl = 0, fcrth = 0; 925 u32 fcrtl = 0, fcrth = 0;
956 926
957 /* 927 /* Set the flow control receive threshold registers. Normally,
958 * Set the flow control receive threshold registers. Normally,
959 * these registers will be set to a default threshold that may be 928 * these registers will be set to a default threshold that may be
960 * adjusted later by the driver's runtime code. However, if the 929 * adjusted later by the driver's runtime code. However, if the
961 * ability to transmit pause frames is not enabled, then these 930 * ability to transmit pause frames is not enabled, then these
962 * registers will be set to 0. 931 * registers will be set to 0.
963 */ 932 */
964 if (hw->fc.current_mode & e1000_fc_tx_pause) { 933 if (hw->fc.current_mode & e1000_fc_tx_pause) {
965 /* 934 /* We need to set up the Receive Threshold high and low water
966 * We need to set up the Receive Threshold high and low water
967 * marks as well as (optionally) enabling the transmission of 935 * marks as well as (optionally) enabling the transmission of
968 * XON frames. 936 * XON frames.
969 */ 937 */
@@ -995,8 +963,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
995 963
996 ctrl = er32(CTRL); 964 ctrl = er32(CTRL);
997 965
998 /* 966 /* Because we didn't get link via the internal auto-negotiation
999 * Because we didn't get link via the internal auto-negotiation
1000 * mechanism (we either forced link or we got link via PHY 967 * mechanism (we either forced link or we got link via PHY
1001 * auto-neg), we have to manually enable/disable transmit an 968 * auto-neg), we have to manually enable/disable transmit an
1002 * receive flow control. 969 * receive flow control.
@@ -1057,8 +1024,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1057 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 1024 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
1058 u16 speed, duplex; 1025 u16 speed, duplex;
1059 1026
1060 /* 1027 /* Check for the case where we have fiber media and auto-neg failed
1061 * Check for the case where we have fiber media and auto-neg failed
1062 * so we had to force link. In this case, we need to force the 1028 * so we had to force link. In this case, we need to force the
1063 * configuration of the MAC to match the "fc" parameter. 1029 * configuration of the MAC to match the "fc" parameter.
1064 */ 1030 */
@@ -1076,15 +1042,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1076 return ret_val; 1042 return ret_val;
1077 } 1043 }
1078 1044
1079 /* 1045 /* Check for the case where we have copper media and auto-neg is
1080 * Check for the case where we have copper media and auto-neg is
1081 * enabled. In this case, we need to check and see if Auto-Neg 1046 * enabled. In this case, we need to check and see if Auto-Neg
1082 * has completed, and if so, how the PHY and link partner has 1047 * has completed, and if so, how the PHY and link partner has
1083 * flow control configured. 1048 * flow control configured.
1084 */ 1049 */
1085 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { 1050 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1086 /* 1051 /* Read the MII Status Register and check to see if AutoNeg
1087 * Read the MII Status Register and check to see if AutoNeg
1088 * has completed. We read this twice because this reg has 1052 * has completed. We read this twice because this reg has
1089 * some "sticky" (latched) bits. 1053 * some "sticky" (latched) bits.
1090 */ 1054 */
@@ -1100,8 +1064,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1100 return ret_val; 1064 return ret_val;
1101 } 1065 }
1102 1066
1103 /* 1067 /* The AutoNeg process has completed, so we now need to
1104 * The AutoNeg process has completed, so we now need to
1105 * read both the Auto Negotiation Advertisement 1068 * read both the Auto Negotiation Advertisement
1106 * Register (Address 4) and the Auto_Negotiation Base 1069 * Register (Address 4) and the Auto_Negotiation Base
1107 * Page Ability Register (Address 5) to determine how 1070 * Page Ability Register (Address 5) to determine how
@@ -1115,8 +1078,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1115 if (ret_val) 1078 if (ret_val)
1116 return ret_val; 1079 return ret_val;
1117 1080
1118 /* 1081 /* Two bits in the Auto Negotiation Advertisement Register
1119 * Two bits in the Auto Negotiation Advertisement Register
1120 * (Address 4) and two bits in the Auto Negotiation Base 1082 * (Address 4) and two bits in the Auto Negotiation Base
1121 * Page Ability Register (Address 5) determine flow control 1083 * Page Ability Register (Address 5) determine flow control
1122 * for both the PHY and the link partner. The following 1084 * for both the PHY and the link partner. The following
@@ -1151,8 +1113,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1151 */ 1113 */
1152 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 1114 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1153 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 1115 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1154 /* 1116 /* Now we need to check if the user selected Rx ONLY
1155 * Now we need to check if the user selected Rx ONLY
1156 * of pause frames. In this case, we had to advertise 1117 * of pause frames. In this case, we had to advertise
1157 * FULL flow control because we could not advertise Rx 1118 * FULL flow control because we could not advertise Rx
1158 * ONLY. Hence, we must now check to see if we need to 1119 * ONLY. Hence, we must now check to see if we need to
@@ -1166,8 +1127,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1166 e_dbg("Flow Control = Rx PAUSE frames only.\n"); 1127 e_dbg("Flow Control = Rx PAUSE frames only.\n");
1167 } 1128 }
1168 } 1129 }
1169 /* 1130 /* For receiving PAUSE frames ONLY.
1170 * For receiving PAUSE frames ONLY.
1171 * 1131 *
1172 * LOCAL DEVICE | LINK PARTNER 1132 * LOCAL DEVICE | LINK PARTNER
1173 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1133 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1181,8 +1141,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1181 hw->fc.current_mode = e1000_fc_tx_pause; 1141 hw->fc.current_mode = e1000_fc_tx_pause;
1182 e_dbg("Flow Control = Tx PAUSE frames only.\n"); 1142 e_dbg("Flow Control = Tx PAUSE frames only.\n");
1183 } 1143 }
1184 /* 1144 /* For transmitting PAUSE frames ONLY.
1185 * For transmitting PAUSE frames ONLY.
1186 * 1145 *
1187 * LOCAL DEVICE | LINK PARTNER 1146 * LOCAL DEVICE | LINK PARTNER
1188 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1147 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1196,16 +1155,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1196 hw->fc.current_mode = e1000_fc_rx_pause; 1155 hw->fc.current_mode = e1000_fc_rx_pause;
1197 e_dbg("Flow Control = Rx PAUSE frames only.\n"); 1156 e_dbg("Flow Control = Rx PAUSE frames only.\n");
1198 } else { 1157 } else {
1199 /* 1158 /* Per the IEEE spec, at this point flow control
1200 * Per the IEEE spec, at this point flow control
1201 * should be disabled. 1159 * should be disabled.
1202 */ 1160 */
1203 hw->fc.current_mode = e1000_fc_none; 1161 hw->fc.current_mode = e1000_fc_none;
1204 e_dbg("Flow Control = NONE.\n"); 1162 e_dbg("Flow Control = NONE.\n");
1205 } 1163 }
1206 1164
1207 /* 1165 /* Now we need to do one last check... If we auto-
1208 * Now we need to do one last check... If we auto-
1209 * negotiated to HALF DUPLEX, flow control should not be 1166 * negotiated to HALF DUPLEX, flow control should not be
1210 * enabled per IEEE 802.3 spec. 1167 * enabled per IEEE 802.3 spec.
1211 */ 1168 */
@@ -1218,8 +1175,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1218 if (duplex == HALF_DUPLEX) 1175 if (duplex == HALF_DUPLEX)
1219 hw->fc.current_mode = e1000_fc_none; 1176 hw->fc.current_mode = e1000_fc_none;
1220 1177
1221 /* 1178 /* Now we call a subroutine to actually force the MAC
1222 * Now we call a subroutine to actually force the MAC
1223 * controller to use the correct flow control settings. 1179 * controller to use the correct flow control settings.
1224 */ 1180 */
1225 ret_val = e1000e_force_mac_fc(hw); 1181 ret_val = e1000e_force_mac_fc(hw);
@@ -1520,8 +1476,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1520 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1476 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1521 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1477 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1522 } else { 1478 } else {
1523 /* 1479 /* set the blink bit for each LED that's "on" (0x0E)
1524 * set the blink bit for each LED that's "on" (0x0E)
1525 * in ledctl_mode2 1480 * in ledctl_mode2
1526 */ 1481 */
1527 ledctl_blink = hw->mac.ledctl_mode2; 1482 ledctl_blink = hw->mac.ledctl_mode2;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index bacc950fc684..6dc47beb3adc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -143,8 +143,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
143 return hw->mac.tx_pkt_filtering; 143 return hw->mac.tx_pkt_filtering;
144 } 144 }
145 145
146 /* 146 /* If we can't read from the host interface for whatever
147 * If we can't read from the host interface for whatever
148 * reason, disable filtering. 147 * reason, disable filtering.
149 */ 148 */
150 ret_val = e1000_mng_enable_host_if(hw); 149 ret_val = e1000_mng_enable_host_if(hw);
@@ -163,8 +162,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
163 hdr->checksum = 0; 162 hdr->checksum = 0;
164 csum = e1000_calculate_checksum((u8 *)hdr, 163 csum = e1000_calculate_checksum((u8 *)hdr,
165 E1000_MNG_DHCP_COOKIE_LENGTH); 164 E1000_MNG_DHCP_COOKIE_LENGTH);
166 /* 165 /* If either the checksums or signature don't match, then
167 * If either the checksums or signature don't match, then
168 * the cookie area isn't considered valid, in which case we 166 * the cookie area isn't considered valid, in which case we
169 * take the safe route of assuming Tx filtering is enabled. 167 * take the safe route of assuming Tx filtering is enabled.
170 */ 168 */
@@ -252,8 +250,7 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
252 /* Calculate length in DWORDs */ 250 /* Calculate length in DWORDs */
253 length >>= 2; 251 length >>= 2;
254 252
255 /* 253 /* The device driver writes the relevant command block into the
256 * The device driver writes the relevant command block into the
257 * ram area. 254 * ram area.
258 */ 255 */
259 for (i = 0; i < length; i++) { 256 for (i = 0; i < length; i++) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0b76d8..fbf75fdca994 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -146,9 +146,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
146 {0, NULL} 146 {0, NULL}
147}; 147};
148 148
149/* 149/**
150 * e1000_regdump - register printout routine 150 * e1000_regdump - register printout routine
151 */ 151 * @hw: pointer to the HW structure
152 * @reginfo: pointer to the register info table
153 **/
152static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) 154static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
153{ 155{
154 int n = 0; 156 int n = 0;
@@ -196,9 +198,10 @@ static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
196 } 198 }
197} 199}
198 200
199/* 201/**
200 * e1000e_dump - Print registers, Tx-ring and Rx-ring 202 * e1000e_dump - Print registers, Tx-ring and Rx-ring
201 */ 203 * @adapter: board private structure
204 **/
202static void e1000e_dump(struct e1000_adapter *adapter) 205static void e1000e_dump(struct e1000_adapter *adapter)
203{ 206{
204 struct net_device *netdev = adapter->netdev; 207 struct net_device *netdev = adapter->netdev;
@@ -623,8 +626,7 @@ map_skb:
623 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 626 rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
624 627
625 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 628 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
626 /* 629 /* Force memory writes to complete before letting h/w
627 * Force memory writes to complete before letting h/w
628 * know there are new descriptors to fetch. (Only 630 * know there are new descriptors to fetch. (Only
629 * applicable for weak-ordered memory model archs, 631 * applicable for weak-ordered memory model archs,
630 * such as IA-64). 632 * such as IA-64).
@@ -692,8 +694,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
692 goto no_buffers; 694 goto no_buffers;
693 } 695 }
694 } 696 }
695 /* 697 /* Refresh the desc even if buffer_addrs
696 * Refresh the desc even if buffer_addrs
697 * didn't change because each write-back 698 * didn't change because each write-back
698 * erases this info. 699 * erases this info.
699 */ 700 */
@@ -726,8 +727,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
726 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 727 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
727 728
728 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { 729 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
729 /* 730 /* Force memory writes to complete before letting h/w
730 * Force memory writes to complete before letting h/w
731 * know there are new descriptors to fetch. (Only 731 * know there are new descriptors to fetch. (Only
732 * applicable for weak-ordered memory model archs, 732 * applicable for weak-ordered memory model archs,
733 * such as IA-64). 733 * such as IA-64).
@@ -817,7 +817,8 @@ check_page:
817 /* Force memory writes to complete before letting h/w 817 /* Force memory writes to complete before letting h/w
818 * know there are new descriptors to fetch. (Only 818 * know there are new descriptors to fetch. (Only
819 * applicable for weak-ordered memory model archs, 819 * applicable for weak-ordered memory model archs,
820 * such as IA-64). */ 820 * such as IA-64).
821 */
821 wmb(); 822 wmb();
822 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) 823 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
823 e1000e_update_rdt_wa(rx_ring, i); 824 e1000e_update_rdt_wa(rx_ring, i);
@@ -891,8 +892,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
891 892
892 length = le16_to_cpu(rx_desc->wb.upper.length); 893 length = le16_to_cpu(rx_desc->wb.upper.length);
893 894
894 /* 895 /* !EOP means multiple descriptors were used to store a single
895 * !EOP means multiple descriptors were used to store a single
896 * packet, if that's the case we need to toss it. In fact, we 896 * packet, if that's the case we need to toss it. In fact, we
897 * need to toss every packet with the EOP bit clear and the 897 * need to toss every packet with the EOP bit clear and the
898 * next frame that _does_ have the EOP bit set, as it is by 898 * next frame that _does_ have the EOP bit set, as it is by
@@ -933,8 +933,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
933 total_rx_bytes += length; 933 total_rx_bytes += length;
934 total_rx_packets++; 934 total_rx_packets++;
935 935
936 /* 936 /* code added for copybreak, this should improve
937 * code added for copybreak, this should improve
938 * performance for small packets with large amounts 937 * performance for small packets with large amounts
939 * of reassembly being done in the stack 938 * of reassembly being done in the stack
940 */ 939 */
@@ -1032,15 +1031,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
1032 1031
1033 if (!adapter->tx_hang_recheck && 1032 if (!adapter->tx_hang_recheck &&
1034 (adapter->flags2 & FLAG2_DMA_BURST)) { 1033 (adapter->flags2 & FLAG2_DMA_BURST)) {
1035 /* 1034 /* May be block on write-back, flush and detect again
1036 * May be block on write-back, flush and detect again
1037 * flush pending descriptor writebacks to memory 1035 * flush pending descriptor writebacks to memory
1038 */ 1036 */
1039 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1037 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
1040 /* execute the writes immediately */ 1038 /* execute the writes immediately */
1041 e1e_flush(); 1039 e1e_flush();
1042 /* 1040 /* Due to rare timing issues, write to TIDV again to ensure
1043 * Due to rare timing issues, write to TIDV again to ensure
1044 * the write is successful 1041 * the write is successful
1045 */ 1042 */
1046 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1043 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1169,8 +1166,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
1169 } 1166 }
1170 1167
1171 if (adapter->detect_tx_hung) { 1168 if (adapter->detect_tx_hung) {
1172 /* 1169 /* Detect a transmit hang in hardware, this serializes the
1173 * Detect a transmit hang in hardware, this serializes the
1174 * check with the clearing of time_stamp and movement of i 1170 * check with the clearing of time_stamp and movement of i
1175 */ 1171 */
1176 adapter->detect_tx_hung = false; 1172 adapter->detect_tx_hung = false;
@@ -1270,14 +1266,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1270 skb_put(skb, length); 1266 skb_put(skb, length);
1271 1267
1272 { 1268 {
1273 /* 1269 /* this looks ugly, but it seems compiler issues make
1274 * this looks ugly, but it seems compiler issues make
1275 * it more efficient than reusing j 1270 * it more efficient than reusing j
1276 */ 1271 */
1277 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); 1272 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
1278 1273
1279 /* 1274 /* page alloc/put takes too long and effects small
1280 * page alloc/put takes too long and effects small
1281 * packet throughput, so unsplit small packets and 1275 * packet throughput, so unsplit small packets and
1282 * save the alloc/put only valid in softirq (napi) 1276 * save the alloc/put only valid in softirq (napi)
1283 * context to call kmap_* 1277 * context to call kmap_*
@@ -1288,8 +1282,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
1288 1282
1289 ps_page = &buffer_info->ps_pages[0]; 1283 ps_page = &buffer_info->ps_pages[0];
1290 1284
1291 /* 1285 /* there is no documentation about how to call
1292 * there is no documentation about how to call
1293 * kmap_atomic, so we can't hold the mapping 1286 * kmap_atomic, so we can't hold the mapping
1294 * very long 1287 * very long
1295 */ 1288 */
@@ -1486,14 +1479,16 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1486 skb_shinfo(rxtop)->nr_frags, 1479 skb_shinfo(rxtop)->nr_frags,
1487 buffer_info->page, 0, length); 1480 buffer_info->page, 0, length);
1488 /* re-use the current skb, we only consumed the 1481 /* re-use the current skb, we only consumed the
1489 * page */ 1482 * page
1483 */
1490 buffer_info->skb = skb; 1484 buffer_info->skb = skb;
1491 skb = rxtop; 1485 skb = rxtop;
1492 rxtop = NULL; 1486 rxtop = NULL;
1493 e1000_consume_page(buffer_info, skb, length); 1487 e1000_consume_page(buffer_info, skb, length);
1494 } else { 1488 } else {
1495 /* no chain, got EOP, this buf is the packet 1489 /* no chain, got EOP, this buf is the packet
1496 * copybreak to save the put_page/alloc_page */ 1490 * copybreak to save the put_page/alloc_page
1491 */
1497 if (length <= copybreak && 1492 if (length <= copybreak &&
1498 skb_tailroom(skb) >= length) { 1493 skb_tailroom(skb) >= length) {
1499 u8 *vaddr; 1494 u8 *vaddr;
@@ -1502,7 +1497,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1502 length); 1497 length);
1503 kunmap_atomic(vaddr); 1498 kunmap_atomic(vaddr);
1504 /* re-use the page, so don't erase 1499 /* re-use the page, so don't erase
1505 * buffer_info->page */ 1500 * buffer_info->page
1501 */
1506 skb_put(skb, length); 1502 skb_put(skb, length);
1507 } else { 1503 } else {
1508 skb_fill_page_desc(skb, 0, 1504 skb_fill_page_desc(skb, 0,
@@ -1656,22 +1652,17 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
1656 struct e1000_hw *hw = &adapter->hw; 1652 struct e1000_hw *hw = &adapter->hw;
1657 u32 icr = er32(ICR); 1653 u32 icr = er32(ICR);
1658 1654
1659 /* 1655 /* read ICR disables interrupts using IAM */
1660 * read ICR disables interrupts using IAM
1661 */
1662
1663 if (icr & E1000_ICR_LSC) { 1656 if (icr & E1000_ICR_LSC) {
1664 hw->mac.get_link_status = true; 1657 hw->mac.get_link_status = true;
1665 /* 1658 /* ICH8 workaround-- Call gig speed drop workaround on cable
1666 * ICH8 workaround-- Call gig speed drop workaround on cable
1667 * disconnect (LSC) before accessing any PHY registers 1659 * disconnect (LSC) before accessing any PHY registers
1668 */ 1660 */
1669 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1661 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1670 (!(er32(STATUS) & E1000_STATUS_LU))) 1662 (!(er32(STATUS) & E1000_STATUS_LU)))
1671 schedule_work(&adapter->downshift_task); 1663 schedule_work(&adapter->downshift_task);
1672 1664
1673 /* 1665 /* 80003ES2LAN workaround-- For packet buffer work-around on
1674 * 80003ES2LAN workaround-- For packet buffer work-around on
1675 * link down event; disable receives here in the ISR and reset 1666 * link down event; disable receives here in the ISR and reset
1676 * adapter in watchdog 1667 * adapter in watchdog
1677 */ 1668 */
@@ -1713,31 +1704,27 @@ static irqreturn_t e1000_intr(int irq, void *data)
1713 if (!icr || test_bit(__E1000_DOWN, &adapter->state)) 1704 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1714 return IRQ_NONE; /* Not our interrupt */ 1705 return IRQ_NONE; /* Not our interrupt */
1715 1706
1716 /* 1707 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1717 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
1718 * not set, then the adapter didn't send an interrupt 1708 * not set, then the adapter didn't send an interrupt
1719 */ 1709 */
1720 if (!(icr & E1000_ICR_INT_ASSERTED)) 1710 if (!(icr & E1000_ICR_INT_ASSERTED))
1721 return IRQ_NONE; 1711 return IRQ_NONE;
1722 1712
1723 /* 1713 /* Interrupt Auto-Mask...upon reading ICR,
1724 * Interrupt Auto-Mask...upon reading ICR,
1725 * interrupts are masked. No need for the 1714 * interrupts are masked. No need for the
1726 * IMC write 1715 * IMC write
1727 */ 1716 */
1728 1717
1729 if (icr & E1000_ICR_LSC) { 1718 if (icr & E1000_ICR_LSC) {
1730 hw->mac.get_link_status = true; 1719 hw->mac.get_link_status = true;
1731 /* 1720 /* ICH8 workaround-- Call gig speed drop workaround on cable
1732 * ICH8 workaround-- Call gig speed drop workaround on cable
1733 * disconnect (LSC) before accessing any PHY registers 1721 * disconnect (LSC) before accessing any PHY registers
1734 */ 1722 */
1735 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && 1723 if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
1736 (!(er32(STATUS) & E1000_STATUS_LU))) 1724 (!(er32(STATUS) & E1000_STATUS_LU)))
1737 schedule_work(&adapter->downshift_task); 1725 schedule_work(&adapter->downshift_task);
1738 1726
1739 /* 1727 /* 80003ES2LAN workaround--
1740 * 80003ES2LAN workaround--
1741 * For packet buffer work-around on link down event; 1728 * For packet buffer work-around on link down event;
1742 * disable receives here in the ISR and 1729 * disable receives here in the ISR and
1743 * reset adapter in watchdog 1730 * reset adapter in watchdog
@@ -2469,8 +2456,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2469 2456
2470set_itr_now: 2457set_itr_now:
2471 if (new_itr != adapter->itr) { 2458 if (new_itr != adapter->itr) {
2472 /* 2459 /* this attempts to bias the interrupt rate towards Bulk
2473 * this attempts to bias the interrupt rate towards Bulk
2474 * by adding intermediate steps when interrupt rate is 2460 * by adding intermediate steps when interrupt rate is
2475 * increasing 2461 * increasing
2476 */ 2462 */
@@ -2517,7 +2503,7 @@ void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2517 * e1000_alloc_queues - Allocate memory for all rings 2503 * e1000_alloc_queues - Allocate memory for all rings
2518 * @adapter: board private structure to initialize 2504 * @adapter: board private structure to initialize
2519 **/ 2505 **/
2520static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter) 2506static int e1000_alloc_queues(struct e1000_adapter *adapter)
2521{ 2507{
2522 int size = sizeof(struct e1000_ring); 2508 int size = sizeof(struct e1000_ring);
2523 2509
@@ -2740,8 +2726,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2740 2726
2741 manc = er32(MANC); 2727 manc = er32(MANC);
2742 2728
2743 /* 2729 /* enable receiving management packets to the host. this will probably
2744 * enable receiving management packets to the host. this will probably
2745 * generate destination unreachable messages from the host OS, but 2730 * generate destination unreachable messages from the host OS, but
2746 * the packets will be handled on SMBUS 2731 * the packets will be handled on SMBUS
2747 */ 2732 */
@@ -2754,8 +2739,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2754 break; 2739 break;
2755 case e1000_82574: 2740 case e1000_82574:
2756 case e1000_82583: 2741 case e1000_82583:
2757 /* 2742 /* Check if IPMI pass-through decision filter already exists;
2758 * Check if IPMI pass-through decision filter already exists;
2759 * if so, enable it. 2743 * if so, enable it.
2760 */ 2744 */
2761 for (i = 0, j = 0; i < 8; i++) { 2745 for (i = 0, j = 0; i < 8; i++) {
@@ -2827,8 +2811,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2827 u32 txdctl = er32(TXDCTL(0)); 2811 u32 txdctl = er32(TXDCTL(0));
2828 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | 2812 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2829 E1000_TXDCTL_WTHRESH); 2813 E1000_TXDCTL_WTHRESH);
2830 /* 2814 /* set up some performance related parameters to encourage the
2831 * set up some performance related parameters to encourage the
2832 * hardware to use the bus more efficiently in bursts, depends 2815 * hardware to use the bus more efficiently in bursts, depends
2833 * on the tx_int_delay to be enabled, 2816 * on the tx_int_delay to be enabled,
2834 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls 2817 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
@@ -2845,8 +2828,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2845 2828
2846 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 2829 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2847 tarc = er32(TARC(0)); 2830 tarc = er32(TARC(0));
2848 /* 2831 /* set the speed mode bit, we'll clear it if we're not at
2849 * set the speed mode bit, we'll clear it if we're not at
2850 * gigabit link later 2832 * gigabit link later
2851 */ 2833 */
2852#define SPEED_MODE_BIT (1 << 21) 2834#define SPEED_MODE_BIT (1 << 21)
@@ -2967,8 +2949,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2967 rfctl |= E1000_RFCTL_EXTEN; 2949 rfctl |= E1000_RFCTL_EXTEN;
2968 ew32(RFCTL, rfctl); 2950 ew32(RFCTL, rfctl);
2969 2951
2970 /* 2952 /* 82571 and greater support packet-split where the protocol
2971 * 82571 and greater support packet-split where the protocol
2972 * header is placed in skb->data and the packet data is 2953 * header is placed in skb->data and the packet data is
2973 * placed in pages hanging off of skb_shinfo(skb)->nr_frags. 2954 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2974 * In the case of a non-split, skb->data is linearly filled, 2955 * In the case of a non-split, skb->data is linearly filled,
@@ -3016,7 +2997,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3016 /* This is useful for sniffing bad packets. */ 2997 /* This is useful for sniffing bad packets. */
3017 if (adapter->netdev->features & NETIF_F_RXALL) { 2998 if (adapter->netdev->features & NETIF_F_RXALL) {
3018 /* UPE and MPE will be handled by normal PROMISC logic 2999 /* UPE and MPE will be handled by normal PROMISC logic
3019 * in e1000e_set_rx_mode */ 3000 * in e1000e_set_rx_mode
3001 */
3020 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3002 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3021 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3003 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3022 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3004 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3071,8 +3053,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3071 usleep_range(10000, 20000); 3053 usleep_range(10000, 20000);
3072 3054
3073 if (adapter->flags2 & FLAG2_DMA_BURST) { 3055 if (adapter->flags2 & FLAG2_DMA_BURST) {
3074 /* 3056 /* set the writeback threshold (only takes effect if the RDTR
3075 * set the writeback threshold (only takes effect if the RDTR
3076 * is set). set GRAN=1 and write back up to 0x4 worth, and 3057 * is set). set GRAN=1 and write back up to 0x4 worth, and
3077 * enable prefetching of 0x20 Rx descriptors 3058 * enable prefetching of 0x20 Rx descriptors
3078 * granularity = 01 3059 * granularity = 01
@@ -3083,8 +3064,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3083 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); 3064 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3084 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); 3065 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3085 3066
3086 /* 3067 /* override the delay timers for enabling bursting, only if
3087 * override the delay timers for enabling bursting, only if
3088 * the value was not set by the user via module options 3068 * the value was not set by the user via module options
3089 */ 3069 */
3090 if (adapter->rx_int_delay == DEFAULT_RDTR) 3070 if (adapter->rx_int_delay == DEFAULT_RDTR)
@@ -3108,8 +3088,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3108 ew32(CTRL_EXT, ctrl_ext); 3088 ew32(CTRL_EXT, ctrl_ext);
3109 e1e_flush(); 3089 e1e_flush();
3110 3090
3111 /* 3091 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3112 * Setup the HW Rx Head and Tail Descriptor Pointers and
3113 * the Base and Length of the Rx Descriptor Ring 3092 * the Base and Length of the Rx Descriptor Ring
3114 */ 3093 */
3115 rdba = rx_ring->dma; 3094 rdba = rx_ring->dma;
@@ -3130,8 +3109,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3130 ew32(RXCSUM, rxcsum); 3109 ew32(RXCSUM, rxcsum);
3131 3110
3132 if (adapter->hw.mac.type == e1000_pch2lan) { 3111 if (adapter->hw.mac.type == e1000_pch2lan) {
3133 /* 3112 /* With jumbo frames, excessive C-state transition
3134 * With jumbo frames, excessive C-state transition
3135 * latencies result in dropped transactions. 3113 * latencies result in dropped transactions.
3136 */ 3114 */
3137 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3115 if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3216,8 +3194,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3216 if (!netdev_uc_empty(netdev) && rar_entries) { 3194 if (!netdev_uc_empty(netdev) && rar_entries) {
3217 struct netdev_hw_addr *ha; 3195 struct netdev_hw_addr *ha;
3218 3196
3219 /* 3197 /* write the addresses in reverse order to avoid write
3220 * write the addresses in reverse order to avoid write
3221 * combining 3198 * combining
3222 */ 3199 */
3223 netdev_for_each_uc_addr(ha, netdev) { 3200 netdev_for_each_uc_addr(ha, netdev) {
@@ -3269,8 +3246,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3269 if (netdev->flags & IFF_ALLMULTI) { 3246 if (netdev->flags & IFF_ALLMULTI) {
3270 rctl |= E1000_RCTL_MPE; 3247 rctl |= E1000_RCTL_MPE;
3271 } else { 3248 } else {
3272 /* 3249 /* Write addresses to the MTA, if the attempt fails
3273 * Write addresses to the MTA, if the attempt fails
3274 * then we should just turn on promiscuous mode so 3250 * then we should just turn on promiscuous mode so
3275 * that we can at least receive multicast traffic 3251 * that we can at least receive multicast traffic
3276 */ 3252 */
@@ -3279,8 +3255,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
3279 rctl |= E1000_RCTL_MPE; 3255 rctl |= E1000_RCTL_MPE;
3280 } 3256 }
3281 e1000e_vlan_filter_enable(adapter); 3257 e1000e_vlan_filter_enable(adapter);
3282 /* 3258 /* Write addresses to available RAR registers, if there is not
3283 * Write addresses to available RAR registers, if there is not
3284 * sufficient space to store all the addresses then enable 3259 * sufficient space to store all the addresses then enable
3285 * unicast promiscuous mode 3260 * unicast promiscuous mode
3286 */ 3261 */
@@ -3315,8 +3290,7 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3315 for (i = 0; i < 32; i++) 3290 for (i = 0; i < 32; i++)
3316 ew32(RETA(i), 0); 3291 ew32(RETA(i), 0);
3317 3292
3318 /* 3293 /* Disable raw packet checksumming so that RSS hash is placed in
3319 * Disable raw packet checksumming so that RSS hash is placed in
3320 * descriptor on writeback. 3294 * descriptor on writeback.
3321 */ 3295 */
3322 rxcsum = er32(RXCSUM); 3296 rxcsum = er32(RXCSUM);
@@ -3408,8 +3382,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3408 ew32(PBA, pba); 3382 ew32(PBA, pba);
3409 3383
3410 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { 3384 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3411 /* 3385 /* To maintain wire speed transmits, the Tx FIFO should be
3412 * To maintain wire speed transmits, the Tx FIFO should be
3413 * large enough to accommodate two full transmit packets, 3386 * large enough to accommodate two full transmit packets,
3414 * rounded up to the next 1KB and expressed in KB. Likewise, 3387 * rounded up to the next 1KB and expressed in KB. Likewise,
3415 * the Rx FIFO should be large enough to accommodate at least 3388 * the Rx FIFO should be large enough to accommodate at least
@@ -3421,8 +3394,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3421 tx_space = pba >> 16; 3394 tx_space = pba >> 16;
3422 /* lower 16 bits has Rx packet buffer allocation size in KB */ 3395 /* lower 16 bits has Rx packet buffer allocation size in KB */
3423 pba &= 0xffff; 3396 pba &= 0xffff;
3424 /* 3397 /* the Tx fifo also stores 16 bytes of information about the Tx
3425 * the Tx fifo also stores 16 bytes of information about the Tx
3426 * but don't include ethernet FCS because hardware appends it 3398 * but don't include ethernet FCS because hardware appends it
3427 */ 3399 */
3428 min_tx_space = (adapter->max_frame_size + 3400 min_tx_space = (adapter->max_frame_size +
@@ -3435,8 +3407,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3435 min_rx_space = ALIGN(min_rx_space, 1024); 3407 min_rx_space = ALIGN(min_rx_space, 1024);
3436 min_rx_space >>= 10; 3408 min_rx_space >>= 10;
3437 3409
3438 /* 3410 /* If current Tx allocation is less than the min Tx FIFO size,
3439 * If current Tx allocation is less than the min Tx FIFO size,
3440 * and the min Tx FIFO size is less than the current Rx FIFO 3411 * and the min Tx FIFO size is less than the current Rx FIFO
3441 * allocation, take space away from current Rx allocation 3412 * allocation, take space away from current Rx allocation
3442 */ 3413 */
@@ -3444,8 +3415,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3444 ((min_tx_space - tx_space) < pba)) { 3415 ((min_tx_space - tx_space) < pba)) {
3445 pba -= min_tx_space - tx_space; 3416 pba -= min_tx_space - tx_space;
3446 3417
3447 /* 3418 /* if short on Rx space, Rx wins and must trump Tx
3448 * if short on Rx space, Rx wins and must trump Tx
3449 * adjustment 3419 * adjustment
3450 */ 3420 */
3451 if (pba < min_rx_space) 3421 if (pba < min_rx_space)
@@ -3455,8 +3425,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3455 ew32(PBA, pba); 3425 ew32(PBA, pba);
3456 } 3426 }
3457 3427
3458 /* 3428 /* flow control settings
3459 * flow control settings
3460 * 3429 *
3461 * The high water mark must be low enough to fit one full frame 3430 * The high water mark must be low enough to fit one full frame
3462 * (or the size used for early receive) above it in the Rx FIFO. 3431 * (or the size used for early receive) above it in the Rx FIFO.
@@ -3490,8 +3459,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3490 fc->low_water = fc->high_water - 8; 3459 fc->low_water = fc->high_water - 8;
3491 break; 3460 break;
3492 case e1000_pchlan: 3461 case e1000_pchlan:
3493 /* 3462 /* Workaround PCH LOM adapter hangs with certain network
3494 * Workaround PCH LOM adapter hangs with certain network
3495 * loads. If hangs persist, try disabling Tx flow control. 3463 * loads. If hangs persist, try disabling Tx flow control.
3496 */ 3464 */
3497 if (adapter->netdev->mtu > ETH_DATA_LEN) { 3465 if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3516,8 +3484,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3516 break; 3484 break;
3517 } 3485 }
3518 3486
3519 /* 3487 /* Alignment of Tx data is on an arbitrary byte boundary with the
3520 * Alignment of Tx data is on an arbitrary byte boundary with the
3521 * maximum size per Tx descriptor limited only to the transmit 3488 * maximum size per Tx descriptor limited only to the transmit
3522 * allocation of the packet buffer minus 96 bytes with an upper 3489 * allocation of the packet buffer minus 96 bytes with an upper
3523 * limit of 24KB due to receive synchronization limitations. 3490 * limit of 24KB due to receive synchronization limitations.
@@ -3525,8 +3492,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3525 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, 3492 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3526 24 << 10); 3493 24 << 10);
3527 3494
3528 /* 3495 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
3529 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
3530 * fit in receive buffer. 3496 * fit in receive buffer.
3531 */ 3497 */
3532 if (adapter->itr_setting & 0x3) { 3498 if (adapter->itr_setting & 0x3) {
@@ -3549,8 +3515,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3549 /* Allow time for pending master requests to run */ 3515 /* Allow time for pending master requests to run */
3550 mac->ops.reset_hw(hw); 3516 mac->ops.reset_hw(hw);
3551 3517
3552 /* 3518 /* For parts with AMT enabled, let the firmware know
3553 * For parts with AMT enabled, let the firmware know
3554 * that the network interface is in control 3519 * that the network interface is in control
3555 */ 3520 */
3556 if (adapter->flags & FLAG_HAS_AMT) 3521 if (adapter->flags & FLAG_HAS_AMT)
@@ -3579,8 +3544,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3579 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3544 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3580 !(adapter->flags & FLAG_SMART_POWER_DOWN)) { 3545 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3581 u16 phy_data = 0; 3546 u16 phy_data = 0;
3582 /* 3547 /* speed up time to link by disabling smart power down, ignore
3583 * speed up time to link by disabling smart power down, ignore
3584 * the return value of this function because there is nothing 3548 * the return value of this function because there is nothing
3585 * different we would do if it failed 3549 * different we would do if it failed
3586 */ 3550 */
@@ -3628,8 +3592,7 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3628 /* execute the writes immediately */ 3592 /* execute the writes immediately */
3629 e1e_flush(); 3593 e1e_flush();
3630 3594
3631 /* 3595 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
3632 * due to rare timing issues, write to TIDV/RDTR again to ensure the
3633 * write is successful 3596 * write is successful
3634 */ 3597 */
3635 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 3598 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -3647,8 +3610,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3647 struct e1000_hw *hw = &adapter->hw; 3610 struct e1000_hw *hw = &adapter->hw;
3648 u32 tctl, rctl; 3611 u32 tctl, rctl;
3649 3612
3650 /* 3613 /* signal that we're down so the interrupt handler does not
3651 * signal that we're down so the interrupt handler does not
3652 * reschedule our watchdog timer 3614 * reschedule our watchdog timer
3653 */ 3615 */
3654 set_bit(__E1000_DOWN, &adapter->state); 3616 set_bit(__E1000_DOWN, &adapter->state);
@@ -3691,8 +3653,7 @@ void e1000e_down(struct e1000_adapter *adapter)
3691 if (!pci_channel_offline(adapter->pdev)) 3653 if (!pci_channel_offline(adapter->pdev))
3692 e1000e_reset(adapter); 3654 e1000e_reset(adapter);
3693 3655
3694 /* 3656 /* TODO: for power management, we could drop the link and
3695 * TODO: for power management, we could drop the link and
3696 * pci_disable_device here. 3657 * pci_disable_device here.
3697 */ 3658 */
3698} 3659}
@@ -3715,7 +3676,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
3715 * Fields are initialized based on PCI device information and 3676 * Fields are initialized based on PCI device information and
3716 * OS network device settings (MTU size). 3677 * OS network device settings (MTU size).
3717 **/ 3678 **/
3718static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 3679static int e1000_sw_init(struct e1000_adapter *adapter)
3719{ 3680{
3720 struct net_device *netdev = adapter->netdev; 3681 struct net_device *netdev = adapter->netdev;
3721 3682
@@ -3755,8 +3716,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3755 e_dbg("icr is %08X\n", icr); 3716 e_dbg("icr is %08X\n", icr);
3756 if (icr & E1000_ICR_RXSEQ) { 3717 if (icr & E1000_ICR_RXSEQ) {
3757 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 3718 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3758 /* 3719 /* Force memory writes to complete before acknowledging the
3759 * Force memory writes to complete before acknowledging the
3760 * interrupt is handled. 3720 * interrupt is handled.
3761 */ 3721 */
3762 wmb(); 3722 wmb();
@@ -3786,7 +3746,8 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3786 e1000e_reset_interrupt_capability(adapter); 3746 e1000e_reset_interrupt_capability(adapter);
3787 3747
3788 /* Assume that the test fails, if it succeeds then the test 3748 /* Assume that the test fails, if it succeeds then the test
3789 * MSI irq handler will unset this flag */ 3749 * MSI irq handler will unset this flag
3750 */
3790 adapter->flags |= FLAG_MSI_TEST_FAILED; 3751 adapter->flags |= FLAG_MSI_TEST_FAILED;
3791 3752
3792 err = pci_enable_msi(adapter->pdev); 3753 err = pci_enable_msi(adapter->pdev);
@@ -3800,8 +3761,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3800 goto msi_test_failed; 3761 goto msi_test_failed;
3801 } 3762 }
3802 3763
3803 /* 3764 /* Force memory writes to complete before enabling and firing an
3804 * Force memory writes to complete before enabling and firing an
3805 * interrupt. 3765 * interrupt.
3806 */ 3766 */
3807 wmb(); 3767 wmb();
@@ -3901,8 +3861,7 @@ static int e1000_open(struct net_device *netdev)
3901 if (err) 3861 if (err)
3902 goto err_setup_rx; 3862 goto err_setup_rx;
3903 3863
3904 /* 3864 /* If AMT is enabled, let the firmware know that the network
3905 * If AMT is enabled, let the firmware know that the network
3906 * interface is now open and reset the part to a known state. 3865 * interface is now open and reset the part to a known state.
3907 */ 3866 */
3908 if (adapter->flags & FLAG_HAS_AMT) { 3867 if (adapter->flags & FLAG_HAS_AMT) {
@@ -3923,8 +3882,7 @@ static int e1000_open(struct net_device *netdev)
3923 PM_QOS_CPU_DMA_LATENCY, 3882 PM_QOS_CPU_DMA_LATENCY,
3924 PM_QOS_DEFAULT_VALUE); 3883 PM_QOS_DEFAULT_VALUE);
3925 3884
3926 /* 3885 /* before we allocate an interrupt, we must be ready to handle it.
3927 * before we allocate an interrupt, we must be ready to handle it.
3928 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3886 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3929 * as soon as we call pci_request_irq, so we have to setup our 3887 * as soon as we call pci_request_irq, so we have to setup our
3930 * clean_rx handler before we do so. 3888 * clean_rx handler before we do so.
@@ -3935,8 +3893,7 @@ static int e1000_open(struct net_device *netdev)
3935 if (err) 3893 if (err)
3936 goto err_req_irq; 3894 goto err_req_irq;
3937 3895
3938 /* 3896 /* Work around PCIe errata with MSI interrupts causing some chipsets to
3939 * Work around PCIe errata with MSI interrupts causing some chipsets to
3940 * ignore e1000e MSI messages, which means we need to test our MSI 3897 * ignore e1000e MSI messages, which means we need to test our MSI
3941 * interrupt now 3898 * interrupt now
3942 */ 3899 */
@@ -4017,16 +3974,14 @@ static int e1000_close(struct net_device *netdev)
4017 e1000e_free_tx_resources(adapter->tx_ring); 3974 e1000e_free_tx_resources(adapter->tx_ring);
4018 e1000e_free_rx_resources(adapter->rx_ring); 3975 e1000e_free_rx_resources(adapter->rx_ring);
4019 3976
4020 /* 3977 /* kill manageability vlan ID if supported, but not if a vlan with
4021 * kill manageability vlan ID if supported, but not if a vlan with
4022 * the same ID is registered on the host OS (let 8021q kill it) 3978 * the same ID is registered on the host OS (let 8021q kill it)
4023 */ 3979 */
4024 if (adapter->hw.mng_cookie.status & 3980 if (adapter->hw.mng_cookie.status &
4025 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) 3981 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4026 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 3982 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4027 3983
4028 /* 3984 /* If AMT is enabled, let the firmware know that the network
4029 * If AMT is enabled, let the firmware know that the network
4030 * interface is now closed 3985 * interface is now closed
4031 */ 3986 */
4032 if ((adapter->flags & FLAG_HAS_AMT) && 3987 if ((adapter->flags & FLAG_HAS_AMT) &&
@@ -4065,8 +4020,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4065 /* activate the work around */ 4020 /* activate the work around */
4066 e1000e_set_laa_state_82571(&adapter->hw, 1); 4021 e1000e_set_laa_state_82571(&adapter->hw, 1);
4067 4022
4068 /* 4023 /* Hold a copy of the LAA in RAR[14] This is done so that
4069 * Hold a copy of the LAA in RAR[14] This is done so that
4070 * between the time RAR[0] gets clobbered and the time it 4024 * between the time RAR[0] gets clobbered and the time it
4071 * gets fixed (in e1000_watchdog), the actual LAA is in one 4025 * gets fixed (in e1000_watchdog), the actual LAA is in one
4072 * of the RARs and no incoming packets directed to this port 4026 * of the RARs and no incoming packets directed to this port
@@ -4099,10 +4053,13 @@ static void e1000e_update_phy_task(struct work_struct *work)
4099 e1000_get_phy_info(&adapter->hw); 4053 e1000_get_phy_info(&adapter->hw);
4100} 4054}
4101 4055
4102/* 4056/**
4057 * e1000_update_phy_info - timre call-back to update PHY info
4058 * @data: pointer to adapter cast into an unsigned long
4059 *
4103 * Need to wait a few seconds after link up to get diagnostic information from 4060 * Need to wait a few seconds after link up to get diagnostic information from
4104 * the phy 4061 * the phy
4105 */ 4062 **/
4106static void e1000_update_phy_info(unsigned long data) 4063static void e1000_update_phy_info(unsigned long data)
4107{ 4064{
4108 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4065 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -4129,8 +4086,7 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4129 if (ret_val) 4086 if (ret_val)
4130 return; 4087 return;
4131 4088
4132 /* 4089 /* A page set is expensive so check if already on desired page.
4133 * A page set is expensive so check if already on desired page.
4134 * If not, set to the page with the PHY status registers. 4090 * If not, set to the page with the PHY status registers.
4135 */ 4091 */
4136 hw->phy.addr = 1; 4092 hw->phy.addr = 1;
@@ -4201,8 +4157,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4201 struct e1000_hw *hw = &adapter->hw; 4157 struct e1000_hw *hw = &adapter->hw;
4202 struct pci_dev *pdev = adapter->pdev; 4158 struct pci_dev *pdev = adapter->pdev;
4203 4159
4204 /* 4160 /* Prevent stats update while adapter is being reset, or if the pci
4205 * Prevent stats update while adapter is being reset, or if the pci
4206 * connection is down. 4161 * connection is down.
4207 */ 4162 */
4208 if (adapter->link_speed == 0) 4163 if (adapter->link_speed == 0)
@@ -4270,8 +4225,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4270 4225
4271 /* Rx Errors */ 4226 /* Rx Errors */
4272 4227
4273 /* 4228 /* RLEC on some newer hardware can be incorrect so build
4274 * RLEC on some newer hardware can be incorrect so build
4275 * our own version based on RUC and ROC 4229 * our own version based on RUC and ROC
4276 */ 4230 */
4277 netdev->stats.rx_errors = adapter->stats.rxerrc + 4231 netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -4323,8 +4277,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
4323 if (ret_val) 4277 if (ret_val)
4324 e_warn("Error reading PHY register\n"); 4278 e_warn("Error reading PHY register\n");
4325 } else { 4279 } else {
4326 /* 4280 /* Do not read PHY registers if link is not up
4327 * Do not read PHY registers if link is not up
4328 * Set values to typical power-on defaults 4281 * Set values to typical power-on defaults
4329 */ 4282 */
4330 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); 4283 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
@@ -4362,8 +4315,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
4362 bool link_active = false; 4315 bool link_active = false;
4363 s32 ret_val = 0; 4316 s32 ret_val = 0;
4364 4317
4365 /* 4318 /* get_link_status is set on LSC (link status) interrupt or
4366 * get_link_status is set on LSC (link status) interrupt or
4367 * Rx sequence error interrupt. get_link_status will stay 4319 * Rx sequence error interrupt. get_link_status will stay
4368 * false until the check_for_link establishes link 4320 * false until the check_for_link establishes link
4369 * for copper adapters ONLY 4321 * for copper adapters ONLY
@@ -4415,8 +4367,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4415{ 4367{
4416 struct e1000_hw *hw = &adapter->hw; 4368 struct e1000_hw *hw = &adapter->hw;
4417 4369
4418 /* 4370 /* With 82574 controllers, PHY needs to be checked periodically
4419 * With 82574 controllers, PHY needs to be checked periodically
4420 * for hung state and reset, if two calls return true 4371 * for hung state and reset, if two calls return true
4421 */ 4372 */
4422 if (e1000_check_phy_82574(hw)) 4373 if (e1000_check_phy_82574(hw))
@@ -4484,8 +4435,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4484 &adapter->link_speed, 4435 &adapter->link_speed,
4485 &adapter->link_duplex); 4436 &adapter->link_duplex);
4486 e1000_print_link_info(adapter); 4437 e1000_print_link_info(adapter);
4487 /* 4438 /* On supported PHYs, check for duplex mismatch only
4488 * On supported PHYs, check for duplex mismatch only
4489 * if link has autonegotiated at 10/100 half 4439 * if link has autonegotiated at 10/100 half
4490 */ 4440 */
4491 if ((hw->phy.type == e1000_phy_igp_3 || 4441 if ((hw->phy.type == e1000_phy_igp_3 ||
@@ -4515,8 +4465,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4515 break; 4465 break;
4516 } 4466 }
4517 4467
4518 /* 4468 /* workaround: re-program speed mode bit after
4519 * workaround: re-program speed mode bit after
4520 * link-up event 4469 * link-up event
4521 */ 4470 */
4522 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 4471 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
@@ -4527,8 +4476,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4527 ew32(TARC(0), tarc0); 4476 ew32(TARC(0), tarc0);
4528 } 4477 }
4529 4478
4530 /* 4479 /* disable TSO for pcie and 10/100 speeds, to avoid
4531 * disable TSO for pcie and 10/100 speeds, to avoid
4532 * some hardware issues 4480 * some hardware issues
4533 */ 4481 */
4534 if (!(adapter->flags & FLAG_TSO_FORCE)) { 4482 if (!(adapter->flags & FLAG_TSO_FORCE)) {
@@ -4549,16 +4497,14 @@ static void e1000_watchdog_task(struct work_struct *work)
4549 } 4497 }
4550 } 4498 }
4551 4499
4552 /* 4500 /* enable transmits in the hardware, need to do this
4553 * enable transmits in the hardware, need to do this
4554 * after setting TARC(0) 4501 * after setting TARC(0)
4555 */ 4502 */
4556 tctl = er32(TCTL); 4503 tctl = er32(TCTL);
4557 tctl |= E1000_TCTL_EN; 4504 tctl |= E1000_TCTL_EN;
4558 ew32(TCTL, tctl); 4505 ew32(TCTL, tctl);
4559 4506
4560 /* 4507 /* Perform any post-link-up configuration before
4561 * Perform any post-link-up configuration before
4562 * reporting link up. 4508 * reporting link up.
4563 */ 4509 */
4564 if (phy->ops.cfg_on_link_up) 4510 if (phy->ops.cfg_on_link_up)
@@ -4609,8 +4555,7 @@ link_up:
4609 4555
4610 if (!netif_carrier_ok(netdev) && 4556 if (!netif_carrier_ok(netdev) &&
4611 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { 4557 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4612 /* 4558 /* We've lost link, so the controller stops DMA,
4613 * We've lost link, so the controller stops DMA,
4614 * but we've got queued Tx work that's never going 4559 * but we've got queued Tx work that's never going
4615 * to get done, so reset controller to flush Tx. 4560 * to get done, so reset controller to flush Tx.
4616 * (Do the reset outside of interrupt context). 4561 * (Do the reset outside of interrupt context).
@@ -4622,8 +4567,7 @@ link_up:
4622 4567
4623 /* Simple mode for Interrupt Throttle Rate (ITR) */ 4568 /* Simple mode for Interrupt Throttle Rate (ITR) */
4624 if (adapter->itr_setting == 4) { 4569 if (adapter->itr_setting == 4) {
4625 /* 4570 /* Symmetric Tx/Rx gets a reduced ITR=2000;
4626 * Symmetric Tx/Rx gets a reduced ITR=2000;
4627 * Total asymmetrical Tx or Rx gets ITR=8000; 4571 * Total asymmetrical Tx or Rx gets ITR=8000;
4628 * everyone else is between 2000-8000. 4572 * everyone else is between 2000-8000.
4629 */ 4573 */
@@ -4648,8 +4592,7 @@ link_up:
4648 /* Force detection of hung controller every watchdog period */ 4592 /* Force detection of hung controller every watchdog period */
4649 adapter->detect_tx_hung = true; 4593 adapter->detect_tx_hung = true;
4650 4594
4651 /* 4595 /* With 82571 controllers, LAA may be overwritten due to controller
4652 * With 82571 controllers, LAA may be overwritten due to controller
4653 * reset from the other port. Set the appropriate LAA in RAR[0] 4596 * reset from the other port. Set the appropriate LAA in RAR[0]
4654 */ 4597 */
4655 if (e1000e_get_laa_state_82571(hw)) 4598 if (e1000e_get_laa_state_82571(hw))
@@ -4948,8 +4891,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4948 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) 4891 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4949 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); 4892 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
4950 4893
4951 /* 4894 /* Force memory writes to complete before letting h/w
4952 * Force memory writes to complete before letting h/w
4953 * know there are new descriptors to fetch. (Only 4895 * know there are new descriptors to fetch. (Only
4954 * applicable for weak-ordered memory model archs, 4896 * applicable for weak-ordered memory model archs,
4955 * such as IA-64). 4897 * such as IA-64).
@@ -4963,8 +4905,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4963 else 4905 else
4964 writel(i, tx_ring->tail); 4906 writel(i, tx_ring->tail);
4965 4907
4966 /* 4908 /* we need this if more than one processor can write to our tail
4967 * we need this if more than one processor can write to our tail
4968 * at a time, it synchronizes IO on IA64/Altix systems 4909 * at a time, it synchronizes IO on IA64/Altix systems
4969 */ 4910 */
4970 mmiowb(); 4911 mmiowb();
@@ -5014,15 +4955,13 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5014 struct e1000_adapter *adapter = tx_ring->adapter; 4955 struct e1000_adapter *adapter = tx_ring->adapter;
5015 4956
5016 netif_stop_queue(adapter->netdev); 4957 netif_stop_queue(adapter->netdev);
5017 /* 4958 /* Herbert's original patch had:
5018 * Herbert's original patch had:
5019 * smp_mb__after_netif_stop_queue(); 4959 * smp_mb__after_netif_stop_queue();
5020 * but since that doesn't exist yet, just open code it. 4960 * but since that doesn't exist yet, just open code it.
5021 */ 4961 */
5022 smp_mb(); 4962 smp_mb();
5023 4963
5024 /* 4964 /* We need to check again in a case another CPU has just
5025 * We need to check again in a case another CPU has just
5026 * made room available. 4965 * made room available.
5027 */ 4966 */
5028 if (e1000_desc_unused(tx_ring) < size) 4967 if (e1000_desc_unused(tx_ring) < size)
@@ -5067,18 +5006,26 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5067 return NETDEV_TX_OK; 5006 return NETDEV_TX_OK;
5068 } 5007 }
5069 5008
5009 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5010 * pad skb in order to meet this minimum size requirement
5011 */
5012 if (unlikely(skb->len < 17)) {
5013 if (skb_pad(skb, 17 - skb->len))
5014 return NETDEV_TX_OK;
5015 skb->len = 17;
5016 skb_set_tail_pointer(skb, 17);
5017 }
5018
5070 mss = skb_shinfo(skb)->gso_size; 5019 mss = skb_shinfo(skb)->gso_size;
5071 if (mss) { 5020 if (mss) {
5072 u8 hdr_len; 5021 u8 hdr_len;
5073 5022
5074 /* 5023 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5075 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
5076 * points to just header, pull a few bytes of payload from 5024 * points to just header, pull a few bytes of payload from
5077 * frags into skb->data 5025 * frags into skb->data
5078 */ 5026 */
5079 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 5027 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5080 /* 5028 /* we do this workaround for ES2LAN, but it is un-necessary,
5081 * we do this workaround for ES2LAN, but it is un-necessary,
5082 * avoiding it could save a lot of cycles 5029 * avoiding it could save a lot of cycles
5083 */ 5030 */
5084 if (skb->data_len && (hdr_len == len)) { 5031 if (skb->data_len && (hdr_len == len)) {
@@ -5109,8 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5109 if (adapter->hw.mac.tx_pkt_filtering) 5056 if (adapter->hw.mac.tx_pkt_filtering)
5110 e1000_transfer_dhcp_info(adapter, skb); 5057 e1000_transfer_dhcp_info(adapter, skb);
5111 5058
5112 /* 5059 /* need: count + 2 desc gap to keep tail from touching
5113 * need: count + 2 desc gap to keep tail from touching
5114 * head, otherwise try next time 5060 * head, otherwise try next time
5115 */ 5061 */
5116 if (e1000_maybe_stop_tx(tx_ring, count + 2)) 5062 if (e1000_maybe_stop_tx(tx_ring, count + 2))
@@ -5134,8 +5080,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5134 else if (e1000_tx_csum(tx_ring, skb)) 5080 else if (e1000_tx_csum(tx_ring, skb))
5135 tx_flags |= E1000_TX_FLAGS_CSUM; 5081 tx_flags |= E1000_TX_FLAGS_CSUM;
5136 5082
5137 /* 5083 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5138 * Old method was to assume IPv4 packet by default if TSO was enabled.
5139 * 82571 hardware supports TSO capabilities for IPv6 as well... 5084 * 82571 hardware supports TSO capabilities for IPv6 as well...
5140 * no longer assume, we must. 5085 * no longer assume, we must.
5141 */ 5086 */
@@ -5222,8 +5167,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5222 5167
5223 /* Rx Errors */ 5168 /* Rx Errors */
5224 5169
5225 /* 5170 /* RLEC on some newer hardware can be incorrect so build
5226 * RLEC on some newer hardware can be incorrect so build
5227 * our own version based on RUC and ROC 5171 * our own version based on RUC and ROC
5228 */ 5172 */
5229 stats->rx_errors = adapter->stats.rxerrc + 5173 stats->rx_errors = adapter->stats.rxerrc +
@@ -5292,8 +5236,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5292 if (netif_running(netdev)) 5236 if (netif_running(netdev))
5293 e1000e_down(adapter); 5237 e1000e_down(adapter);
5294 5238
5295 /* 5239 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5296 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5297 * means we reserve 2 more, this pushes us to allocate from the next 5240 * means we reserve 2 more, this pushes us to allocate from the next
5298 * larger slab size. 5241 * larger slab size.
5299 * i.e. RXBUFFER_2048 --> size-4096 slab 5242 * i.e. RXBUFFER_2048 --> size-4096 slab
@@ -5555,8 +5498,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5555 if (adapter->hw.phy.type == e1000_phy_igp_3) 5498 if (adapter->hw.phy.type == e1000_phy_igp_3)
5556 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 5499 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5557 5500
5558 /* 5501 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5559 * Release control of h/w to f/w. If f/w is AMT enabled, this
5560 * would have already happened in close and is redundant. 5502 * would have already happened in close and is redundant.
5561 */ 5503 */
5562 e1000e_release_hw_control(adapter); 5504 e1000e_release_hw_control(adapter);
@@ -5583,8 +5525,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5583 struct net_device *netdev = pci_get_drvdata(pdev); 5525 struct net_device *netdev = pci_get_drvdata(pdev);
5584 struct e1000_adapter *adapter = netdev_priv(netdev); 5526 struct e1000_adapter *adapter = netdev_priv(netdev);
5585 5527
5586 /* 5528 /* The pci-e switch on some quad port adapters will report a
5587 * The pci-e switch on some quad port adapters will report a
5588 * correctable error when the MAC transitions from D0 to D3. To 5529 * correctable error when the MAC transitions from D0 to D3. To
5589 * prevent this we need to mask off the correctable errors on the 5530 * prevent this we need to mask off the correctable errors on the
5590 * downstream port of the pci-e switch. 5531 * downstream port of the pci-e switch.
@@ -5613,8 +5554,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5613#else 5554#else
5614static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 5555static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5615{ 5556{
5616 /* 5557 /* Both device and parent should have the same ASPM setting.
5617 * Both device and parent should have the same ASPM setting.
5618 * Disable ASPM in downstream component first and then upstream. 5558 * Disable ASPM in downstream component first and then upstream.
5619 */ 5559 */
5620 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state); 5560 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
@@ -5708,8 +5648,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5708 5648
5709 netif_device_attach(netdev); 5649 netif_device_attach(netdev);
5710 5650
5711 /* 5651 /* If the controller has AMT, do not set DRV_LOAD until the interface
5712 * If the controller has AMT, do not set DRV_LOAD until the interface
5713 * is up. For all other cases, let the f/w know that the h/w is now 5652 * is up. For all other cases, let the f/w know that the h/w is now
5714 * under the control of the driver. 5653 * under the control of the driver.
5715 */ 5654 */
@@ -5837,7 +5776,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
5837 return IRQ_HANDLED; 5776 return IRQ_HANDLED;
5838} 5777}
5839 5778
5840/* 5779/**
5780 * e1000_netpoll
5781 * @netdev: network interface device structure
5782 *
5841 * Polling 'interrupt' - used by things like netconsole to send skbs 5783 * Polling 'interrupt' - used by things like netconsole to send skbs
5842 * without having to re-enable interrupts. It's not called while 5784 * without having to re-enable interrupts. It's not called while
5843 * the interrupt routine is executing. 5785 * the interrupt routine is executing.
@@ -5962,8 +5904,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
5962 5904
5963 netif_device_attach(netdev); 5905 netif_device_attach(netdev);
5964 5906
5965 /* 5907 /* If the controller has AMT, do not set DRV_LOAD until the interface
5966 * If the controller has AMT, do not set DRV_LOAD until the interface
5967 * is up. For all other cases, let the f/w know that the h/w is now 5908 * is up. For all other cases, let the f/w know that the h/w is now
5968 * under the control of the driver. 5909 * under the control of the driver.
5969 */ 5910 */
@@ -6083,8 +6024,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
6083 * The OS initialization, configuring of the adapter private structure, 6024 * The OS initialization, configuring of the adapter private structure,
6084 * and a hardware reset occur. 6025 * and a hardware reset occur.
6085 **/ 6026 **/
6086static int __devinit e1000_probe(struct pci_dev *pdev, 6027static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6087 const struct pci_device_id *ent)
6088{ 6028{
6089 struct net_device *netdev; 6029 struct net_device *netdev;
6090 struct e1000_adapter *adapter; 6030 struct e1000_adapter *adapter;
@@ -6262,14 +6202,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6262 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 6202 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6263 adapter->flags |= FLAG_MNG_PT_ENABLED; 6203 adapter->flags |= FLAG_MNG_PT_ENABLED;
6264 6204
6265 /* 6205 /* before reading the NVM, reset the controller to
6266 * before reading the NVM, reset the controller to
6267 * put the device in a known good starting state 6206 * put the device in a known good starting state
6268 */ 6207 */
6269 adapter->hw.mac.ops.reset_hw(&adapter->hw); 6208 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6270 6209
6271 /* 6210 /* systems with ASPM and others may see the checksum fail on the first
6272 * systems with ASPM and others may see the checksum fail on the first
6273 * attempt. Let's give it a few tries 6211 * attempt. Let's give it a few tries
6274 */ 6212 */
6275 for (i = 0;; i++) { 6213 for (i = 0;; i++) {
@@ -6324,8 +6262,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6324 adapter->rx_ring->count = E1000_DEFAULT_RXD; 6262 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6325 adapter->tx_ring->count = E1000_DEFAULT_TXD; 6263 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6326 6264
6327 /* 6265 /* Initial Wake on LAN setting - If APM wake is enabled in
6328 * Initial Wake on LAN setting - If APM wake is enabled in
6329 * the EEPROM, enable the ACPI Magic Packet filter 6266 * the EEPROM, enable the ACPI Magic Packet filter
6330 */ 6267 */
6331 if (adapter->flags & FLAG_APME_IN_WUC) { 6268 if (adapter->flags & FLAG_APME_IN_WUC) {
@@ -6349,8 +6286,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6349 if (eeprom_data & eeprom_apme_mask) 6286 if (eeprom_data & eeprom_apme_mask)
6350 adapter->eeprom_wol |= E1000_WUFC_MAG; 6287 adapter->eeprom_wol |= E1000_WUFC_MAG;
6351 6288
6352 /* 6289 /* now that we have the eeprom settings, apply the special cases
6353 * now that we have the eeprom settings, apply the special cases
6354 * where the eeprom may be wrong or the board simply won't support 6290 * where the eeprom may be wrong or the board simply won't support
6355 * wake on lan on a particular port 6291 * wake on lan on a particular port
6356 */ 6292 */
@@ -6367,8 +6303,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6367 /* reset the hardware with the new settings */ 6303 /* reset the hardware with the new settings */
6368 e1000e_reset(adapter); 6304 e1000e_reset(adapter);
6369 6305
6370 /* 6306 /* If the controller has AMT, do not set DRV_LOAD until the interface
6371 * If the controller has AMT, do not set DRV_LOAD until the interface
6372 * is up. For all other cases, let the f/w know that the h/w is now 6307 * is up. For all other cases, let the f/w know that the h/w is now
6373 * under the control of the driver. 6308 * under the control of the driver.
6374 */ 6309 */
@@ -6425,14 +6360,13 @@ err_dma:
6425 * Hot-Plug event, or because the driver is going to be removed from 6360 * Hot-Plug event, or because the driver is going to be removed from
6426 * memory. 6361 * memory.
6427 **/ 6362 **/
6428static void __devexit e1000_remove(struct pci_dev *pdev) 6363static void e1000_remove(struct pci_dev *pdev)
6429{ 6364{
6430 struct net_device *netdev = pci_get_drvdata(pdev); 6365 struct net_device *netdev = pci_get_drvdata(pdev);
6431 struct e1000_adapter *adapter = netdev_priv(netdev); 6366 struct e1000_adapter *adapter = netdev_priv(netdev);
6432 bool down = test_bit(__E1000_DOWN, &adapter->state); 6367 bool down = test_bit(__E1000_DOWN, &adapter->state);
6433 6368
6434 /* 6369 /* The timers may be rescheduled, so explicitly disable them
6435 * The timers may be rescheduled, so explicitly disable them
6436 * from being rescheduled. 6370 * from being rescheduled.
6437 */ 6371 */
6438 if (!down) 6372 if (!down)
@@ -6457,8 +6391,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
6457 if (pci_dev_run_wake(pdev)) 6391 if (pci_dev_run_wake(pdev))
6458 pm_runtime_get_noresume(&pdev->dev); 6392 pm_runtime_get_noresume(&pdev->dev);
6459 6393
6460 /* 6394 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6461 * Release control of h/w to f/w. If f/w is AMT enabled, this
6462 * would have already happened in close and is redundant. 6395 * would have already happened in close and is redundant.
6463 */ 6396 */
6464 e1000e_release_hw_control(adapter); 6397 e1000e_release_hw_control(adapter);
@@ -6578,7 +6511,7 @@ static struct pci_driver e1000_driver = {
6578 .name = e1000e_driver_name, 6511 .name = e1000e_driver_name,
6579 .id_table = e1000_pci_tbl, 6512 .id_table = e1000_pci_tbl,
6580 .probe = e1000_probe, 6513 .probe = e1000_probe,
6581 .remove = __devexit_p(e1000_remove), 6514 .remove = e1000_remove,
6582#ifdef CONFIG_PM 6515#ifdef CONFIG_PM
6583 .driver = { 6516 .driver = {
6584 .pm = &e1000_pm_ops, 6517 .pm = &e1000_pm_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a969f1af1b4e..b6468804cb2e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -279,8 +279,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
279 e1e_flush(); 279 e1e_flush();
280 udelay(1); 280 udelay(1);
281 281
282 /* 282 /* Read "Status Register" repeatedly until the LSB is cleared.
283 * Read "Status Register" repeatedly until the LSB is cleared.
284 * The EEPROM will signal that the command has been completed 283 * The EEPROM will signal that the command has been completed
285 * by clearing bit 0 of the internal status register. If it's 284 * by clearing bit 0 of the internal status register. If it's
286 * not cleared within 'timeout', then error out. 285 * not cleared within 'timeout', then error out.
@@ -321,8 +320,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
321 u32 i, eerd = 0; 320 u32 i, eerd = 0;
322 s32 ret_val = 0; 321 s32 ret_val = 0;
323 322
324 /* 323 /* A check for invalid values: offset too large, too many words,
325 * A check for invalid values: offset too large, too many words,
326 * too many words for the offset, and not enough words. 324 * too many words for the offset, and not enough words.
327 */ 325 */
328 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 326 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -364,8 +362,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
364 s32 ret_val; 362 s32 ret_val;
365 u16 widx = 0; 363 u16 widx = 0;
366 364
367 /* 365 /* A check for invalid values: offset too large, too many words,
368 * A check for invalid values: offset too large, too many words,
369 * and not enough words. 366 * and not enough words.
370 */ 367 */
371 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 368 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -393,8 +390,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
393 390
394 e1000_standby_nvm(hw); 391 e1000_standby_nvm(hw);
395 392
396 /* 393 /* Some SPI eeproms use the 8th address bit embedded in the
397 * Some SPI eeproms use the 8th address bit embedded in the
398 * opcode 394 * opcode
399 */ 395 */
400 if ((nvm->address_bits == 8) && (offset >= 128)) 396 if ((nvm->address_bits == 8) && (offset >= 128))
@@ -461,8 +457,7 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
461 return ret_val; 457 return ret_val;
462 } 458 }
463 459
464 /* 460 /* if nvm_data is not ptr guard the PBA must be in legacy format which
465 * if nvm_data is not ptr guard the PBA must be in legacy format which
466 * means pba_ptr is actually our second data word for the PBA number 461 * means pba_ptr is actually our second data word for the PBA number
467 * and we can decode it into an ascii string 462 * and we can decode it into an ascii string
468 */ 463 */
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index dfbfa7fd98c3..89d536dd7ff5 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -32,11 +32,9 @@
32 32
33#include "e1000.h" 33#include "e1000.h"
34 34
35/* 35/* This is the only thing that needs to be changed to adjust the
36 * This is the only thing that needs to be changed to adjust the
37 * maximum number of ports that the driver can manage. 36 * maximum number of ports that the driver can manage.
38 */ 37 */
39
40#define E1000_MAX_NIC 32 38#define E1000_MAX_NIC 32
41 39
42#define OPTION_UNSET -1 40#define OPTION_UNSET -1
@@ -49,22 +47,19 @@ module_param(copybreak, uint, 0644);
49MODULE_PARM_DESC(copybreak, 47MODULE_PARM_DESC(copybreak,
50 "Maximum size of packet that is copied to a new buffer on receive"); 48 "Maximum size of packet that is copied to a new buffer on receive");
51 49
52/* 50/* All parameters are treated the same, as an integer array of values.
53 * All parameters are treated the same, as an integer array of values.
54 * This macro just reduces the need to repeat the same declaration code 51 * This macro just reduces the need to repeat the same declaration code
55 * over and over (plus this helps to avoid typo bugs). 52 * over and over (plus this helps to avoid typo bugs).
56 */ 53 */
57
58#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } 54#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
59#define E1000_PARAM(X, desc) \ 55#define E1000_PARAM(X, desc) \
60 static int __devinitdata X[E1000_MAX_NIC+1] \ 56 static int X[E1000_MAX_NIC+1] \
61 = E1000_PARAM_INIT; \ 57 = E1000_PARAM_INIT; \
62 static unsigned int num_##X; \ 58 static unsigned int num_##X; \
63 module_param_array_named(X, X, int, &num_##X, 0); \ 59 module_param_array_named(X, X, int, &num_##X, 0); \
64 MODULE_PARM_DESC(X, desc); 60 MODULE_PARM_DESC(X, desc);
65 61
66/* 62/* Transmit Interrupt Delay in units of 1.024 microseconds
67 * Transmit Interrupt Delay in units of 1.024 microseconds
68 * Tx interrupt delay needs to typically be set to something non-zero 63 * Tx interrupt delay needs to typically be set to something non-zero
69 * 64 *
70 * Valid Range: 0-65535 65 * Valid Range: 0-65535
@@ -74,8 +69,7 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
74#define MAX_TXDELAY 0xFFFF 69#define MAX_TXDELAY 0xFFFF
75#define MIN_TXDELAY 0 70#define MIN_TXDELAY 0
76 71
77/* 72/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
78 * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
79 * 73 *
80 * Valid Range: 0-65535 74 * Valid Range: 0-65535
81 */ 75 */
@@ -84,8 +78,7 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
84#define MAX_TXABSDELAY 0xFFFF 78#define MAX_TXABSDELAY 0xFFFF
85#define MIN_TXABSDELAY 0 79#define MIN_TXABSDELAY 0
86 80
87/* 81/* Receive Interrupt Delay in units of 1.024 microseconds
88 * Receive Interrupt Delay in units of 1.024 microseconds
89 * hardware will likely hang if you set this to anything but zero. 82 * hardware will likely hang if you set this to anything but zero.
90 * 83 *
91 * Valid Range: 0-65535 84 * Valid Range: 0-65535
@@ -94,8 +87,7 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
94#define MAX_RXDELAY 0xFFFF 87#define MAX_RXDELAY 0xFFFF
95#define MIN_RXDELAY 0 88#define MIN_RXDELAY 0
96 89
97/* 90/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
98 * Receive Absolute Interrupt Delay in units of 1.024 microseconds
99 * 91 *
100 * Valid Range: 0-65535 92 * Valid Range: 0-65535
101 */ 93 */
@@ -103,8 +95,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
103#define MAX_RXABSDELAY 0xFFFF 95#define MAX_RXABSDELAY 0xFFFF
104#define MIN_RXABSDELAY 0 96#define MIN_RXABSDELAY 0
105 97
106/* 98/* Interrupt Throttle Rate (interrupts/sec)
107 * Interrupt Throttle Rate (interrupts/sec)
108 * 99 *
109 * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative 100 * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
110 */ 101 */
@@ -113,8 +104,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
113#define MAX_ITR 100000 104#define MAX_ITR 100000
114#define MIN_ITR 100 105#define MIN_ITR 100
115 106
116/* 107/* IntMode (Interrupt Mode)
117 * IntMode (Interrupt Mode)
118 * 108 *
119 * Valid Range: varies depending on kernel configuration & hardware support 109 * Valid Range: varies depending on kernel configuration & hardware support
120 * 110 *
@@ -132,8 +122,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
132#define MAX_INTMODE 2 122#define MAX_INTMODE 2
133#define MIN_INTMODE 0 123#define MIN_INTMODE 0
134 124
135/* 125/* Enable Smart Power Down of the PHY
136 * Enable Smart Power Down of the PHY
137 * 126 *
138 * Valid Range: 0, 1 127 * Valid Range: 0, 1
139 * 128 *
@@ -141,8 +130,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
141 */ 130 */
142E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); 131E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
143 132
144/* 133/* Enable Kumeran Lock Loss workaround
145 * Enable Kumeran Lock Loss workaround
146 * 134 *
147 * Valid Range: 0, 1 135 * Valid Range: 0, 1
148 * 136 *
@@ -150,8 +138,7 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
150 */ 138 */
151E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); 139E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
152 140
153/* 141/* Write Protect NVM
154 * Write Protect NVM
155 * 142 *
156 * Valid Range: 0, 1 143 * Valid Range: 0, 1
157 * 144 *
@@ -159,8 +146,7 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
159 */ 146 */
160E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 147E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
161 148
162/* 149/* Enable CRC Stripping
163 * Enable CRC Stripping
164 * 150 *
165 * Valid Range: 0, 1 151 * Valid Range: 0, 1
166 * 152 *
@@ -186,9 +172,9 @@ struct e1000_option {
186 } arg; 172 } arg;
187}; 173};
188 174
189static int __devinit e1000_validate_option(unsigned int *value, 175static int e1000_validate_option(unsigned int *value,
190 const struct e1000_option *opt, 176 const struct e1000_option *opt,
191 struct e1000_adapter *adapter) 177 struct e1000_adapter *adapter)
192{ 178{
193 if (*value == OPTION_UNSET) { 179 if (*value == OPTION_UNSET) {
194 *value = opt->def; 180 *value = opt->def;
@@ -249,7 +235,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
249 * value exists, a default value is used. The final value is stored 235 * value exists, a default value is used. The final value is stored
250 * in a variable in the adapter structure. 236 * in a variable in the adapter structure.
251 **/ 237 **/
252void __devinit e1000e_check_options(struct e1000_adapter *adapter) 238void e1000e_check_options(struct e1000_adapter *adapter)
253{ 239{
254 struct e1000_hw *hw = &adapter->hw; 240 struct e1000_hw *hw = &adapter->hw;
255 int bd = adapter->bd_number; 241 int bd = adapter->bd_number;
@@ -351,8 +337,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
351 if (num_InterruptThrottleRate > bd) { 337 if (num_InterruptThrottleRate > bd) {
352 adapter->itr = InterruptThrottleRate[bd]; 338 adapter->itr = InterruptThrottleRate[bd];
353 339
354 /* 340 /* Make sure a message is printed for non-special
355 * Make sure a message is printed for non-special
356 * values. And in case of an invalid option, display 341 * values. And in case of an invalid option, display
357 * warning, use default and go through itr/itr_setting 342 * warning, use default and go through itr/itr_setting
358 * adjustment logic below 343 * adjustment logic below
@@ -361,14 +346,12 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
361 e1000_validate_option(&adapter->itr, &opt, adapter)) 346 e1000_validate_option(&adapter->itr, &opt, adapter))
362 adapter->itr = opt.def; 347 adapter->itr = opt.def;
363 } else { 348 } else {
364 /* 349 /* If no option specified, use default value and go
365 * If no option specified, use default value and go
366 * through the logic below to adjust itr/itr_setting 350 * through the logic below to adjust itr/itr_setting
367 */ 351 */
368 adapter->itr = opt.def; 352 adapter->itr = opt.def;
369 353
370 /* 354 /* Make sure a message is printed for non-special
371 * Make sure a message is printed for non-special
372 * default values 355 * default values
373 */ 356 */
374 if (adapter->itr > 4) 357 if (adapter->itr > 4)
@@ -400,8 +383,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
400 opt.name); 383 opt.name);
401 break; 384 break;
402 default: 385 default:
403 /* 386 /* Save the setting, because the dynamic bits
404 * Save the setting, because the dynamic bits
405 * change itr. 387 * change itr.
406 * 388 *
407 * Clear the lower two bits because 389 * Clear the lower two bits because
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fc62a3f3a5be..28b38ff37e84 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -193,8 +193,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
193 return -E1000_ERR_PARAM; 193 return -E1000_ERR_PARAM;
194 } 194 }
195 195
196 /* 196 /* Set up Op-code, Phy Address, and register offset in the MDI
197 * Set up Op-code, Phy Address, and register offset in the MDI
198 * Control register. The MAC will take care of interfacing with the 197 * Control register. The MAC will take care of interfacing with the
199 * PHY to retrieve the desired data. 198 * PHY to retrieve the desired data.
200 */ 199 */
@@ -204,8 +203,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
204 203
205 ew32(MDIC, mdic); 204 ew32(MDIC, mdic);
206 205
207 /* 206 /* Poll the ready bit to see if the MDI read completed
208 * Poll the ready bit to see if the MDI read completed
209 * Increasing the time out as testing showed failures with 207 * Increasing the time out as testing showed failures with
210 * the lower time out 208 * the lower time out
211 */ 209 */
@@ -225,8 +223,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
225 } 223 }
226 *data = (u16) mdic; 224 *data = (u16) mdic;
227 225
228 /* 226 /* Allow some time after each MDIC transaction to avoid
229 * Allow some time after each MDIC transaction to avoid
230 * reading duplicate data in the next MDIC transaction. 227 * reading duplicate data in the next MDIC transaction.
231 */ 228 */
232 if (hw->mac.type == e1000_pch2lan) 229 if (hw->mac.type == e1000_pch2lan)
@@ -253,8 +250,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
253 return -E1000_ERR_PARAM; 250 return -E1000_ERR_PARAM;
254 } 251 }
255 252
256 /* 253 /* Set up Op-code, Phy Address, and register offset in the MDI
257 * Set up Op-code, Phy Address, and register offset in the MDI
258 * Control register. The MAC will take care of interfacing with the 254 * Control register. The MAC will take care of interfacing with the
259 * PHY to retrieve the desired data. 255 * PHY to retrieve the desired data.
260 */ 256 */
@@ -265,8 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
265 261
266 ew32(MDIC, mdic); 262 ew32(MDIC, mdic);
267 263
268 /* 264 /* Poll the ready bit to see if the MDI read completed
269 * Poll the ready bit to see if the MDI read completed
270 * Increasing the time out as testing showed failures with 265 * Increasing the time out as testing showed failures with
271 * the lower time out 266 * the lower time out
272 */ 267 */
@@ -285,8 +280,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
285 return -E1000_ERR_PHY; 280 return -E1000_ERR_PHY;
286 } 281 }
287 282
288 /* 283 /* Allow some time after each MDIC transaction to avoid
289 * Allow some time after each MDIC transaction to avoid
290 * reading duplicate data in the next MDIC transaction. 284 * reading duplicate data in the next MDIC transaction.
291 */ 285 */
292 if (hw->mac.type == e1000_pch2lan) 286 if (hw->mac.type == e1000_pch2lan)
@@ -708,8 +702,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
708 if (ret_val) 702 if (ret_val)
709 return ret_val; 703 return ret_val;
710 phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; 704 phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
711 /* 705 /* Options:
712 * Options:
713 * 0 - Auto (default) 706 * 0 - Auto (default)
714 * 1 - MDI mode 707 * 1 - MDI mode
715 * 2 - MDI-X mode 708 * 2 - MDI-X mode
@@ -754,8 +747,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
754 if (phy->type != e1000_phy_bm) 747 if (phy->type != e1000_phy_bm)
755 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 748 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
756 749
757 /* 750 /* Options:
758 * Options:
759 * MDI/MDI-X = 0 (default) 751 * MDI/MDI-X = 0 (default)
760 * 0 - Auto for all speeds 752 * 0 - Auto for all speeds
761 * 1 - MDI mode 753 * 1 - MDI mode
@@ -780,8 +772,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
780 break; 772 break;
781 } 773 }
782 774
783 /* 775 /* Options:
784 * Options:
785 * disable_polarity_correction = 0 (default) 776 * disable_polarity_correction = 0 (default)
786 * Automatic Correction for Reversed Cable Polarity 777 * Automatic Correction for Reversed Cable Polarity
787 * 0 - Disabled 778 * 0 - Disabled
@@ -818,8 +809,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
818 if ((phy->type == e1000_phy_m88) && 809 if ((phy->type == e1000_phy_m88) &&
819 (phy->revision < E1000_REVISION_4) && 810 (phy->revision < E1000_REVISION_4) &&
820 (phy->id != BME1000_E_PHY_ID_R2)) { 811 (phy->id != BME1000_E_PHY_ID_R2)) {
821 /* 812 /* Force TX_CLK in the Extended PHY Specific Control Register
822 * Force TX_CLK in the Extended PHY Specific Control Register
823 * to 25MHz clock. 813 * to 25MHz clock.
824 */ 814 */
825 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 815 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -899,8 +889,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
899 return ret_val; 889 return ret_val;
900 } 890 }
901 891
902 /* 892 /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
903 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
904 * timeout issues when LFS is enabled. 893 * timeout issues when LFS is enabled.
905 */ 894 */
906 msleep(100); 895 msleep(100);
@@ -936,8 +925,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
936 925
937 /* set auto-master slave resolution settings */ 926 /* set auto-master slave resolution settings */
938 if (hw->mac.autoneg) { 927 if (hw->mac.autoneg) {
939 /* 928 /* when autonegotiation advertisement is only 1000Mbps then we
940 * when autonegotiation advertisement is only 1000Mbps then we
941 * should disable SmartSpeed and enable Auto MasterSlave 929 * should disable SmartSpeed and enable Auto MasterSlave
942 * resolution as hardware default. 930 * resolution as hardware default.
943 */ 931 */
@@ -1001,16 +989,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1001 return ret_val; 989 return ret_val;
1002 } 990 }
1003 991
1004 /* 992 /* Need to parse both autoneg_advertised and fc and set up
1005 * Need to parse both autoneg_advertised and fc and set up
1006 * the appropriate PHY registers. First we will parse for 993 * the appropriate PHY registers. First we will parse for
1007 * autoneg_advertised software override. Since we can advertise 994 * autoneg_advertised software override. Since we can advertise
1008 * a plethora of combinations, we need to check each bit 995 * a plethora of combinations, we need to check each bit
1009 * individually. 996 * individually.
1010 */ 997 */
1011 998
1012 /* 999 /* First we clear all the 10/100 mb speed bits in the Auto-Neg
1013 * First we clear all the 10/100 mb speed bits in the Auto-Neg
1014 * Advertisement Register (Address 4) and the 1000 mb speed bits in 1000 * Advertisement Register (Address 4) and the 1000 mb speed bits in
1015 * the 1000Base-T Control Register (Address 9). 1001 * the 1000Base-T Control Register (Address 9).
1016 */ 1002 */
@@ -1056,8 +1042,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1056 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 1042 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
1057 } 1043 }
1058 1044
1059 /* 1045 /* Check for a software override of the flow control settings, and
1060 * Check for a software override of the flow control settings, and
1061 * setup the PHY advertisement registers accordingly. If 1046 * setup the PHY advertisement registers accordingly. If
1062 * auto-negotiation is enabled, then software will have to set the 1047 * auto-negotiation is enabled, then software will have to set the
1063 * "PAUSE" bits to the correct value in the Auto-Negotiation 1048 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1076,15 +1061,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1076 */ 1061 */
1077 switch (hw->fc.current_mode) { 1062 switch (hw->fc.current_mode) {
1078 case e1000_fc_none: 1063 case e1000_fc_none:
1079 /* 1064 /* Flow control (Rx & Tx) is completely disabled by a
1080 * Flow control (Rx & Tx) is completely disabled by a
1081 * software over-ride. 1065 * software over-ride.
1082 */ 1066 */
1083 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1067 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1084 break; 1068 break;
1085 case e1000_fc_rx_pause: 1069 case e1000_fc_rx_pause:
1086 /* 1070 /* Rx Flow control is enabled, and Tx Flow control is
1087 * Rx Flow control is enabled, and Tx Flow control is
1088 * disabled, by a software over-ride. 1071 * disabled, by a software over-ride.
1089 * 1072 *
1090 * Since there really isn't a way to advertise that we are 1073 * Since there really isn't a way to advertise that we are
@@ -1096,16 +1079,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1096 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1079 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1097 break; 1080 break;
1098 case e1000_fc_tx_pause: 1081 case e1000_fc_tx_pause:
1099 /* 1082 /* Tx Flow control is enabled, and Rx Flow control is
1100 * Tx Flow control is enabled, and Rx Flow control is
1101 * disabled, by a software over-ride. 1083 * disabled, by a software over-ride.
1102 */ 1084 */
1103 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 1085 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
1104 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 1086 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
1105 break; 1087 break;
1106 case e1000_fc_full: 1088 case e1000_fc_full:
1107 /* 1089 /* Flow control (both Rx and Tx) is enabled by a software
1108 * Flow control (both Rx and Tx) is enabled by a software
1109 * over-ride. 1090 * over-ride.
1110 */ 1091 */
1111 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1092 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1142,14 +1123,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1142 s32 ret_val; 1123 s32 ret_val;
1143 u16 phy_ctrl; 1124 u16 phy_ctrl;
1144 1125
1145 /* 1126 /* Perform some bounds checking on the autoneg advertisement
1146 * Perform some bounds checking on the autoneg advertisement
1147 * parameter. 1127 * parameter.
1148 */ 1128 */
1149 phy->autoneg_advertised &= phy->autoneg_mask; 1129 phy->autoneg_advertised &= phy->autoneg_mask;
1150 1130
1151 /* 1131 /* If autoneg_advertised is zero, we assume it was not defaulted
1152 * If autoneg_advertised is zero, we assume it was not defaulted
1153 * by the calling code so we set to advertise full capability. 1132 * by the calling code so we set to advertise full capability.
1154 */ 1133 */
1155 if (!phy->autoneg_advertised) 1134 if (!phy->autoneg_advertised)
@@ -1163,8 +1142,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1163 } 1142 }
1164 e_dbg("Restarting Auto-Neg\n"); 1143 e_dbg("Restarting Auto-Neg\n");
1165 1144
1166 /* 1145 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1167 * Restart auto-negotiation by setting the Auto Neg Enable bit and
1168 * the Auto Neg Restart bit in the PHY control register. 1146 * the Auto Neg Restart bit in the PHY control register.
1169 */ 1147 */
1170 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); 1148 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -1176,8 +1154,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1176 if (ret_val) 1154 if (ret_val)
1177 return ret_val; 1155 return ret_val;
1178 1156
1179 /* 1157 /* Does the user want to wait for Auto-Neg to complete here, or
1180 * Does the user want to wait for Auto-Neg to complete here, or
1181 * check at a later time (for example, callback routine). 1158 * check at a later time (for example, callback routine).
1182 */ 1159 */
1183 if (phy->autoneg_wait_to_complete) { 1160 if (phy->autoneg_wait_to_complete) {
@@ -1208,16 +1185,14 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
1208 bool link; 1185 bool link;
1209 1186
1210 if (hw->mac.autoneg) { 1187 if (hw->mac.autoneg) {
1211 /* 1188 /* Setup autoneg and flow control advertisement and perform
1212 * Setup autoneg and flow control advertisement and perform
1213 * autonegotiation. 1189 * autonegotiation.
1214 */ 1190 */
1215 ret_val = e1000_copper_link_autoneg(hw); 1191 ret_val = e1000_copper_link_autoneg(hw);
1216 if (ret_val) 1192 if (ret_val)
1217 return ret_val; 1193 return ret_val;
1218 } else { 1194 } else {
1219 /* 1195 /* PHY will be set to 10H, 10F, 100H or 100F
1220 * PHY will be set to 10H, 10F, 100H or 100F
1221 * depending on user settings. 1196 * depending on user settings.
1222 */ 1197 */
1223 e_dbg("Forcing Speed and Duplex\n"); 1198 e_dbg("Forcing Speed and Duplex\n");
@@ -1228,8 +1203,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
1228 } 1203 }
1229 } 1204 }
1230 1205
1231 /* 1206 /* Check link status. Wait up to 100 microseconds for link to become
1232 * Check link status. Wait up to 100 microseconds for link to become
1233 * valid. 1207 * valid.
1234 */ 1208 */
1235 ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, 1209 ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
@@ -1273,8 +1247,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1273 if (ret_val) 1247 if (ret_val)
1274 return ret_val; 1248 return ret_val;
1275 1249
1276 /* 1250 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
1277 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
1278 * forced whenever speed and duplex are forced. 1251 * forced whenever speed and duplex are forced.
1279 */ 1252 */
1280 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 1253 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1328,8 +1301,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1328 u16 phy_data; 1301 u16 phy_data;
1329 bool link; 1302 bool link;
1330 1303
1331 /* 1304 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
1332 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
1333 * forced whenever speed and duplex are forced. 1305 * forced whenever speed and duplex are forced.
1334 */ 1306 */
1335 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1307 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1370,8 +1342,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1370 if (hw->phy.type != e1000_phy_m88) { 1342 if (hw->phy.type != e1000_phy_m88) {
1371 e_dbg("Link taking longer than expected.\n"); 1343 e_dbg("Link taking longer than expected.\n");
1372 } else { 1344 } else {
1373 /* 1345 /* We didn't get link.
1374 * We didn't get link.
1375 * Reset the DSP and cross our fingers. 1346 * Reset the DSP and cross our fingers.
1376 */ 1347 */
1377 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 1348 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
@@ -1398,8 +1369,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1398 if (ret_val) 1369 if (ret_val)
1399 return ret_val; 1370 return ret_val;
1400 1371
1401 /* 1372 /* Resetting the phy means we need to re-force TX_CLK in the
1402 * Resetting the phy means we need to re-force TX_CLK in the
1403 * Extended PHY Specific Control Register to 25MHz clock from 1373 * Extended PHY Specific Control Register to 25MHz clock from
1404 * the reset value of 2.5MHz. 1374 * the reset value of 2.5MHz.
1405 */ 1375 */
@@ -1408,8 +1378,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1408 if (ret_val) 1378 if (ret_val)
1409 return ret_val; 1379 return ret_val;
1410 1380
1411 /* 1381 /* In addition, we must re-enable CRS on Tx for both half and full
1412 * In addition, we must re-enable CRS on Tx for both half and full
1413 * duplex. 1382 * duplex.
1414 */ 1383 */
1415 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1384 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1573,8 +1542,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1573 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); 1542 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
1574 if (ret_val) 1543 if (ret_val)
1575 return ret_val; 1544 return ret_val;
1576 /* 1545 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1577 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1578 * during Dx states where the power conservation is most 1546 * during Dx states where the power conservation is most
1579 * important. During driver activity we should enable 1547 * important. During driver activity we should enable
1580 * SmartSpeed, so performance is maintained. 1548 * SmartSpeed, so performance is maintained.
@@ -1702,8 +1670,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1702 s32 ret_val; 1670 s32 ret_val;
1703 u16 data, offset, mask; 1671 u16 data, offset, mask;
1704 1672
1705 /* 1673 /* Polarity is determined based on the speed of
1706 * Polarity is determined based on the speed of
1707 * our connection. 1674 * our connection.
1708 */ 1675 */
1709 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1676 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1715,8 +1682,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1715 offset = IGP01E1000_PHY_PCS_INIT_REG; 1682 offset = IGP01E1000_PHY_PCS_INIT_REG;
1716 mask = IGP01E1000_PHY_POLARITY_MASK; 1683 mask = IGP01E1000_PHY_POLARITY_MASK;
1717 } else { 1684 } else {
1718 /* 1685 /* This really only applies to 10Mbps since
1719 * This really only applies to 10Mbps since
1720 * there is no polarity for 100Mbps (always 0). 1686 * there is no polarity for 100Mbps (always 0).
1721 */ 1687 */
1722 offset = IGP01E1000_PHY_PORT_STATUS; 1688 offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1745,8 +1711,7 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
1745 s32 ret_val; 1711 s32 ret_val;
1746 u16 phy_data, offset, mask; 1712 u16 phy_data, offset, mask;
1747 1713
1748 /* 1714 /* Polarity is determined based on the reversal feature being enabled.
1749 * Polarity is determined based on the reversal feature being enabled.
1750 */ 1715 */
1751 if (phy->polarity_correction) { 1716 if (phy->polarity_correction) {
1752 offset = IFE_PHY_EXTENDED_STATUS_CONTROL; 1717 offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -1791,8 +1756,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
1791 msleep(100); 1756 msleep(100);
1792 } 1757 }
1793 1758
1794 /* 1759 /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1795 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1796 * has completed. 1760 * has completed.
1797 */ 1761 */
1798 return ret_val; 1762 return ret_val;
@@ -1814,15 +1778,13 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1814 u16 i, phy_status; 1778 u16 i, phy_status;
1815 1779
1816 for (i = 0; i < iterations; i++) { 1780 for (i = 0; i < iterations; i++) {
1817 /* 1781 /* Some PHYs require the PHY_STATUS register to be read
1818 * Some PHYs require the PHY_STATUS register to be read
1819 * twice due to the link bit being sticky. No harm doing 1782 * twice due to the link bit being sticky. No harm doing
1820 * it across the board. 1783 * it across the board.
1821 */ 1784 */
1822 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status); 1785 ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
1823 if (ret_val) 1786 if (ret_val)
1824 /* 1787 /* If the first read fails, another entity may have
1825 * If the first read fails, another entity may have
1826 * ownership of the resources, wait and try again to 1788 * ownership of the resources, wait and try again to
1827 * see if they have relinquished the resources yet. 1789 * see if they have relinquished the resources yet.
1828 */ 1790 */
@@ -1913,8 +1875,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1913 if (ret_val) 1875 if (ret_val)
1914 return ret_val; 1876 return ret_val;
1915 1877
1916 /* 1878 /* Getting bits 15:9, which represent the combination of
1917 * Getting bits 15:9, which represent the combination of
1918 * coarse and fine gain values. The result is a number 1879 * coarse and fine gain values. The result is a number
1919 * that can be put into the lookup table to obtain the 1880 * that can be put into the lookup table to obtain the
1920 * approximate cable length. 1881 * approximate cable length.
@@ -2285,15 +2246,13 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
2285 e1e_wphy(hw, 0x1796, 0x0008); 2246 e1e_wphy(hw, 0x1796, 0x0008);
2286 /* Change cg_icount + enable integbp for channels BCD */ 2247 /* Change cg_icount + enable integbp for channels BCD */
2287 e1e_wphy(hw, 0x1798, 0xD008); 2248 e1e_wphy(hw, 0x1798, 0xD008);
2288 /* 2249 /* Change cg_icount + enable integbp + change prop_factor_master
2289 * Change cg_icount + enable integbp + change prop_factor_master
2290 * to 8 for channel A 2250 * to 8 for channel A
2291 */ 2251 */
2292 e1e_wphy(hw, 0x1898, 0xD918); 2252 e1e_wphy(hw, 0x1898, 0xD918);
2293 /* Disable AHT in Slave mode on channel A */ 2253 /* Disable AHT in Slave mode on channel A */
2294 e1e_wphy(hw, 0x187A, 0x0800); 2254 e1e_wphy(hw, 0x187A, 0x0800);
2295 /* 2255 /* Enable LPLU and disable AN to 1000 in non-D0a states,
2296 * Enable LPLU and disable AN to 1000 in non-D0a states,
2297 * Enable SPD+B2B 2256 * Enable SPD+B2B
2298 */ 2257 */
2299 e1e_wphy(hw, 0x0019, 0x008D); 2258 e1e_wphy(hw, 0x0019, 0x008D);
@@ -2417,8 +2376,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
2417 e1000e_get_phy_id(hw); 2376 e1000e_get_phy_id(hw);
2418 phy_type = e1000e_get_phy_type_from_id(hw->phy.id); 2377 phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
2419 2378
2420 /* 2379 /* If phy_type is valid, break - we found our
2421 * If phy_type is valid, break - we found our
2422 * PHY address 2380 * PHY address
2423 */ 2381 */
2424 if (phy_type != e1000_phy_unknown) 2382 if (phy_type != e1000_phy_unknown)
@@ -2478,8 +2436,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2478 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2436 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2479 u32 page_shift, page_select; 2437 u32 page_shift, page_select;
2480 2438
2481 /* 2439 /* Page select is register 31 for phy address 1 and 22 for
2482 * Page select is register 31 for phy address 1 and 22 for
2483 * phy address 2 and 3. Page select is shifted only for 2440 * phy address 2 and 3. Page select is shifted only for
2484 * phy address 1. 2441 * phy address 1.
2485 */ 2442 */
@@ -2537,8 +2494,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2537 if (offset > MAX_PHY_MULTI_PAGE_REG) { 2494 if (offset > MAX_PHY_MULTI_PAGE_REG) {
2538 u32 page_shift, page_select; 2495 u32 page_shift, page_select;
2539 2496
2540 /* 2497 /* Page select is register 31 for phy address 1 and 22 for
2541 * Page select is register 31 for phy address 1 and 22 for
2542 * phy address 2 and 3. Page select is shifted only for 2498 * phy address 2 and 3. Page select is shifted only for
2543 * phy address 1. 2499 * phy address 1.
2544 */ 2500 */
@@ -2683,8 +2639,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
2683 return ret_val; 2639 return ret_val;
2684 } 2640 }
2685 2641
2686 /* 2642 /* Enable both PHY wakeup mode and Wakeup register page writes.
2687 * Enable both PHY wakeup mode and Wakeup register page writes.
2688 * Prevent a power state change by disabling ME and Host PHY wakeup. 2643 * Prevent a power state change by disabling ME and Host PHY wakeup.
2689 */ 2644 */
2690 temp = *phy_reg; 2645 temp = *phy_reg;
@@ -2698,8 +2653,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
2698 return ret_val; 2653 return ret_val;
2699 } 2654 }
2700 2655
2701 /* 2656 /* Select Host Wakeup Registers page - caller now able to write
2702 * Select Host Wakeup Registers page - caller now able to write
2703 * registers on the Wakeup registers page 2657 * registers on the Wakeup registers page
2704 */ 2658 */
2705 return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); 2659 return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
@@ -3038,8 +2992,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
3038 if (page == HV_INTC_FC_PAGE_START) 2992 if (page == HV_INTC_FC_PAGE_START)
3039 page = 0; 2993 page = 0;
3040 2994
3041 /* 2995 /* Workaround MDIO accesses being disabled after entering IEEE
3042 * Workaround MDIO accesses being disabled after entering IEEE
3043 * Power Down (when bit 11 of the PHY Control register is set) 2996 * Power Down (when bit 11 of the PHY Control register is set)
3044 */ 2997 */
3045 if ((hw->phy.type == e1000_phy_82578) && 2998 if ((hw->phy.type == e1000_phy_82578) &&
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197fd4a8e..624476cfa727 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
34 34
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
37 e1000_i210.o 37 e1000_i210.o igb_ptp.o
38
39igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e2f748..fdaaf2709d0a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
319 nvm->ops.acquire = igb_acquire_nvm_i210; 319 nvm->ops.acquire = igb_acquire_nvm_i210;
320 nvm->ops.release = igb_release_nvm_i210; 320 nvm->ops.release = igb_release_nvm_i210;
321 nvm->ops.read = igb_read_nvm_srrd_i210; 321 nvm->ops.read = igb_read_nvm_srrd_i210;
322 nvm->ops.write = igb_write_nvm_srwr_i210;
322 nvm->ops.valid_led_default = igb_valid_led_default_i210; 323 nvm->ops.valid_led_default = igb_valid_led_default_i210;
323 break; 324 break;
324 case e1000_i211: 325 case e1000_i211:
@@ -1027,6 +1028,15 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1027 * continue to check for link. 1028 * continue to check for link.
1028 */ 1029 */
1029 hw->mac.get_link_status = !hw->mac.serdes_has_link; 1030 hw->mac.get_link_status = !hw->mac.serdes_has_link;
1031
1032 /* Configure Flow Control now that Auto-Neg has completed.
1033 * First, we need to restore the desired flow control
1034 * settings because we may have had to re-autoneg with a
1035 * different link partner.
1036 */
1037 ret_val = igb_config_fc_after_link_up(hw);
1038 if (ret_val)
1039 hw_dbg("Error configuring flow control\n");
1030 } else { 1040 } else {
1031 ret_val = igb_check_for_copper_link(hw); 1041 ret_val = igb_check_for_copper_link(hw);
1032 } 1042 }
@@ -1277,12 +1287,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1277{ 1287{
1278 u32 ctrl; 1288 u32 ctrl;
1279 s32 ret_val; 1289 s32 ret_val;
1290 u32 phpm_reg;
1280 1291
1281 ctrl = rd32(E1000_CTRL); 1292 ctrl = rd32(E1000_CTRL);
1282 ctrl |= E1000_CTRL_SLU; 1293 ctrl |= E1000_CTRL_SLU;
1283 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1294 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1284 wr32(E1000_CTRL, ctrl); 1295 wr32(E1000_CTRL, ctrl);
1285 1296
1297 /* Clear Go Link Disconnect bit */
1298 if (hw->mac.type >= e1000_82580) {
1299 phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1300 phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1301 wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1302 }
1303
1286 ret_val = igb_setup_serdes_link_82575(hw); 1304 ret_val = igb_setup_serdes_link_82575(hw);
1287 if (ret_val) 1305 if (ret_val)
1288 goto out; 1306 goto out;
@@ -1336,7 +1354,7 @@ out:
1336 **/ 1354 **/
1337static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1355static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1338{ 1356{
1339 u32 ctrl_ext, ctrl_reg, reg; 1357 u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1340 bool pcs_autoneg; 1358 bool pcs_autoneg;
1341 s32 ret_val = E1000_SUCCESS; 1359 s32 ret_val = E1000_SUCCESS;
1342 u16 data; 1360 u16 data;
@@ -1424,27 +1442,45 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1424 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1442 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1425 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1443 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1426 1444
1427 /*
1428 * We force flow control to prevent the CTRL register values from being
1429 * overwritten by the autonegotiated flow control values
1430 */
1431 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1432
1433 if (pcs_autoneg) { 1445 if (pcs_autoneg) {
1434 /* Set PCS register for autoneg */ 1446 /* Set PCS register for autoneg */
1435 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1447 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1436 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1448 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1449
1450 /* Disable force flow control for autoneg */
1451 reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1452
1453 /* Configure flow control advertisement for autoneg */
1454 anadv_reg = rd32(E1000_PCS_ANADV);
1455 anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1456 switch (hw->fc.requested_mode) {
1457 case e1000_fc_full:
1458 case e1000_fc_rx_pause:
1459 anadv_reg |= E1000_TXCW_ASM_DIR;
1460 anadv_reg |= E1000_TXCW_PAUSE;
1461 break;
1462 case e1000_fc_tx_pause:
1463 anadv_reg |= E1000_TXCW_ASM_DIR;
1464 break;
1465 default:
1466 break;
1467 }
1468 wr32(E1000_PCS_ANADV, anadv_reg);
1469
1437 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1470 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1438 } else { 1471 } else {
1439 /* Set PCS register for forced link */ 1472 /* Set PCS register for forced link */
1440 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1473 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1441 1474
1475 /* Force flow control for forced link */
1476 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1477
1442 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1478 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1443 } 1479 }
1444 1480
1445 wr32(E1000_PCS_LCTL, reg); 1481 wr32(E1000_PCS_LCTL, reg);
1446 1482
1447 if (!igb_sgmii_active_82575(hw)) 1483 if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1448 igb_force_mac_fc(hw); 1484 igb_force_mac_fc(hw);
1449 1485
1450 return ret_val; 1486 return ret_val;
@@ -1918,6 +1954,12 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1918 1954
1919 hw->dev_spec._82575.global_device_reset = false; 1955 hw->dev_spec._82575.global_device_reset = false;
1920 1956
1957 /* due to hw errata, global device reset doesn't always
1958 * work on 82580
1959 */
1960 if (hw->mac.type == e1000_82580)
1961 global_device_reset = false;
1962
1921 /* Get current control state. */ 1963 /* Get current control state. */
1922 ctrl = rd32(E1000_CTRL); 1964 ctrl = rd32(E1000_CTRL);
1923 1965
@@ -2233,19 +2275,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
2233 2275
2234 /* enable or disable per user setting */ 2276 /* enable or disable per user setting */
2235 if (!(hw->dev_spec._82575.eee_disable)) { 2277 if (!(hw->dev_spec._82575.eee_disable)) {
2236 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | 2278 u32 eee_su = rd32(E1000_EEE_SU);
2237 E1000_IPCNFG_EEE_100M_AN); 2279
2238 eeer |= (E1000_EEER_TX_LPI_EN | 2280 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2239 E1000_EEER_RX_LPI_EN | 2281 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2240 E1000_EEER_LPI_FC); 2282 E1000_EEER_LPI_FC);
2241 2283
2242 /* keep the LPI clock running before EEE is enabled */ 2284 /* This bit should not be set in normal operation. */
2243 if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { 2285 if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2244 u32 eee_su; 2286 hw_dbg("LPI Clock Stop Bit should not be set!\n");
2245 eee_su = rd32(E1000_EEE_SU); 2287
2246 eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
2247 wr32(E1000_EEE_SU, eee_su);
2248 }
2249 2288
2250 } else { 2289 } else {
2251 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | 2290 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453f5428..44b76b3b6816 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
172#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ 172#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
173#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ 173#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
174#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ 174#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
175#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
175 176
176#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 177#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
177#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 178#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
179#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
178#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 180#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
181#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
179 182
180/* Additional DCA related definitions, note change in position of CPUID */ 183/* Additional DCA related definitions, note change in position of CPUID */
181#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ 184#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41ec3c40..45dce06eff26 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -431,6 +431,10 @@
431#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 431#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
432#define FLOW_CONTROL_TYPE 0x8808 432#define FLOW_CONTROL_TYPE 0x8808
433 433
434/* Transmit Config Word */
435#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
436#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
437
434/* 802.1q VLAN Packet Size */ 438/* 802.1q VLAN Packet Size */
435#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ 439#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
436#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 440#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
@@ -539,6 +543,9 @@
539/* mPHY Near End Digital Loopback Override Bit */ 543/* mPHY Near End Digital Loopback Override Bit */
540#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 544#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
541 545
546#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
547#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
548
542/* PHY Control Register */ 549/* PHY Control Register */
543#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ 550#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
544#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ 551#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
@@ -636,6 +643,7 @@
636/* NVM Word Offsets */ 643/* NVM Word Offsets */
637#define NVM_COMPAT 0x0003 644#define NVM_COMPAT 0x0003
638#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */ 645#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
646#define NVM_VERSION 0x0005
639#define NVM_INIT_CONTROL2_REG 0x000F 647#define NVM_INIT_CONTROL2_REG 0x000F
640#define NVM_INIT_CONTROL3_PORT_B 0x0014 648#define NVM_INIT_CONTROL3_PORT_B 0x0014
641#define NVM_INIT_CONTROL3_PORT_A 0x0024 649#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +661,19 @@
653#define NVM_LED_1_CFG 0x001C 661#define NVM_LED_1_CFG 0x001C
654#define NVM_LED_0_2_CFG 0x001F 662#define NVM_LED_0_2_CFG 0x001F
655 663
664/* NVM version defines */
665#define NVM_ETRACK_WORD 0x0042
666#define NVM_COMB_VER_OFF 0x0083
667#define NVM_COMB_VER_PTR 0x003d
668#define NVM_MAJOR_MASK 0xF000
669#define NVM_MINOR_MASK 0x0FF0
670#define NVM_BUILD_MASK 0x000F
671#define NVM_COMB_VER_MASK 0x00FF
672#define NVM_MAJOR_SHIFT 12
673#define NVM_MINOR_SHIFT 4
674#define NVM_COMB_VER_SHFT 8
675#define NVM_VER_INVALID 0xFFFF
676#define NVM_ETRACK_SHIFT 16
656 677
657#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ 678#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
658#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ 679#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -860,6 +881,7 @@
860#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ 881#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
861#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ 882#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
862#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ 883#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
884#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
863 885
864/* SerDes Control */ 886/* SerDes Control */
865#define E1000_GEN_CTL_READY 0x80000000 887#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f939bc74..fbcdbebb0b5f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -35,11 +35,42 @@
35#include "e1000_hw.h" 35#include "e1000_hw.h"
36#include "e1000_i210.h" 36#include "e1000_i210.h"
37 37
38static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw); 38/**
39static void igb_put_hw_semaphore_i210(struct e1000_hw *hw); 39 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
40static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, 40 * @hw: pointer to the HW structure
41 u16 *data); 41 *
42static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw); 42 * Acquire the HW semaphore to access the PHY or NVM
43 */
44static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
45{
46 u32 swsm;
47 s32 ret_val = E1000_SUCCESS;
48 s32 timeout = hw->nvm.word_size + 1;
49 s32 i = 0;
50
51 /* Get the FW semaphore. */
52 for (i = 0; i < timeout; i++) {
53 swsm = rd32(E1000_SWSM);
54 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
55
56 /* Semaphore acquired if bit latched */
57 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
58 break;
59
60 udelay(50);
61 }
62
63 if (i == timeout) {
64 /* Release semaphores */
65 igb_put_hw_semaphore(hw);
66 hw_dbg("Driver can't access the NVM\n");
67 ret_val = -E1000_ERR_NVM;
68 goto out;
69 }
70
71out:
72 return ret_val;
73}
43 74
44/** 75/**
45 * igb_acquire_nvm_i210 - Request for access to EEPROM 76 * igb_acquire_nvm_i210 - Request for access to EEPROM
@@ -68,6 +99,23 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
68} 99}
69 100
70/** 101/**
102 * igb_put_hw_semaphore_i210 - Release hardware semaphore
103 * @hw: pointer to the HW structure
104 *
105 * Release hardware semaphore used to access the PHY or NVM
106 */
107static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
108{
109 u32 swsm;
110
111 swsm = rd32(E1000_SWSM);
112
113 swsm &= ~E1000_SWSM_SWESMBI;
114
115 wr32(E1000_SWSM, swsm);
116}
117
118/**
71 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore 119 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
72 * @hw: pointer to the HW structure 120 * @hw: pointer to the HW structure
73 * @mask: specifies which semaphore to acquire 121 * @mask: specifies which semaphore to acquire
@@ -138,60 +186,6 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
138} 186}
139 187
140/** 188/**
141 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
142 * @hw: pointer to the HW structure
143 *
144 * Acquire the HW semaphore to access the PHY or NVM
145 **/
146static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
147{
148 u32 swsm;
149 s32 ret_val = E1000_SUCCESS;
150 s32 timeout = hw->nvm.word_size + 1;
151 s32 i = 0;
152
153 /* Get the FW semaphore. */
154 for (i = 0; i < timeout; i++) {
155 swsm = rd32(E1000_SWSM);
156 wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
157
158 /* Semaphore acquired if bit latched */
159 if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
160 break;
161
162 udelay(50);
163 }
164
165 if (i == timeout) {
166 /* Release semaphores */
167 igb_put_hw_semaphore(hw);
168 hw_dbg("Driver can't access the NVM\n");
169 ret_val = -E1000_ERR_NVM;
170 goto out;
171 }
172
173out:
174 return ret_val;
175}
176
177/**
178 * igb_put_hw_semaphore_i210 - Release hardware semaphore
179 * @hw: pointer to the HW structure
180 *
181 * Release hardware semaphore used to access the PHY or NVM
182 **/
183static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
184{
185 u32 swsm;
186
187 swsm = rd32(E1000_SWSM);
188
189 swsm &= ~E1000_SWSM_SWESMBI;
190
191 wr32(E1000_SWSM, swsm);
192}
193
194/**
195 * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register 189 * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
196 * @hw: pointer to the HW structure 190 * @hw: pointer to the HW structure
197 * @offset: offset of word in the Shadow Ram to read 191 * @offset: offset of word in the Shadow Ram to read
@@ -229,49 +223,6 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
229} 223}
230 224
231/** 225/**
232 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
233 * @hw: pointer to the HW structure
234 * @offset: offset within the Shadow RAM to be written to
235 * @words: number of words to write
236 * @data: 16 bit word(s) to be written to the Shadow RAM
237 *
238 * Writes data to Shadow RAM at offset using EEWR register.
239 *
240 * If e1000_update_nvm_checksum is not called after this function , the
241 * data will not be committed to FLASH and also Shadow RAM will most likely
242 * contain an invalid checksum.
243 *
244 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
245 * partially written.
246 **/
247s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
248 u16 *data)
249{
250 s32 status = E1000_SUCCESS;
251 u16 i, count;
252
253 /* We cannot hold synchronization semaphores for too long,
254 * because of forceful takeover procedure. However it is more efficient
255 * to write in bursts than synchronizing access for each word. */
256 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
257 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
258 E1000_EERD_EEWR_MAX_COUNT : (words - i);
259 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
260 status = igb_write_nvm_srwr(hw, offset, count,
261 data + i);
262 hw->nvm.ops.release(hw);
263 } else {
264 status = E1000_ERR_SWFW_SYNC;
265 }
266
267 if (status != E1000_SUCCESS)
268 break;
269 }
270
271 return status;
272}
273
274/**
275 * igb_write_nvm_srwr - Write to Shadow Ram using EEWR 226 * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
276 * @hw: pointer to the HW structure 227 * @hw: pointer to the HW structure
277 * @offset: offset within the Shadow Ram to be written to 228 * @offset: offset within the Shadow Ram to be written to
@@ -329,6 +280,50 @@ out:
329} 280}
330 281
331/** 282/**
283 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
284 * @hw: pointer to the HW structure
285 * @offset: offset within the Shadow RAM to be written to
286 * @words: number of words to write
287 * @data: 16 bit word(s) to be written to the Shadow RAM
288 *
289 * Writes data to Shadow RAM at offset using EEWR register.
290 *
291 * If e1000_update_nvm_checksum is not called after this function , the
292 * data will not be committed to FLASH and also Shadow RAM will most likely
293 * contain an invalid checksum.
294 *
295 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
296 * partially written.
297 */
298s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
299 u16 *data)
300{
301 s32 status = E1000_SUCCESS;
302 u16 i, count;
303
304 /* We cannot hold synchronization semaphores for too long,
305 * because of forceful takeover procedure. However it is more efficient
306 * to write in bursts than synchronizing access for each word.
307 */
308 for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
309 count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
310 E1000_EERD_EEWR_MAX_COUNT : (words - i);
311 if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
312 status = igb_write_nvm_srwr(hw, offset, count,
313 data + i);
314 hw->nvm.ops.release(hw);
315 } else {
316 status = E1000_ERR_SWFW_SYNC;
317 }
318
319 if (status != E1000_SUCCESS)
320 break;
321 }
322
323 return status;
324}
325
326/**
332 * igb_read_nvm_i211 - Read NVM wrapper function for I211 327 * igb_read_nvm_i211 - Read NVM wrapper function for I211
333 * @hw: pointer to the HW structure 328 * @hw: pointer to the HW structure
334 * @address: the word address (aka eeprom offset) to read 329 * @address: the word address (aka eeprom offset) to read
@@ -350,16 +345,40 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
350 if (ret_val != E1000_SUCCESS) 345 if (ret_val != E1000_SUCCESS)
351 hw_dbg("MAC Addr not found in iNVM\n"); 346 hw_dbg("MAC Addr not found in iNVM\n");
352 break; 347 break;
353 case NVM_ID_LED_SETTINGS:
354 case NVM_INIT_CTRL_2: 348 case NVM_INIT_CTRL_2:
349 ret_val = igb_read_invm_i211(hw, (u8)offset, data);
350 if (ret_val != E1000_SUCCESS) {
351 *data = NVM_INIT_CTRL_2_DEFAULT_I211;
352 ret_val = E1000_SUCCESS;
353 }
354 break;
355 case NVM_INIT_CTRL_4: 355 case NVM_INIT_CTRL_4:
356 ret_val = igb_read_invm_i211(hw, (u8)offset, data);
357 if (ret_val != E1000_SUCCESS) {
358 *data = NVM_INIT_CTRL_4_DEFAULT_I211;
359 ret_val = E1000_SUCCESS;
360 }
361 break;
356 case NVM_LED_1_CFG: 362 case NVM_LED_1_CFG:
363 ret_val = igb_read_invm_i211(hw, (u8)offset, data);
364 if (ret_val != E1000_SUCCESS) {
365 *data = NVM_LED_1_CFG_DEFAULT_I211;
366 ret_val = E1000_SUCCESS;
367 }
368 break;
357 case NVM_LED_0_2_CFG: 369 case NVM_LED_0_2_CFG:
358 igb_read_invm_i211(hw, offset, data); 370 igb_read_invm_i211(hw, offset, data);
371 if (ret_val != E1000_SUCCESS) {
372 *data = NVM_LED_0_2_CFG_DEFAULT_I211;
373 ret_val = E1000_SUCCESS;
374 }
359 break; 375 break;
360 case NVM_COMPAT: 376 case NVM_ID_LED_SETTINGS:
361 *data = ID_LED_DEFAULT_I210; 377 ret_val = igb_read_invm_i211(hw, (u8)offset, data);
362 break; 378 if (ret_val != E1000_SUCCESS) {
379 *data = ID_LED_RESERVED_FFFF;
380 ret_val = E1000_SUCCESS;
381 }
363 case NVM_SUB_DEV_ID: 382 case NVM_SUB_DEV_ID:
364 *data = hw->subsystem_device_id; 383 *data = hw->subsystem_device_id;
365 break; 384 break;
@@ -423,6 +442,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
423} 442}
424 443
425/** 444/**
445 * igb_read_invm_version - Reads iNVM version and image type
446 * @hw: pointer to the HW structure
447 * @invm_ver: version structure for the version read
448 *
449 * Reads iNVM version and image type.
450 **/
451s32 igb_read_invm_version(struct e1000_hw *hw,
452 struct e1000_fw_version *invm_ver) {
453 u32 *record = NULL;
454 u32 *next_record = NULL;
455 u32 i = 0;
456 u32 invm_dword = 0;
457 u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
458 E1000_INVM_RECORD_SIZE_IN_BYTES);
459 u32 buffer[E1000_INVM_SIZE];
460 s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
461 u16 version = 0;
462
463 /* Read iNVM memory */
464 for (i = 0; i < E1000_INVM_SIZE; i++) {
465 invm_dword = rd32(E1000_INVM_DATA_REG(i));
466 buffer[i] = invm_dword;
467 }
468
469 /* Read version number */
470 for (i = 1; i < invm_blocks; i++) {
471 record = &buffer[invm_blocks - i];
472 next_record = &buffer[invm_blocks - i + 1];
473
474 /* Check if we have first version location used */
475 if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
476 version = 0;
477 status = E1000_SUCCESS;
478 break;
479 }
480 /* Check if we have second version location used */
481 else if ((i == 1) &&
482 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
483 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
484 status = E1000_SUCCESS;
485 break;
486 }
487 /* Check if we have odd version location
488 * used and it is the last one used
489 */
490 else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
491 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
492 (i != 1))) {
493 version = (*next_record & E1000_INVM_VER_FIELD_TWO)
494 >> 13;
495 status = E1000_SUCCESS;
496 break;
497 }
498 /* Check if we have even version location
499 * used and it is the last one used
500 */
501 else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
502 ((*record & 0x3) == 0)) {
503 version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
504 status = E1000_SUCCESS;
505 break;
506 }
507 }
508
509 if (status == E1000_SUCCESS) {
510 invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
511 >> E1000_INVM_MAJOR_SHIFT;
512 invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
513 }
514 /* Read Image Type */
515 for (i = 1; i < invm_blocks; i++) {
516 record = &buffer[invm_blocks - i];
517 next_record = &buffer[invm_blocks - i + 1];
518
519 /* Check if we have image type in first location used */
520 if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
521 invm_ver->invm_img_type = 0;
522 status = E1000_SUCCESS;
523 break;
524 }
525 /* Check if we have image type in first location used */
526 else if ((((*record & 0x3) == 0) &&
527 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
528 ((((*record & 0x3) != 0) && (i != 1)))) {
529 invm_ver->invm_img_type =
530 (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
531 status = E1000_SUCCESS;
532 break;
533 }
534 }
535 return status;
536}
537
538/**
426 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum 539 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
427 * @hw: pointer to the HW structure 540 * @hw: pointer to the HW structure
428 * 541 *
@@ -519,6 +632,28 @@ out:
519} 632}
520 633
521/** 634/**
635 * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
636 * @hw: pointer to the HW structure
637 *
638 */
639static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
640{
641 s32 ret_val = -E1000_ERR_NVM;
642 u32 i, reg;
643
644 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
645 reg = rd32(E1000_EECD);
646 if (reg & E1000_EECD_FLUDONE_I210) {
647 ret_val = E1000_SUCCESS;
648 break;
649 }
650 udelay(5);
651 }
652
653 return ret_val;
654}
655
656/**
522 * igb_update_flash_i210 - Commit EEPROM to the flash 657 * igb_update_flash_i210 - Commit EEPROM to the flash
523 * @hw: pointer to the HW structure 658 * @hw: pointer to the HW structure
524 * 659 *
@@ -548,28 +683,6 @@ out:
548} 683}
549 684
550/** 685/**
551 * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
552 * @hw: pointer to the HW structure
553 *
554 **/
555s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
556{
557 s32 ret_val = -E1000_ERR_NVM;
558 u32 i, reg;
559
560 for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
561 reg = rd32(E1000_EECD);
562 if (reg & E1000_EECD_FLUDONE_I210) {
563 ret_val = E1000_SUCCESS;
564 break;
565 }
566 udelay(5);
567 }
568
569 return ret_val;
570}
571
572/**
573 * igb_valid_led_default_i210 - Verify a valid default LED config 686 * igb_valid_led_default_i210 - Verify a valid default LED config
574 * @hw: pointer to the HW structure 687 * @hw: pointer to the HW structure
575 * @data: pointer to the NVM (EEPROM) 688 * @data: pointer to the NVM (EEPROM)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3f50bc..1c89358a99ab 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data); 43extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words, 44extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
45 u16 *data); 45 u16 *data);
46extern s32 igb_read_invm_version(struct e1000_hw *hw,
47 struct e1000_fw_version *invm_ver);
46 48
47#define E1000_STM_OPCODE 0xDB00 49#define E1000_STM_OPCODE 0xDB00
48#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 50#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
65 67
66#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 68#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
67#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 69#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
70#define E1000_INVM_ULT_BYTES_SIZE 8
71#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
72#define E1000_INVM_VER_FIELD_ONE 0x1FF8
73#define E1000_INVM_VER_FIELD_TWO 0x7FE000
74#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
75
76#define E1000_INVM_MAJOR_MASK 0x3F0
77#define E1000_INVM_MINOR_MASK 0xF
78#define E1000_INVM_MAJOR_SHIFT 4
68 79
69#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ 80#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
70 (ID_LED_OFF1_OFF2 << 4) | \ 81 (ID_LED_OFF1_OFF2 << 4) | \
@@ -73,4 +84,10 @@ enum E1000_INVM_STRUCTURE_TYPE {
73 (ID_LED_DEF1_DEF2 << 4) | \ 84 (ID_LED_DEF1_DEF2 << 4) | \
74 (ID_LED_DEF1_DEF2)) 85 (ID_LED_DEF1_DEF2))
75 86
87/* NVM offset defaults for i211 device */
88#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
89#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
90#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
91#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
92
76#endif 93#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145ac762..101e6e4da97f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -839,6 +839,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
839{ 839{
840 struct e1000_mac_info *mac = &hw->mac; 840 struct e1000_mac_info *mac = &hw->mac;
841 s32 ret_val = 0; 841 s32 ret_val = 0;
842 u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
842 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 843 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
843 u16 speed, duplex; 844 u16 speed, duplex;
844 845
@@ -1040,6 +1041,129 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
1040 goto out; 1041 goto out;
1041 } 1042 }
1042 } 1043 }
1044 /* Check for the case where we have SerDes media and auto-neg is
1045 * enabled. In this case, we need to check and see if Auto-Neg
1046 * has completed, and if so, how the PHY and link partner has
1047 * flow control configured.
1048 */
1049 if ((hw->phy.media_type == e1000_media_type_internal_serdes)
1050 && mac->autoneg) {
1051 /* Read the PCS_LSTS and check to see if AutoNeg
1052 * has completed.
1053 */
1054 pcs_status_reg = rd32(E1000_PCS_LSTAT);
1055
1056 if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
1057 hw_dbg("PCS Auto Neg has not completed.\n");
1058 return ret_val;
1059 }
1060
1061 /* The AutoNeg process has completed, so we now need to
1062 * read both the Auto Negotiation Advertisement
1063 * Register (PCS_ANADV) and the Auto_Negotiation Base
1064 * Page Ability Register (PCS_LPAB) to determine how
1065 * flow control was negotiated.
1066 */
1067 pcs_adv_reg = rd32(E1000_PCS_ANADV);
1068 pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
1069
1070 /* Two bits in the Auto Negotiation Advertisement Register
1071 * (PCS_ANADV) and two bits in the Auto Negotiation Base
1072 * Page Ability Register (PCS_LPAB) determine flow control
1073 * for both the PHY and the link partner. The following
1074 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1075 * 1999, describes these PAUSE resolution bits and how flow
1076 * control is determined based upon these settings.
1077 * NOTE: DC = Don't Care
1078 *
1079 * LOCAL DEVICE | LINK PARTNER
1080 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1081 *-------|---------|-------|---------|--------------------
1082 * 0 | 0 | DC | DC | e1000_fc_none
1083 * 0 | 1 | 0 | DC | e1000_fc_none
1084 * 0 | 1 | 1 | 0 | e1000_fc_none
1085 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1086 * 1 | 0 | 0 | DC | e1000_fc_none
1087 * 1 | DC | 1 | DC | e1000_fc_full
1088 * 1 | 1 | 0 | 0 | e1000_fc_none
1089 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1090 *
1091 * Are both PAUSE bits set to 1? If so, this implies
1092 * Symmetric Flow Control is enabled at both ends. The
1093 * ASM_DIR bits are irrelevant per the spec.
1094 *
1095 * For Symmetric Flow Control:
1096 *
1097 * LOCAL DEVICE | LINK PARTNER
1098 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1099 *-------|---------|-------|---------|--------------------
1100 * 1 | DC | 1 | DC | e1000_fc_full
1101 *
1102 */
1103 if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1104 (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
1105 /* Now we need to check if the user selected Rx ONLY
1106 * of pause frames. In this case, we had to advertise
1107 * FULL flow control because we could not advertise Rx
1108 * ONLY. Hence, we must now check to see if we need to
1109 * turn OFF the TRANSMISSION of PAUSE frames.
1110 */
1111 if (hw->fc.requested_mode == e1000_fc_full) {
1112 hw->fc.current_mode = e1000_fc_full;
1113 hw_dbg("Flow Control = FULL.\n");
1114 } else {
1115 hw->fc.current_mode = e1000_fc_rx_pause;
1116 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1117 }
1118 }
1119 /* For receiving PAUSE frames ONLY.
1120 *
1121 * LOCAL DEVICE | LINK PARTNER
1122 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1123 *-------|---------|-------|---------|--------------------
1124 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1125 */
1126 else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
1127 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1128 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1129 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1130 hw->fc.current_mode = e1000_fc_tx_pause;
1131 hw_dbg("Flow Control = Tx PAUSE frames only.\n");
1132 }
1133 /* For transmitting PAUSE frames ONLY.
1134 *
1135 * LOCAL DEVICE | LINK PARTNER
1136 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1137 *-------|---------|-------|---------|--------------------
1138 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1139 */
1140 else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
1141 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
1142 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
1143 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
1144 hw->fc.current_mode = e1000_fc_rx_pause;
1145 hw_dbg("Flow Control = Rx PAUSE frames only.\n");
1146 } else {
1147 /* Per the IEEE spec, at this point flow control
1148 * should be disabled.
1149 */
1150 hw->fc.current_mode = e1000_fc_none;
1151 hw_dbg("Flow Control = NONE.\n");
1152 }
1153
1154 /* Now we call a subroutine to actually force the MAC
1155 * controller to use the correct flow control settings.
1156 */
1157 pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
1158 pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1159 wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
1160
1161 ret_val = igb_force_mac_fc(hw);
1162 if (ret_val) {
1163 hw_dbg("Error forcing flow control settings\n");
1164 return ret_val;
1165 }
1166 }
1043 1167
1044out: 1168out:
1045 return ret_val; 1169 return ret_val;
@@ -1391,6 +1515,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
1391{ 1515{
1392 s32 ret_val = 0; 1516 s32 ret_val = 0;
1393 1517
1518 /* All MDI settings are supported on 82580 and newer. */
1519 if (hw->mac.type >= e1000_82580)
1520 goto out;
1521
1394 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { 1522 if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
1395 hw_dbg("Invalid MDI setting detected\n"); 1523 hw_dbg("Invalid MDI setting detected\n");
1396 hw->phy.mdix = 1; 1524 hw->phy.mdix = 1;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e51e30..e2b2c4b9c951 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
33#include "e1000_phy.h" 33#include "e1000_phy.h"
34#include "e1000_nvm.h" 34#include "e1000_nvm.h"
35#include "e1000_defines.h" 35#include "e1000_defines.h"
36#include "e1000_i210.h"
36 37
37/* 38/*
38 * Functions that should not be called directly from drivers but can be used 39 * Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf3f357..fbb7604db364 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -438,7 +438,7 @@ out:
438s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 438s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
439{ 439{
440 struct e1000_nvm_info *nvm = &hw->nvm; 440 struct e1000_nvm_info *nvm = &hw->nvm;
441 s32 ret_val; 441 s32 ret_val = -E1000_ERR_NVM;
442 u16 widx = 0; 442 u16 widx = 0;
443 443
444 /* 444 /*
@@ -448,22 +448,21 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
448 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 448 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
449 (words == 0)) { 449 (words == 0)) {
450 hw_dbg("nvm parameter(s) out of bounds\n"); 450 hw_dbg("nvm parameter(s) out of bounds\n");
451 ret_val = -E1000_ERR_NVM; 451 return ret_val;
452 goto out;
453 } 452 }
454 453
455 ret_val = hw->nvm.ops.acquire(hw);
456 if (ret_val)
457 goto out;
458
459 msleep(10);
460
461 while (widx < words) { 454 while (widx < words) {
462 u8 write_opcode = NVM_WRITE_OPCODE_SPI; 455 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
463 456
464 ret_val = igb_ready_nvm_eeprom(hw); 457 ret_val = nvm->ops.acquire(hw);
465 if (ret_val) 458 if (ret_val)
466 goto release; 459 return ret_val;
460
461 ret_val = igb_ready_nvm_eeprom(hw);
462 if (ret_val) {
463 nvm->ops.release(hw);
464 return ret_val;
465 }
467 466
468 igb_standby_nvm(hw); 467 igb_standby_nvm(hw);
469 468
@@ -497,13 +496,10 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
497 break; 496 break;
498 } 497 }
499 } 498 }
499 usleep_range(1000, 2000);
500 nvm->ops.release(hw);
500 } 501 }
501 502
502 msleep(10);
503release:
504 hw->nvm.ops.release(hw);
505
506out:
507 return ret_val; 503 return ret_val;
508} 504}
509 505
@@ -710,3 +706,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
710out: 706out:
711 return ret_val; 707 return ret_val;
712} 708}
709
710/**
711 * igb_get_fw_version - Get firmware version information
712 * @hw: pointer to the HW structure
713 * @fw_vers: pointer to output structure
714 *
715 * unsupported MAC types will return all 0 version structure
716 **/
717void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
718{
719 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
720 u16 fw_version;
721
722 memset(fw_vers, 0, sizeof(struct e1000_fw_version));
723
724 switch (hw->mac.type) {
725 case e1000_i211:
726 igb_read_invm_version(hw, fw_vers);
727 return;
728 case e1000_82575:
729 case e1000_82576:
730 case e1000_82580:
731 case e1000_i350:
732 case e1000_i210:
733 break;
734 default:
735 return;
736 }
737 /* basic eeprom version numbers */
738 hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
739 fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
740 fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
741
742 /* etrack id */
743 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
744 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
745 fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
746
747 switch (hw->mac.type) {
748 case e1000_i210:
749 case e1000_i350:
750 /* find combo image version */
751 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
752 if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
753
754 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
755 + 1), 1, &comb_verh);
756 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
757 1, &comb_verl);
758
759 /* get Option Rom version if it exists and is valid */
760 if ((comb_verh && comb_verl) &&
761 ((comb_verh != NVM_VER_INVALID) &&
762 (comb_verl != NVM_VER_INVALID))) {
763
764 fw_vers->or_valid = true;
765 fw_vers->or_major =
766 comb_verl >> NVM_COMB_VER_SHFT;
767 fw_vers->or_build =
768 ((comb_verl << NVM_COMB_VER_SHFT)
769 | (comb_verh >> NVM_COMB_VER_SHFT));
770 fw_vers->or_patch =
771 comb_verh & NVM_COMB_VER_MASK;
772 }
773 }
774 break;
775 default:
776 break;
777 }
778 return;
779}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b0228cac0..7012d458c6f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
40s32 igb_validate_nvm_checksum(struct e1000_hw *hw); 40s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
41s32 igb_update_nvm_checksum(struct e1000_hw *hw); 41s32 igb_update_nvm_checksum(struct e1000_hw *hw);
42 42
43struct e1000_fw_version {
44 u32 etrack_id;
45 u16 eep_major;
46 u16 eep_minor;
47
48 u8 invm_major;
49 u8 invm_minor;
50 u8 invm_img_type;
51
52 bool or_valid;
53 u16 or_major;
54 u16 or_build;
55 u16 or_patch;
56};
57void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
58
43#endif 59#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..fe76004aca4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1207 u16 phy_data; 1207 u16 phy_data;
1208 bool link; 1208 bool link;
1209 1209
1210 /* 1210 /* I210 and I211 devices support Auto-Crossover in forced operation. */
1211 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 1211 if (phy->type != e1000_phy_i210) {
1212 * forced whenever speed and duplex are forced. 1212 /*
1213 */ 1213 * Clear Auto-Crossover to force MDI manually. M88E1000
1214 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1214 * requires MDI forced whenever speed and duplex are forced.
1215 if (ret_val) 1215 */
1216 goto out; 1216 ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
1217 &phy_data);
1218 if (ret_val)
1219 goto out;
1217 1220
1218 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; 1221 phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
1219 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 1222 ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
1220 if (ret_val) 1223 phy_data);
1221 goto out; 1224 if (ret_val)
1225 goto out;
1222 1226
1223 hw_dbg("M88E1000 PSCR: %X\n", phy_data); 1227 hw_dbg("M88E1000 PSCR: %X\n", phy_data);
1228 }
1224 1229
1225 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 1230 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
1226 if (ret_val) 1231 if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
1710 1715
1711 switch (hw->phy.id) { 1716 switch (hw->phy.id) {
1712 case I210_I_PHY_ID: 1717 case I210_I_PHY_ID:
1718 /* Get cable length from PHY Cable Diagnostics Control Reg */
1719 ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
1720 (I347AT4_PCDL + phy->addr),
1721 &phy_data);
1722 if (ret_val)
1723 return ret_val;
1724
1725 /* Check if the unit of cable length is meters or cm */
1726 ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
1727 I347AT4_PCDC, &phy_data2);
1728 if (ret_val)
1729 return ret_val;
1730
1731 is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
1732
1733 /* Populate the phy structure with cable length in meters */
1734 phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
1735 phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
1736 phy->cable_length = phy_data / (is_cm ? 100 : 1);
1737 break;
1713 case I347AT4_E_PHY_ID: 1738 case I347AT4_E_PHY_ID:
1714 /* Remember the original page select and set it to 7 */ 1739 /* Remember the original page select and set it to 7 */
1715 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, 1740 ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299bfcb9..ed282f877d9a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
124#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ 124#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
125#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ 125#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
126#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ 126#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
127#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
127 128
128/* Enable flexible speed on link-up */ 129/* Enable flexible speed on link-up */
129#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ 130#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230c0592..17f1686ee411 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,16 +34,16 @@
34#include "e1000_mac.h" 34#include "e1000_mac.h"
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#ifdef CONFIG_IGB_PTP
38#include <linux/clocksource.h> 37#include <linux/clocksource.h>
39#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
40#include <linux/ptp_clock_kernel.h> 39#include <linux/ptp_clock_kernel.h>
41#endif /* CONFIG_IGB_PTP */
42#include <linux/bitops.h> 40#include <linux/bitops.h>
43#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
44 42
45struct igb_adapter; 43struct igb_adapter;
46 44
45#define E1000_PCS_CFG_IGN_SD 1
46
47/* Interrupt defines */ 47/* Interrupt defines */
48#define IGB_START_ITR 648 /* ~6000 ints/sec */ 48#define IGB_START_ITR 648 /* ~6000 ints/sec */
49#define IGB_4K_ITR 980 49#define IGB_4K_ITR 980
@@ -132,9 +132,10 @@ struct vf_data_storage {
132#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 132#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
133 133
134/* Supported Rx Buffer Sizes */ 134/* Supported Rx Buffer Sizes */
135#define IGB_RXBUFFER_256 256 135#define IGB_RXBUFFER_256 256
136#define IGB_RXBUFFER_16384 16384 136#define IGB_RXBUFFER_2048 2048
137#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 137#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
138#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
138 139
139/* How many Tx Descriptors do we need to call netif_wake_queue ? */ 140/* How many Tx Descriptors do we need to call netif_wake_queue ? */
140#define IGB_TX_QUEUE_WAKE 16 141#define IGB_TX_QUEUE_WAKE 16
@@ -151,11 +152,18 @@ struct vf_data_storage {
151 152
152#define IGB_MNG_VLAN_NONE -1 153#define IGB_MNG_VLAN_NONE -1
153 154
154#define IGB_TX_FLAGS_CSUM 0x00000001 155enum igb_tx_flags {
155#define IGB_TX_FLAGS_VLAN 0x00000002 156 /* cmd_type flags */
156#define IGB_TX_FLAGS_TSO 0x00000004 157 IGB_TX_FLAGS_VLAN = 0x01,
157#define IGB_TX_FLAGS_IPV4 0x00000008 158 IGB_TX_FLAGS_TSO = 0x02,
158#define IGB_TX_FLAGS_TSTAMP 0x00000010 159 IGB_TX_FLAGS_TSTAMP = 0x04,
160
161 /* olinfo flags */
162 IGB_TX_FLAGS_IPV4 = 0x10,
163 IGB_TX_FLAGS_CSUM = 0x20,
164};
165
166/* VLAN info */
159#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 167#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
160#define IGB_TX_FLAGS_VLAN_SHIFT 16 168#define IGB_TX_FLAGS_VLAN_SHIFT 16
161 169
@@ -174,11 +182,9 @@ struct igb_tx_buffer {
174}; 182};
175 183
176struct igb_rx_buffer { 184struct igb_rx_buffer {
177 struct sk_buff *skb;
178 dma_addr_t dma; 185 dma_addr_t dma;
179 struct page *page; 186 struct page *page;
180 dma_addr_t page_dma; 187 unsigned int page_offset;
181 u32 page_offset;
182}; 188};
183 189
184struct igb_tx_queue_stats { 190struct igb_tx_queue_stats {
@@ -205,22 +211,6 @@ struct igb_ring_container {
205 u8 itr; /* current ITR setting for ring */ 211 u8 itr; /* current ITR setting for ring */
206}; 212};
207 213
208struct igb_q_vector {
209 struct igb_adapter *adapter; /* backlink */
210 int cpu; /* CPU for DCA */
211 u32 eims_value; /* EIMS mask value */
212
213 struct igb_ring_container rx, tx;
214
215 struct napi_struct napi;
216
217 u16 itr_val;
218 u8 set_itr;
219 void __iomem *itr_register;
220
221 char name[IFNAMSIZ + 9];
222};
223
224struct igb_ring { 214struct igb_ring {
225 struct igb_q_vector *q_vector; /* backlink to q_vector */ 215 struct igb_q_vector *q_vector; /* backlink to q_vector */
226 struct net_device *netdev; /* back pointer to net_device */ 216 struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +222,17 @@ struct igb_ring {
232 void *desc; /* descriptor ring memory */ 222 void *desc; /* descriptor ring memory */
233 unsigned long flags; /* ring specific flags */ 223 unsigned long flags; /* ring specific flags */
234 void __iomem *tail; /* pointer to ring tail register */ 224 void __iomem *tail; /* pointer to ring tail register */
225 dma_addr_t dma; /* phys address of the ring */
226 unsigned int size; /* length of desc. ring in bytes */
235 227
236 u16 count; /* number of desc. in the ring */ 228 u16 count; /* number of desc. in the ring */
237 u8 queue_index; /* logical index of the ring*/ 229 u8 queue_index; /* logical index of the ring*/
238 u8 reg_idx; /* physical index of the ring */ 230 u8 reg_idx; /* physical index of the ring */
239 u32 size; /* length of desc. ring in bytes */
240 231
241 /* everything past this point are written often */ 232 /* everything past this point are written often */
242 u16 next_to_clean ____cacheline_aligned_in_smp; 233 u16 next_to_clean;
243 u16 next_to_use; 234 u16 next_to_use;
235 u16 next_to_alloc;
244 236
245 union { 237 union {
246 /* TX */ 238 /* TX */
@@ -251,12 +243,30 @@ struct igb_ring {
251 }; 243 };
252 /* RX */ 244 /* RX */
253 struct { 245 struct {
246 struct sk_buff *skb;
254 struct igb_rx_queue_stats rx_stats; 247 struct igb_rx_queue_stats rx_stats;
255 struct u64_stats_sync rx_syncp; 248 struct u64_stats_sync rx_syncp;
256 }; 249 };
257 }; 250 };
258 /* Items past this point are only used during ring alloc / free */ 251} ____cacheline_internodealigned_in_smp;
259 dma_addr_t dma; /* phys address of the ring */ 252
253struct igb_q_vector {
254 struct igb_adapter *adapter; /* backlink */
255 int cpu; /* CPU for DCA */
256 u32 eims_value; /* EIMS mask value */
257
258 u16 itr_val;
259 u8 set_itr;
260 void __iomem *itr_register;
261
262 struct igb_ring_container rx, tx;
263
264 struct napi_struct napi;
265 struct rcu_head rcu; /* to avoid race with update stats on free */
266 char name[IFNAMSIZ + 9];
267
268 /* for dynamic allocation of rings associated with this q_vector */
269 struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
260}; 270};
261 271
262enum e1000_ring_flags_t { 272enum e1000_ring_flags_t {
@@ -362,8 +372,6 @@ struct igb_adapter {
362 u32 eims_other; 372 u32 eims_other;
363 373
364 /* to not mess up cache alignment, always add to the bottom */ 374 /* to not mess up cache alignment, always add to the bottom */
365 u32 eeprom_wol;
366
367 u16 tx_ring_count; 375 u16 tx_ring_count;
368 u16 rx_ring_count; 376 u16 rx_ring_count;
369 unsigned int vfs_allocated_count; 377 unsigned int vfs_allocated_count;
@@ -373,7 +381,6 @@ struct igb_adapter {
373 u32 wvbr; 381 u32 wvbr;
374 u32 *shadow_vfta; 382 u32 *shadow_vfta;
375 383
376#ifdef CONFIG_IGB_PTP
377 struct ptp_clock *ptp_clock; 384 struct ptp_clock *ptp_clock;
378 struct ptp_clock_info ptp_caps; 385 struct ptp_clock_info ptp_caps;
379 struct delayed_work ptp_overflow_work; 386 struct delayed_work ptp_overflow_work;
@@ -382,17 +389,19 @@ struct igb_adapter {
382 spinlock_t tmreg_lock; 389 spinlock_t tmreg_lock;
383 struct cyclecounter cc; 390 struct cyclecounter cc;
384 struct timecounter tc; 391 struct timecounter tc;
385#endif /* CONFIG_IGB_PTP */
386 392
387 char fw_version[32]; 393 char fw_version[32];
388}; 394};
389 395
390#define IGB_FLAG_HAS_MSI (1 << 0) 396#define IGB_FLAG_HAS_MSI (1 << 0)
391#define IGB_FLAG_DCA_ENABLED (1 << 1) 397#define IGB_FLAG_DCA_ENABLED (1 << 1)
392#define IGB_FLAG_QUAD_PORT_A (1 << 2) 398#define IGB_FLAG_QUAD_PORT_A (1 << 2)
393#define IGB_FLAG_QUEUE_PAIRS (1 << 3) 399#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
394#define IGB_FLAG_DMAC (1 << 4) 400#define IGB_FLAG_DMAC (1 << 4)
395#define IGB_FLAG_PTP (1 << 5) 401#define IGB_FLAG_PTP (1 << 5)
402#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
403#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
404#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
396 405
397/* DMA Coalescing defines */ 406/* DMA Coalescing defines */
398#define IGB_MIN_TXPBSIZE 20408 407#define IGB_MIN_TXPBSIZE 20408
@@ -436,18 +445,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
436extern void igb_set_ethtool_ops(struct net_device *); 445extern void igb_set_ethtool_ops(struct net_device *);
437extern void igb_power_up_link(struct igb_adapter *); 446extern void igb_power_up_link(struct igb_adapter *);
438extern void igb_set_fw_version(struct igb_adapter *); 447extern void igb_set_fw_version(struct igb_adapter *);
439#ifdef CONFIG_IGB_PTP
440extern void igb_ptp_init(struct igb_adapter *adapter); 448extern void igb_ptp_init(struct igb_adapter *adapter);
441extern void igb_ptp_stop(struct igb_adapter *adapter); 449extern void igb_ptp_stop(struct igb_adapter *adapter);
442extern void igb_ptp_reset(struct igb_adapter *adapter); 450extern void igb_ptp_reset(struct igb_adapter *adapter);
443extern void igb_ptp_tx_work(struct work_struct *work); 451extern void igb_ptp_tx_work(struct work_struct *work);
444extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); 452extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
445extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 453extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
446 union e1000_adv_rx_desc *rx_desc, 454 struct sk_buff *skb);
455extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
456 unsigned char *va,
447 struct sk_buff *skb); 457 struct sk_buff *skb);
458static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
459 union e1000_adv_rx_desc *rx_desc,
460 struct sk_buff *skb)
461{
462 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
463 !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
464 igb_ptp_rx_rgtstamp(q_vector, skb);
465}
466
448extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 467extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
449 struct ifreq *ifr, int cmd); 468 struct ifreq *ifr, int cmd);
450#endif /* CONFIG_IGB_PTP */
451 469
452static inline s32 igb_reset_phy(struct e1000_hw *hw) 470static inline s32 igb_reset_phy(struct e1000_hw *hw)
453{ 471{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea012849825..bfe9208c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
40#include <linux/highmem.h>
40 41
41#include "igb.h" 42#include "igb.h"
42 43
@@ -1623,6 +1624,20 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1623 reg &= ~E1000_CONNSW_ENRGSRC; 1624 reg &= ~E1000_CONNSW_ENRGSRC;
1624 wr32(E1000_CONNSW, reg); 1625 wr32(E1000_CONNSW, reg);
1625 1626
1627 /* Unset sigdetect for SERDES loopback on
1628 * 82580 and i350 devices.
1629 */
1630 switch (hw->mac.type) {
1631 case e1000_82580:
1632 case e1000_i350:
1633 reg = rd32(E1000_PCS_CFG0);
1634 reg |= E1000_PCS_CFG_IGN_SD;
1635 wr32(E1000_PCS_CFG0, reg);
1636 break;
1637 default:
1638 break;
1639 }
1640
1626 /* Set PCS register for forced speed */ 1641 /* Set PCS register for forced speed */
1627 reg = rd32(E1000_PCS_LCTL); 1642 reg = rd32(E1000_PCS_LCTL);
1628 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/ 1643 reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
@@ -1685,16 +1700,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1685 memset(&skb->data[frame_size + 12], 0xAF, 1); 1700 memset(&skb->data[frame_size + 12], 0xAF, 1);
1686} 1701}
1687 1702
1688static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1703static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
1704 unsigned int frame_size)
1689{ 1705{
1690 frame_size /= 2; 1706 unsigned char *data;
1691 if (*(skb->data + 3) == 0xFF) { 1707 bool match = true;
1692 if ((*(skb->data + frame_size + 10) == 0xBE) && 1708
1693 (*(skb->data + frame_size + 12) == 0xAF)) { 1709 frame_size >>= 1;
1694 return 0; 1710
1695 } 1711 data = kmap(rx_buffer->page);
1696 } 1712
1697 return 13; 1713 if (data[3] != 0xFF ||
1714 data[frame_size + 10] != 0xBE ||
1715 data[frame_size + 12] != 0xAF)
1716 match = false;
1717
1718 kunmap(rx_buffer->page);
1719
1720 return match;
1698} 1721}
1699 1722
1700static int igb_clean_test_rings(struct igb_ring *rx_ring, 1723static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1704 union e1000_adv_rx_desc *rx_desc; 1727 union e1000_adv_rx_desc *rx_desc;
1705 struct igb_rx_buffer *rx_buffer_info; 1728 struct igb_rx_buffer *rx_buffer_info;
1706 struct igb_tx_buffer *tx_buffer_info; 1729 struct igb_tx_buffer *tx_buffer_info;
1707 struct netdev_queue *txq;
1708 u16 rx_ntc, tx_ntc, count = 0; 1730 u16 rx_ntc, tx_ntc, count = 0;
1709 unsigned int total_bytes = 0, total_packets = 0;
1710 1731
1711 /* initialize next to clean and descriptor values */ 1732 /* initialize next to clean and descriptor values */
1712 rx_ntc = rx_ring->next_to_clean; 1733 rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1738,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1717 /* check rx buffer */ 1738 /* check rx buffer */
1718 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1739 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1719 1740
1720 /* unmap rx buffer, will be remapped by alloc_rx_buffers */ 1741 /* sync Rx buffer for CPU read */
1721 dma_unmap_single(rx_ring->dev, 1742 dma_sync_single_for_cpu(rx_ring->dev,
1722 rx_buffer_info->dma, 1743 rx_buffer_info->dma,
1723 IGB_RX_HDR_LEN, 1744 IGB_RX_BUFSZ,
1724 DMA_FROM_DEVICE); 1745 DMA_FROM_DEVICE);
1725 rx_buffer_info->dma = 0;
1726 1746
1727 /* verify contents of skb */ 1747 /* verify contents of skb */
1728 if (!igb_check_lbtest_frame(rx_buffer_info->skb, size)) 1748 if (igb_check_lbtest_frame(rx_buffer_info, size))
1729 count++; 1749 count++;
1730 1750
1751 /* sync Rx buffer for device write */
1752 dma_sync_single_for_device(rx_ring->dev,
1753 rx_buffer_info->dma,
1754 IGB_RX_BUFSZ,
1755 DMA_FROM_DEVICE);
1756
1731 /* unmap buffer on tx side */ 1757 /* unmap buffer on tx side */
1732 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1758 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1733 total_bytes += tx_buffer_info->bytecount;
1734 total_packets += tx_buffer_info->gso_segs;
1735 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1759 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1736 1760
1737 /* increment rx/tx next to clean counters */ 1761 /* increment rx/tx next to clean counters */
@@ -1746,8 +1770,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1746 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc); 1770 rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
1747 } 1771 }
1748 1772
1749 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); 1773 netdev_tx_reset_queue(txring_txq(tx_ring));
1750 netdev_tx_completed_queue(txq, total_packets, total_bytes);
1751 1774
1752 /* re-map buffers to ring, store next to clean values */ 1775 /* re-map buffers to ring, store next to clean values */
1753 igb_alloc_rx_buffers(rx_ring, count); 1776 igb_alloc_rx_buffers(rx_ring, count);
@@ -1957,54 +1980,6 @@ static void igb_diag_test(struct net_device *netdev,
1957 msleep_interruptible(4 * 1000); 1980 msleep_interruptible(4 * 1000);
1958} 1981}
1959 1982
1960static int igb_wol_exclusion(struct igb_adapter *adapter,
1961 struct ethtool_wolinfo *wol)
1962{
1963 struct e1000_hw *hw = &adapter->hw;
1964 int retval = 1; /* fail by default */
1965
1966 switch (hw->device_id) {
1967 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1968 /* WoL not supported */
1969 wol->supported = 0;
1970 break;
1971 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1972 case E1000_DEV_ID_82576_FIBER:
1973 case E1000_DEV_ID_82576_SERDES:
1974 /* Wake events not supported on port B */
1975 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
1976 wol->supported = 0;
1977 break;
1978 }
1979 /* return success for non excluded adapter ports */
1980 retval = 0;
1981 break;
1982 case E1000_DEV_ID_82576_QUAD_COPPER:
1983 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
1984 /* quad port adapters only support WoL on port A */
1985 if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
1986 wol->supported = 0;
1987 break;
1988 }
1989 /* return success for non excluded adapter ports */
1990 retval = 0;
1991 break;
1992 default:
1993 /* dual port cards only support WoL on port A from now on
1994 * unless it was enabled in the eeprom for port B
1995 * so exclude FUNC_1 ports from having WoL enabled */
1996 if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
1997 !adapter->eeprom_wol) {
1998 wol->supported = 0;
1999 break;
2000 }
2001
2002 retval = 0;
2003 }
2004
2005 return retval;
2006}
2007
2008static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1983static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2009{ 1984{
2010 struct igb_adapter *adapter = netdev_priv(netdev); 1985 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2014,10 +1989,7 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2014 WAKE_PHY; 1989 WAKE_PHY;
2015 wol->wolopts = 0; 1990 wol->wolopts = 0;
2016 1991
2017 /* this function will set ->supported = 0 and return 1 if wol is not 1992 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2018 * supported by this hardware */
2019 if (igb_wol_exclusion(adapter, wol) ||
2020 !device_can_wakeup(&adapter->pdev->dev))
2021 return; 1993 return;
2022 1994
2023 /* apply any specific unsupported masks here */ 1995 /* apply any specific unsupported masks here */
@@ -2045,8 +2017,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2045 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) 2017 if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
2046 return -EOPNOTSUPP; 2018 return -EOPNOTSUPP;
2047 2019
2048 if (igb_wol_exclusion(adapter, wol) || 2020 if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
2049 !device_can_wakeup(&adapter->pdev->dev))
2050 return wol->wolopts ? -EOPNOTSUPP : 0; 2021 return wol->wolopts ? -EOPNOTSUPP : 0;
2051 2022
2052 /* these settings will always override what we currently have */ 2023 /* these settings will always override what we currently have */
@@ -2301,7 +2272,6 @@ static int igb_get_ts_info(struct net_device *dev,
2301 struct igb_adapter *adapter = netdev_priv(dev); 2272 struct igb_adapter *adapter = netdev_priv(dev);
2302 2273
2303 switch (adapter->hw.mac.type) { 2274 switch (adapter->hw.mac.type) {
2304#ifdef CONFIG_IGB_PTP
2305 case e1000_82576: 2275 case e1000_82576:
2306 case e1000_82580: 2276 case e1000_82580:
2307 case e1000_i350: 2277 case e1000_i350:
@@ -2337,12 +2307,288 @@ static int igb_get_ts_info(struct net_device *dev,
2337 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2307 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2338 2308
2339 return 0; 2309 return 0;
2340#endif /* CONFIG_IGB_PTP */
2341 default: 2310 default:
2342 return -EOPNOTSUPP; 2311 return -EOPNOTSUPP;
2343 } 2312 }
2344} 2313}
2345 2314
2315static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
2316 struct ethtool_rxnfc *cmd)
2317{
2318 cmd->data = 0;
2319
2320 /* Report default options for RSS on igb */
2321 switch (cmd->flow_type) {
2322 case TCP_V4_FLOW:
2323 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2324 case UDP_V4_FLOW:
2325 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
2326 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2327 case SCTP_V4_FLOW:
2328 case AH_ESP_V4_FLOW:
2329 case AH_V4_FLOW:
2330 case ESP_V4_FLOW:
2331 case IPV4_FLOW:
2332 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2333 break;
2334 case TCP_V6_FLOW:
2335 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2336 case UDP_V6_FLOW:
2337 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
2338 cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
2339 case SCTP_V6_FLOW:
2340 case AH_ESP_V6_FLOW:
2341 case AH_V6_FLOW:
2342 case ESP_V6_FLOW:
2343 case IPV6_FLOW:
2344 cmd->data |= RXH_IP_SRC | RXH_IP_DST;
2345 break;
2346 default:
2347 return -EINVAL;
2348 }
2349
2350 return 0;
2351}
2352
2353static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2354 u32 *rule_locs)
2355{
2356 struct igb_adapter *adapter = netdev_priv(dev);
2357 int ret = -EOPNOTSUPP;
2358
2359 switch (cmd->cmd) {
2360 case ETHTOOL_GRXRINGS:
2361 cmd->data = adapter->num_rx_queues;
2362 ret = 0;
2363 break;
2364 case ETHTOOL_GRXFH:
2365 ret = igb_get_rss_hash_opts(adapter, cmd);
2366 break;
2367 default:
2368 break;
2369 }
2370
2371 return ret;
2372}
2373
2374#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
2375 IGB_FLAG_RSS_FIELD_IPV6_UDP)
2376static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
2377 struct ethtool_rxnfc *nfc)
2378{
2379 u32 flags = adapter->flags;
2380
2381 /* RSS does not support anything other than hashing
2382 * to queues on src and dst IPs and ports
2383 */
2384 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
2385 RXH_L4_B_0_1 | RXH_L4_B_2_3))
2386 return -EINVAL;
2387
2388 switch (nfc->flow_type) {
2389 case TCP_V4_FLOW:
2390 case TCP_V6_FLOW:
2391 if (!(nfc->data & RXH_IP_SRC) ||
2392 !(nfc->data & RXH_IP_DST) ||
2393 !(nfc->data & RXH_L4_B_0_1) ||
2394 !(nfc->data & RXH_L4_B_2_3))
2395 return -EINVAL;
2396 break;
2397 case UDP_V4_FLOW:
2398 if (!(nfc->data & RXH_IP_SRC) ||
2399 !(nfc->data & RXH_IP_DST))
2400 return -EINVAL;
2401 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2402 case 0:
2403 flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
2404 break;
2405 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2406 flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
2407 break;
2408 default:
2409 return -EINVAL;
2410 }
2411 break;
2412 case UDP_V6_FLOW:
2413 if (!(nfc->data & RXH_IP_SRC) ||
2414 !(nfc->data & RXH_IP_DST))
2415 return -EINVAL;
2416 switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
2417 case 0:
2418 flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
2419 break;
2420 case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
2421 flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
2422 break;
2423 default:
2424 return -EINVAL;
2425 }
2426 break;
2427 case AH_ESP_V4_FLOW:
2428 case AH_V4_FLOW:
2429 case ESP_V4_FLOW:
2430 case SCTP_V4_FLOW:
2431 case AH_ESP_V6_FLOW:
2432 case AH_V6_FLOW:
2433 case ESP_V6_FLOW:
2434 case SCTP_V6_FLOW:
2435 if (!(nfc->data & RXH_IP_SRC) ||
2436 !(nfc->data & RXH_IP_DST) ||
2437 (nfc->data & RXH_L4_B_0_1) ||
2438 (nfc->data & RXH_L4_B_2_3))
2439 return -EINVAL;
2440 break;
2441 default:
2442 return -EINVAL;
2443 }
2444
2445 /* if we changed something we need to update flags */
2446 if (flags != adapter->flags) {
2447 struct e1000_hw *hw = &adapter->hw;
2448 u32 mrqc = rd32(E1000_MRQC);
2449
2450 if ((flags & UDP_RSS_FLAGS) &&
2451 !(adapter->flags & UDP_RSS_FLAGS))
2452 dev_err(&adapter->pdev->dev,
2453 "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
2454
2455 adapter->flags = flags;
2456
2457 /* Perform hash on these packet types */
2458 mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
2459 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2460 E1000_MRQC_RSS_FIELD_IPV6 |
2461 E1000_MRQC_RSS_FIELD_IPV6_TCP;
2462
2463 mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
2464 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2465
2466 if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
2467 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
2468
2469 if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
2470 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
2471
2472 wr32(E1000_MRQC, mrqc);
2473 }
2474
2475 return 0;
2476}
2477
2478static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2479{
2480 struct igb_adapter *adapter = netdev_priv(dev);
2481 int ret = -EOPNOTSUPP;
2482
2483 switch (cmd->cmd) {
2484 case ETHTOOL_SRXFH:
2485 ret = igb_set_rss_hash_opt(adapter, cmd);
2486 break;
2487 default:
2488 break;
2489 }
2490
2491 return ret;
2492}
2493
2494static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2495{
2496 struct igb_adapter *adapter = netdev_priv(netdev);
2497 struct e1000_hw *hw = &adapter->hw;
2498 u32 ipcnfg, eeer;
2499
2500 if ((hw->mac.type < e1000_i350) ||
2501 (hw->phy.media_type != e1000_media_type_copper))
2502 return -EOPNOTSUPP;
2503
2504 edata->supported = (SUPPORTED_1000baseT_Full |
2505 SUPPORTED_100baseT_Full);
2506
2507 ipcnfg = rd32(E1000_IPCNFG);
2508 eeer = rd32(E1000_EEER);
2509
2510 /* EEE status on negotiated link */
2511 if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
2512 edata->advertised = ADVERTISED_1000baseT_Full;
2513
2514 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
2515 edata->advertised |= ADVERTISED_100baseT_Full;
2516
2517 if (eeer & E1000_EEER_EEE_NEG)
2518 edata->eee_active = true;
2519
2520 edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
2521
2522 if (eeer & E1000_EEER_TX_LPI_EN)
2523 edata->tx_lpi_enabled = true;
2524
2525 /* Report correct negotiated EEE status for devices that
2526 * wrongly report EEE at half-duplex
2527 */
2528 if (adapter->link_duplex == HALF_DUPLEX) {
2529 edata->eee_enabled = false;
2530 edata->eee_active = false;
2531 edata->tx_lpi_enabled = false;
2532 edata->advertised &= ~edata->advertised;
2533 }
2534
2535 return 0;
2536}
2537
2538static int igb_set_eee(struct net_device *netdev,
2539 struct ethtool_eee *edata)
2540{
2541 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw;
2543 struct ethtool_eee eee_curr;
2544 s32 ret_val;
2545
2546 if ((hw->mac.type < e1000_i350) ||
2547 (hw->phy.media_type != e1000_media_type_copper))
2548 return -EOPNOTSUPP;
2549
2550 ret_val = igb_get_eee(netdev, &eee_curr);
2551 if (ret_val)
2552 return ret_val;
2553
2554 if (eee_curr.eee_enabled) {
2555 if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
2556 dev_err(&adapter->pdev->dev,
2557 "Setting EEE tx-lpi is not supported\n");
2558 return -EINVAL;
2559 }
2560
2561 /* Tx LPI timer is not implemented currently */
2562 if (edata->tx_lpi_timer) {
2563 dev_err(&adapter->pdev->dev,
2564 "Setting EEE Tx LPI timer is not supported\n");
2565 return -EINVAL;
2566 }
2567
2568 if (eee_curr.advertised != edata->advertised) {
2569 dev_err(&adapter->pdev->dev,
2570 "Setting EEE Advertisement is not supported\n");
2571 return -EINVAL;
2572 }
2573
2574 } else if (!edata->eee_enabled) {
2575 dev_err(&adapter->pdev->dev,
2576 "Setting EEE options are not supported with EEE disabled\n");
2577 return -EINVAL;
2578 }
2579
2580 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2581 hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2582 igb_set_eee_i350(hw);
2583
2584 /* reset link */
2585 if (!netif_running(netdev))
2586 igb_reset(adapter);
2587 }
2588
2589 return 0;
2590}
2591
2346static int igb_ethtool_begin(struct net_device *netdev) 2592static int igb_ethtool_begin(struct net_device *netdev)
2347{ 2593{
2348 struct igb_adapter *adapter = netdev_priv(netdev); 2594 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2629,10 @@ static const struct ethtool_ops igb_ethtool_ops = {
2383 .get_coalesce = igb_get_coalesce, 2629 .get_coalesce = igb_get_coalesce,
2384 .set_coalesce = igb_set_coalesce, 2630 .set_coalesce = igb_set_coalesce,
2385 .get_ts_info = igb_get_ts_info, 2631 .get_ts_info = igb_get_ts_info,
2632 .get_rxnfc = igb_get_rxnfc,
2633 .set_rxnfc = igb_set_rxnfc,
2634 .get_eee = igb_get_eee,
2635 .set_eee = igb_set_eee,
2386 .begin = igb_ethtool_begin, 2636 .begin = igb_ethtool_begin,
2387 .complete = igb_ethtool_complete, 2637 .complete = igb_ethtool_complete,
2388}; 2638};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37ef12e..31cfe2ec75df 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 4 62#define MAJ 4
63#define MIN 0 63#define MIN 1
64#define BUILD 1 64#define BUILD 2
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -118,10 +118,11 @@ static void igb_free_all_tx_resources(struct igb_adapter *);
118static void igb_free_all_rx_resources(struct igb_adapter *); 118static void igb_free_all_rx_resources(struct igb_adapter *);
119static void igb_setup_mrqc(struct igb_adapter *); 119static void igb_setup_mrqc(struct igb_adapter *);
120static int igb_probe(struct pci_dev *, const struct pci_device_id *); 120static int igb_probe(struct pci_dev *, const struct pci_device_id *);
121static void __devexit igb_remove(struct pci_dev *pdev); 121static void igb_remove(struct pci_dev *pdev);
122static int igb_sw_init(struct igb_adapter *); 122static int igb_sw_init(struct igb_adapter *);
123static int igb_open(struct net_device *); 123static int igb_open(struct net_device *);
124static int igb_close(struct net_device *); 124static int igb_close(struct net_device *);
125static void igb_configure(struct igb_adapter *);
125static void igb_configure_tx(struct igb_adapter *); 126static void igb_configure_tx(struct igb_adapter *);
126static void igb_configure_rx(struct igb_adapter *); 127static void igb_configure_rx(struct igb_adapter *);
127static void igb_clean_all_tx_rings(struct igb_adapter *); 128static void igb_clean_all_tx_rings(struct igb_adapter *);
@@ -228,7 +229,7 @@ static struct pci_driver igb_driver = {
228 .name = igb_driver_name, 229 .name = igb_driver_name,
229 .id_table = igb_pci_tbl, 230 .id_table = igb_pci_tbl,
230 .probe = igb_probe, 231 .probe = igb_probe,
231 .remove = __devexit_p(igb_remove), 232 .remove = igb_remove,
232#ifdef CONFIG_PM 233#ifdef CONFIG_PM
233 .driver.pm = &igb_pm_ops, 234 .driver.pm = &igb_pm_ops,
234#endif 235#endif
@@ -534,31 +535,27 @@ rx_ring_summary:
534 535
535 if (staterr & E1000_RXD_STAT_DD) { 536 if (staterr & E1000_RXD_STAT_DD) {
536 /* Descriptor Done */ 537 /* Descriptor Done */
537 pr_info("%s[0x%03X] %016llX %016llX -------" 538 pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
538 "--------- %p%s\n", "RWB", i, 539 "RWB", i,
539 le64_to_cpu(u0->a), 540 le64_to_cpu(u0->a),
540 le64_to_cpu(u0->b), 541 le64_to_cpu(u0->b),
541 buffer_info->skb, next_desc); 542 next_desc);
542 } else { 543 } else {
543 pr_info("%s[0x%03X] %016llX %016llX %016llX" 544 pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
544 " %p%s\n", "R ", i, 545 "R ", i,
545 le64_to_cpu(u0->a), 546 le64_to_cpu(u0->a),
546 le64_to_cpu(u0->b), 547 le64_to_cpu(u0->b),
547 (u64)buffer_info->dma, 548 (u64)buffer_info->dma,
548 buffer_info->skb, next_desc); 549 next_desc);
549 550
550 if (netif_msg_pktdata(adapter) && 551 if (netif_msg_pktdata(adapter) &&
551 buffer_info->dma && buffer_info->skb) { 552 buffer_info->dma && buffer_info->page) {
552 print_hex_dump(KERN_INFO, "",
553 DUMP_PREFIX_ADDRESS,
554 16, 1, buffer_info->skb->data,
555 IGB_RX_HDR_LEN, true);
556 print_hex_dump(KERN_INFO, "", 553 print_hex_dump(KERN_INFO, "",
557 DUMP_PREFIX_ADDRESS, 554 DUMP_PREFIX_ADDRESS,
558 16, 1, 555 16, 1,
559 page_address(buffer_info->page) + 556 page_address(buffer_info->page) +
560 buffer_info->page_offset, 557 buffer_info->page_offset,
561 PAGE_SIZE/2, true); 558 IGB_RX_BUFSZ, true);
562 } 559 }
563 } 560 }
564 } 561 }
@@ -656,80 +653,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
656 } 653 }
657} 654}
658 655
659static void igb_free_queues(struct igb_adapter *adapter)
660{
661 int i;
662
663 for (i = 0; i < adapter->num_tx_queues; i++) {
664 kfree(adapter->tx_ring[i]);
665 adapter->tx_ring[i] = NULL;
666 }
667 for (i = 0; i < adapter->num_rx_queues; i++) {
668 kfree(adapter->rx_ring[i]);
669 adapter->rx_ring[i] = NULL;
670 }
671 adapter->num_rx_queues = 0;
672 adapter->num_tx_queues = 0;
673}
674
675/**
676 * igb_alloc_queues - Allocate memory for all rings
677 * @adapter: board private structure to initialize
678 *
679 * We allocate one ring per queue at run-time since we don't know the
680 * number of queues at compile-time.
681 **/
682static int igb_alloc_queues(struct igb_adapter *adapter)
683{
684 struct igb_ring *ring;
685 int i;
686
687 for (i = 0; i < adapter->num_tx_queues; i++) {
688 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
689 if (!ring)
690 goto err;
691 ring->count = adapter->tx_ring_count;
692 ring->queue_index = i;
693 ring->dev = &adapter->pdev->dev;
694 ring->netdev = adapter->netdev;
695 /* For 82575, context index must be unique per ring. */
696 if (adapter->hw.mac.type == e1000_82575)
697 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
698 adapter->tx_ring[i] = ring;
699 }
700
701 for (i = 0; i < adapter->num_rx_queues; i++) {
702 ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
703 if (!ring)
704 goto err;
705 ring->count = adapter->rx_ring_count;
706 ring->queue_index = i;
707 ring->dev = &adapter->pdev->dev;
708 ring->netdev = adapter->netdev;
709 /* set flag indicating ring supports SCTP checksum offload */
710 if (adapter->hw.mac.type >= e1000_82576)
711 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
712
713 /*
714 * On i350, i210, and i211, loopback VLAN packets
715 * have the tag byte-swapped.
716 * */
717 if (adapter->hw.mac.type >= e1000_i350)
718 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
719
720 adapter->rx_ring[i] = ring;
721 }
722
723 igb_cache_ring_register(adapter);
724
725 return 0;
726
727err:
728 igb_free_queues(adapter);
729
730 return -ENOMEM;
731}
732
733/** 656/**
734 * igb_write_ivar - configure ivar for given MSI-X vector 657 * igb_write_ivar - configure ivar for given MSI-X vector
735 * @hw: pointer to the HW structure 658 * @hw: pointer to the HW structure
@@ -909,17 +832,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
909{ 832{
910 struct net_device *netdev = adapter->netdev; 833 struct net_device *netdev = adapter->netdev;
911 struct e1000_hw *hw = &adapter->hw; 834 struct e1000_hw *hw = &adapter->hw;
912 int i, err = 0, vector = 0; 835 int i, err = 0, vector = 0, free_vector = 0;
913 836
914 err = request_irq(adapter->msix_entries[vector].vector, 837 err = request_irq(adapter->msix_entries[vector].vector,
915 igb_msix_other, 0, netdev->name, adapter); 838 igb_msix_other, 0, netdev->name, adapter);
916 if (err) 839 if (err)
917 goto out; 840 goto err_out;
918 vector++;
919 841
920 for (i = 0; i < adapter->num_q_vectors; i++) { 842 for (i = 0; i < adapter->num_q_vectors; i++) {
921 struct igb_q_vector *q_vector = adapter->q_vector[i]; 843 struct igb_q_vector *q_vector = adapter->q_vector[i];
922 844
845 vector++;
846
923 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector); 847 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
924 848
925 if (q_vector->rx.ring && q_vector->tx.ring) 849 if (q_vector->rx.ring && q_vector->tx.ring)
@@ -938,13 +862,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
938 igb_msix_ring, 0, q_vector->name, 862 igb_msix_ring, 0, q_vector->name,
939 q_vector); 863 q_vector);
940 if (err) 864 if (err)
941 goto out; 865 goto err_free;
942 vector++;
943 } 866 }
944 867
945 igb_configure_msix(adapter); 868 igb_configure_msix(adapter);
946 return 0; 869 return 0;
947out: 870
871err_free:
872 /* free already assigned IRQs */
873 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
874
875 vector--;
876 for (i = 0; i < vector; i++) {
877 free_irq(adapter->msix_entries[free_vector++].vector,
878 adapter->q_vector[i]);
879 }
880err_out:
948 return err; 881 return err;
949} 882}
950 883
@@ -960,6 +893,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
960} 893}
961 894
962/** 895/**
896 * igb_free_q_vector - Free memory allocated for specific interrupt vector
897 * @adapter: board private structure to initialize
898 * @v_idx: Index of vector to be freed
899 *
900 * This function frees the memory allocated to the q_vector. In addition if
901 * NAPI is enabled it will delete any references to the NAPI struct prior
902 * to freeing the q_vector.
903 **/
904static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
905{
906 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
907
908 if (q_vector->tx.ring)
909 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
910
911 if (q_vector->rx.ring)
912 adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
913
914 adapter->q_vector[v_idx] = NULL;
915 netif_napi_del(&q_vector->napi);
916
917 /*
918 * ixgbe_get_stats64() might access the rings on this vector,
919 * we must wait a grace period before freeing it.
920 */
921 kfree_rcu(q_vector, rcu);
922}
923
924/**
963 * igb_free_q_vectors - Free memory allocated for interrupt vectors 925 * igb_free_q_vectors - Free memory allocated for interrupt vectors
964 * @adapter: board private structure to initialize 926 * @adapter: board private structure to initialize
965 * 927 *
@@ -969,17 +931,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
969 **/ 931 **/
970static void igb_free_q_vectors(struct igb_adapter *adapter) 932static void igb_free_q_vectors(struct igb_adapter *adapter)
971{ 933{
972 int v_idx; 934 int v_idx = adapter->num_q_vectors;
973 935
974 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 936 adapter->num_tx_queues = 0;
975 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 937 adapter->num_rx_queues = 0;
976 adapter->q_vector[v_idx] = NULL;
977 if (!q_vector)
978 continue;
979 netif_napi_del(&q_vector->napi);
980 kfree(q_vector);
981 }
982 adapter->num_q_vectors = 0; 938 adapter->num_q_vectors = 0;
939
940 while (v_idx--)
941 igb_free_q_vector(adapter, v_idx);
983} 942}
984 943
985/** 944/**
@@ -990,7 +949,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
990 */ 949 */
991static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 950static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
992{ 951{
993 igb_free_queues(adapter);
994 igb_free_q_vectors(adapter); 952 igb_free_q_vectors(adapter);
995 igb_reset_interrupt_capability(adapter); 953 igb_reset_interrupt_capability(adapter);
996} 954}
@@ -1001,11 +959,14 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1001 * Attempt to configure interrupts using the best available 959 * Attempt to configure interrupts using the best available
1002 * capabilities of the hardware and kernel. 960 * capabilities of the hardware and kernel.
1003 **/ 961 **/
1004static int igb_set_interrupt_capability(struct igb_adapter *adapter) 962static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1005{ 963{
1006 int err; 964 int err;
1007 int numvecs, i; 965 int numvecs, i;
1008 966
967 if (!msix)
968 goto msi_only;
969
1009 /* Number of supported queues. */ 970 /* Number of supported queues. */
1010 adapter->num_rx_queues = adapter->rss_queues; 971 adapter->num_rx_queues = adapter->rss_queues;
1011 if (adapter->vfs_allocated_count) 972 if (adapter->vfs_allocated_count)
@@ -1038,7 +999,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
1038 adapter->msix_entries, 999 adapter->msix_entries,
1039 numvecs); 1000 numvecs);
1040 if (err == 0) 1001 if (err == 0)
1041 goto out; 1002 return;
1042 1003
1043 igb_reset_interrupt_capability(adapter); 1004 igb_reset_interrupt_capability(adapter);
1044 1005
@@ -1068,105 +1029,183 @@ msi_only:
1068 adapter->num_q_vectors = 1; 1029 adapter->num_q_vectors = 1;
1069 if (!pci_enable_msi(adapter->pdev)) 1030 if (!pci_enable_msi(adapter->pdev))
1070 adapter->flags |= IGB_FLAG_HAS_MSI; 1031 adapter->flags |= IGB_FLAG_HAS_MSI;
1071out: 1032}
1072 /* Notify the stack of the (possibly) reduced queue counts. */ 1033
1073 rtnl_lock(); 1034static void igb_add_ring(struct igb_ring *ring,
1074 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); 1035 struct igb_ring_container *head)
1075 err = netif_set_real_num_rx_queues(adapter->netdev, 1036{
1076 adapter->num_rx_queues); 1037 head->ring = ring;
1077 rtnl_unlock(); 1038 head->count++;
1078 return err;
1079} 1039}
1080 1040
1081/** 1041/**
1082 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1042 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1083 * @adapter: board private structure to initialize 1043 * @adapter: board private structure to initialize
1044 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1045 * @v_idx: index of vector in adapter struct
1046 * @txr_count: total number of Tx rings to allocate
1047 * @txr_idx: index of first Tx ring to allocate
1048 * @rxr_count: total number of Rx rings to allocate
1049 * @rxr_idx: index of first Rx ring to allocate
1084 * 1050 *
1085 * We allocate one q_vector per queue interrupt. If allocation fails we 1051 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1086 * return -ENOMEM.
1087 **/ 1052 **/
1088static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1053static int igb_alloc_q_vector(struct igb_adapter *adapter,
1054 int v_count, int v_idx,
1055 int txr_count, int txr_idx,
1056 int rxr_count, int rxr_idx)
1089{ 1057{
1090 struct igb_q_vector *q_vector; 1058 struct igb_q_vector *q_vector;
1091 struct e1000_hw *hw = &adapter->hw; 1059 struct igb_ring *ring;
1092 int v_idx; 1060 int ring_count, size;
1093 1061
1094 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { 1062 /* igb only supports 1 Tx and/or 1 Rx queue per vector */
1095 q_vector = kzalloc(sizeof(struct igb_q_vector), 1063 if (txr_count > 1 || rxr_count > 1)
1096 GFP_KERNEL); 1064 return -ENOMEM;
1097 if (!q_vector) 1065
1098 goto err_out; 1066 ring_count = txr_count + rxr_count;
1099 q_vector->adapter = adapter; 1067 size = sizeof(struct igb_q_vector) +
1100 q_vector->itr_register = hw->hw_addr + E1000_EITR(0); 1068 (sizeof(struct igb_ring) * ring_count);
1101 q_vector->itr_val = IGB_START_ITR; 1069
1102 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); 1070 /* allocate q_vector and rings */
1103 adapter->q_vector[v_idx] = q_vector; 1071 q_vector = kzalloc(size, GFP_KERNEL);
1072 if (!q_vector)
1073 return -ENOMEM;
1074
1075 /* initialize NAPI */
1076 netif_napi_add(adapter->netdev, &q_vector->napi,
1077 igb_poll, 64);
1078
1079 /* tie q_vector and adapter together */
1080 adapter->q_vector[v_idx] = q_vector;
1081 q_vector->adapter = adapter;
1082
1083 /* initialize work limits */
1084 q_vector->tx.work_limit = adapter->tx_work_limit;
1085
1086 /* initialize ITR configuration */
1087 q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
1088 q_vector->itr_val = IGB_START_ITR;
1089
1090 /* initialize pointer to rings */
1091 ring = q_vector->ring;
1092
1093 if (txr_count) {
1094 /* assign generic ring traits */
1095 ring->dev = &adapter->pdev->dev;
1096 ring->netdev = adapter->netdev;
1097
1098 /* configure backlink on ring */
1099 ring->q_vector = q_vector;
1100
1101 /* update q_vector Tx values */
1102 igb_add_ring(ring, &q_vector->tx);
1103
1104 /* For 82575, context index must be unique per ring. */
1105 if (adapter->hw.mac.type == e1000_82575)
1106 set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
1107
1108 /* apply Tx specific ring traits */
1109 ring->count = adapter->tx_ring_count;
1110 ring->queue_index = txr_idx;
1111
1112 /* assign ring to adapter */
1113 adapter->tx_ring[txr_idx] = ring;
1114
1115 /* push pointer to next ring */
1116 ring++;
1104 } 1117 }
1105 1118
1106 return 0; 1119 if (rxr_count) {
1120 /* assign generic ring traits */
1121 ring->dev = &adapter->pdev->dev;
1122 ring->netdev = adapter->netdev;
1107 1123
1108err_out: 1124 /* configure backlink on ring */
1109 igb_free_q_vectors(adapter); 1125 ring->q_vector = q_vector;
1110 return -ENOMEM;
1111}
1112 1126
1113static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter, 1127 /* update q_vector Rx values */
1114 int ring_idx, int v_idx) 1128 igb_add_ring(ring, &q_vector->rx);
1115{
1116 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1117 1129
1118 q_vector->rx.ring = adapter->rx_ring[ring_idx]; 1130 /* set flag indicating ring supports SCTP checksum offload */
1119 q_vector->rx.ring->q_vector = q_vector; 1131 if (adapter->hw.mac.type >= e1000_82576)
1120 q_vector->rx.count++; 1132 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1121 q_vector->itr_val = adapter->rx_itr_setting;
1122 if (q_vector->itr_val && q_vector->itr_val <= 3)
1123 q_vector->itr_val = IGB_START_ITR;
1124}
1125 1133
1126static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter, 1134 /*
1127 int ring_idx, int v_idx) 1135 * On i350, i210, and i211, loopback VLAN packets
1128{ 1136 * have the tag byte-swapped.
1129 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1137 * */
1138 if (adapter->hw.mac.type >= e1000_i350)
1139 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1130 1140
1131 q_vector->tx.ring = adapter->tx_ring[ring_idx]; 1141 /* apply Rx specific ring traits */
1132 q_vector->tx.ring->q_vector = q_vector; 1142 ring->count = adapter->rx_ring_count;
1133 q_vector->tx.count++; 1143 ring->queue_index = rxr_idx;
1134 q_vector->itr_val = adapter->tx_itr_setting; 1144
1135 q_vector->tx.work_limit = adapter->tx_work_limit; 1145 /* assign ring to adapter */
1136 if (q_vector->itr_val && q_vector->itr_val <= 3) 1146 adapter->rx_ring[rxr_idx] = ring;
1137 q_vector->itr_val = IGB_START_ITR; 1147 }
1148
1149 return 0;
1138} 1150}
1139 1151
1152
1140/** 1153/**
1141 * igb_map_ring_to_vector - maps allocated queues to vectors 1154 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1155 * @adapter: board private structure to initialize
1142 * 1156 *
1143 * This function maps the recently allocated queues to vectors. 1157 * We allocate one q_vector per queue interrupt. If allocation fails we
1158 * return -ENOMEM.
1144 **/ 1159 **/
1145static int igb_map_ring_to_vector(struct igb_adapter *adapter) 1160static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1146{ 1161{
1147 int i; 1162 int q_vectors = adapter->num_q_vectors;
1148 int v_idx = 0; 1163 int rxr_remaining = adapter->num_rx_queues;
1164 int txr_remaining = adapter->num_tx_queues;
1165 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1166 int err;
1149 1167
1150 if ((adapter->num_q_vectors < adapter->num_rx_queues) || 1168 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1151 (adapter->num_q_vectors < adapter->num_tx_queues)) 1169 for (; rxr_remaining; v_idx++) {
1152 return -ENOMEM; 1170 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1171 0, 0, 1, rxr_idx);
1153 1172
1154 if (adapter->num_q_vectors >= 1173 if (err)
1155 (adapter->num_rx_queues + adapter->num_tx_queues)) { 1174 goto err_out;
1156 for (i = 0; i < adapter->num_rx_queues; i++) 1175
1157 igb_map_rx_ring_to_vector(adapter, i, v_idx++); 1176 /* update counts and index */
1158 for (i = 0; i < adapter->num_tx_queues; i++) 1177 rxr_remaining--;
1159 igb_map_tx_ring_to_vector(adapter, i, v_idx++); 1178 rxr_idx++;
1160 } else {
1161 for (i = 0; i < adapter->num_rx_queues; i++) {
1162 if (i < adapter->num_tx_queues)
1163 igb_map_tx_ring_to_vector(adapter, i, v_idx);
1164 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
1165 } 1179 }
1166 for (; i < adapter->num_tx_queues; i++)
1167 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
1168 } 1180 }
1181
1182 for (; v_idx < q_vectors; v_idx++) {
1183 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1184 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1185 err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
1186 tqpv, txr_idx, rqpv, rxr_idx);
1187
1188 if (err)
1189 goto err_out;
1190
1191 /* update counts and index */
1192 rxr_remaining -= rqpv;
1193 txr_remaining -= tqpv;
1194 rxr_idx++;
1195 txr_idx++;
1196 }
1197
1169 return 0; 1198 return 0;
1199
1200err_out:
1201 adapter->num_tx_queues = 0;
1202 adapter->num_rx_queues = 0;
1203 adapter->num_q_vectors = 0;
1204
1205 while (v_idx--)
1206 igb_free_q_vector(adapter, v_idx);
1207
1208 return -ENOMEM;
1170} 1209}
1171 1210
1172/** 1211/**
@@ -1174,14 +1213,12 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter)
1174 * 1213 *
1175 * This function initializes the interrupts and allocates all of the queues. 1214 * This function initializes the interrupts and allocates all of the queues.
1176 **/ 1215 **/
1177static int igb_init_interrupt_scheme(struct igb_adapter *adapter) 1216static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1178{ 1217{
1179 struct pci_dev *pdev = adapter->pdev; 1218 struct pci_dev *pdev = adapter->pdev;
1180 int err; 1219 int err;
1181 1220
1182 err = igb_set_interrupt_capability(adapter); 1221 igb_set_interrupt_capability(adapter, msix);
1183 if (err)
1184 return err;
1185 1222
1186 err = igb_alloc_q_vectors(adapter); 1223 err = igb_alloc_q_vectors(adapter);
1187 if (err) { 1224 if (err) {
@@ -1189,24 +1226,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
1189 goto err_alloc_q_vectors; 1226 goto err_alloc_q_vectors;
1190 } 1227 }
1191 1228
1192 err = igb_alloc_queues(adapter); 1229 igb_cache_ring_register(adapter);
1193 if (err) {
1194 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1195 goto err_alloc_queues;
1196 }
1197
1198 err = igb_map_ring_to_vector(adapter);
1199 if (err) {
1200 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
1201 goto err_map_queues;
1202 }
1203
1204 1230
1205 return 0; 1231 return 0;
1206err_map_queues: 1232
1207 igb_free_queues(adapter);
1208err_alloc_queues:
1209 igb_free_q_vectors(adapter);
1210err_alloc_q_vectors: 1233err_alloc_q_vectors:
1211 igb_reset_interrupt_capability(adapter); 1234 igb_reset_interrupt_capability(adapter);
1212 return err; 1235 return err;
@@ -1229,29 +1252,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
1229 if (!err) 1252 if (!err)
1230 goto request_done; 1253 goto request_done;
1231 /* fall back to MSI */ 1254 /* fall back to MSI */
1232 igb_clear_interrupt_scheme(adapter);
1233 if (!pci_enable_msi(pdev))
1234 adapter->flags |= IGB_FLAG_HAS_MSI;
1235 igb_free_all_tx_resources(adapter); 1255 igb_free_all_tx_resources(adapter);
1236 igb_free_all_rx_resources(adapter); 1256 igb_free_all_rx_resources(adapter);
1237 adapter->num_tx_queues = 1; 1257
1238 adapter->num_rx_queues = 1; 1258 igb_clear_interrupt_scheme(adapter);
1239 adapter->num_q_vectors = 1; 1259 err = igb_init_interrupt_scheme(adapter, false);
1240 err = igb_alloc_q_vectors(adapter); 1260 if (err)
1241 if (err) {
1242 dev_err(&pdev->dev,
1243 "Unable to allocate memory for vectors\n");
1244 goto request_done;
1245 }
1246 err = igb_alloc_queues(adapter);
1247 if (err) {
1248 dev_err(&pdev->dev,
1249 "Unable to allocate memory for queues\n");
1250 igb_free_q_vectors(adapter);
1251 goto request_done; 1261 goto request_done;
1252 } 1262
1253 igb_setup_all_tx_resources(adapter); 1263 igb_setup_all_tx_resources(adapter);
1254 igb_setup_all_rx_resources(adapter); 1264 igb_setup_all_rx_resources(adapter);
1265 igb_configure(adapter);
1255 } 1266 }
1256 1267
1257 igb_assign_vector(adapter->q_vector[0], 0); 1268 igb_assign_vector(adapter->q_vector[0], 0);
@@ -1587,8 +1598,7 @@ void igb_reset(struct igb_adapter *adapter)
1587 struct e1000_hw *hw = &adapter->hw; 1598 struct e1000_hw *hw = &adapter->hw;
1588 struct e1000_mac_info *mac = &hw->mac; 1599 struct e1000_mac_info *mac = &hw->mac;
1589 struct e1000_fc_info *fc = &hw->fc; 1600 struct e1000_fc_info *fc = &hw->fc;
1590 u32 pba = 0, tx_space, min_tx_space, min_rx_space; 1601 u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
1591 u16 hwm;
1592 1602
1593 /* Repartition Pba for greater than 9k mtu 1603 /* Repartition Pba for greater than 9k mtu
1594 * To take effect CTRL.RST is required. 1604 * To take effect CTRL.RST is required.
@@ -1663,7 +1673,7 @@ void igb_reset(struct igb_adapter *adapter)
1663 hwm = min(((pba << 10) * 9 / 10), 1673 hwm = min(((pba << 10) * 9 / 10),
1664 ((pba << 10) - 2 * adapter->max_frame_size)); 1674 ((pba << 10) - 2 * adapter->max_frame_size));
1665 1675
1666 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1676 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
1667 fc->low_water = fc->high_water - 16; 1677 fc->low_water = fc->high_water - 16;
1668 fc->pause_time = 0xFFFF; 1678 fc->pause_time = 0xFFFF;
1669 fc->send_xon = 1; 1679 fc->send_xon = 1;
@@ -1706,10 +1716,8 @@ void igb_reset(struct igb_adapter *adapter)
1706 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1716 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1707 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1717 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1708 1718
1709#ifdef CONFIG_IGB_PTP
1710 /* Re-enable PTP, where applicable. */ 1719 /* Re-enable PTP, where applicable. */
1711 igb_ptp_reset(adapter); 1720 igb_ptp_reset(adapter);
1712#endif /* CONFIG_IGB_PTP */
1713 1721
1714 igb_get_phy_info(hw); 1722 igb_get_phy_info(hw);
1715} 1723}
@@ -1783,58 +1791,34 @@ static const struct net_device_ops igb_netdev_ops = {
1783void igb_set_fw_version(struct igb_adapter *adapter) 1791void igb_set_fw_version(struct igb_adapter *adapter)
1784{ 1792{
1785 struct e1000_hw *hw = &adapter->hw; 1793 struct e1000_hw *hw = &adapter->hw;
1786 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; 1794 struct e1000_fw_version fw;
1787 u16 major, build, patch, fw_version; 1795
1788 u32 etrack_id; 1796 igb_get_fw_version(hw, &fw);
1789 1797
1790 hw->nvm.ops.read(hw, 5, 1, &fw_version); 1798 switch (hw->mac.type) {
1791 if (adapter->hw.mac.type != e1000_i211) { 1799 case e1000_i211:
1792 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1793 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1794 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1795
1796 /* combo image version needs to be found */
1797 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1798 if ((comb_offset != 0x0) &&
1799 (comb_offset != IGB_NVM_VER_INVALID)) {
1800 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1801 + 1), 1, &comb_verh);
1802 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1803 1, &comb_verl);
1804
1805 /* Only display Option Rom if it exists and is valid */
1806 if ((comb_verh && comb_verl) &&
1807 ((comb_verh != IGB_NVM_VER_INVALID) &&
1808 (comb_verl != IGB_NVM_VER_INVALID))) {
1809 major = comb_verl >> IGB_COMB_VER_SHFT;
1810 build = (comb_verl << IGB_COMB_VER_SHFT) |
1811 (comb_verh >> IGB_COMB_VER_SHFT);
1812 patch = comb_verh & IGB_COMB_VER_MASK;
1813 snprintf(adapter->fw_version,
1814 sizeof(adapter->fw_version),
1815 "%d.%d%d, 0x%08x, %d.%d.%d",
1816 (fw_version & IGB_MAJOR_MASK) >>
1817 IGB_MAJOR_SHIFT,
1818 (fw_version & IGB_MINOR_MASK) >>
1819 IGB_MINOR_SHIFT,
1820 (fw_version & IGB_BUILD_MASK),
1821 etrack_id, major, build, patch);
1822 goto out;
1823 }
1824 }
1825 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1826 "%d.%d%d, 0x%08x",
1827 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1828 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1829 (fw_version & IGB_BUILD_MASK), etrack_id);
1830 } else {
1831 snprintf(adapter->fw_version, sizeof(adapter->fw_version), 1800 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1832 "%d.%d%d", 1801 "%2d.%2d-%d",
1833 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, 1802 fw.invm_major, fw.invm_minor, fw.invm_img_type);
1834 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, 1803 break;
1835 (fw_version & IGB_BUILD_MASK)); 1804
1805 default:
1806 /* if option is rom valid, display its version too */
1807 if (fw.or_valid) {
1808 snprintf(adapter->fw_version,
1809 sizeof(adapter->fw_version),
1810 "%d.%d, 0x%08x, %d.%d.%d",
1811 fw.eep_major, fw.eep_minor, fw.etrack_id,
1812 fw.or_major, fw.or_build, fw.or_patch);
1813 /* no option rom */
1814 } else {
1815 snprintf(adapter->fw_version,
1816 sizeof(adapter->fw_version),
1817 "%d.%d, 0x%08x",
1818 fw.eep_major, fw.eep_minor, fw.etrack_id);
1819 }
1820 break;
1836 } 1821 }
1837out:
1838 return; 1822 return;
1839} 1823}
1840 1824
@@ -1849,8 +1833,7 @@ out:
1849 * The OS initialization, configuring of the adapter private structure, 1833 * The OS initialization, configuring of the adapter private structure,
1850 * and a hardware reset occur. 1834 * and a hardware reset occur.
1851 **/ 1835 **/
1852static int __devinit igb_probe(struct pci_dev *pdev, 1836static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1853 const struct pci_device_id *ent)
1854{ 1837{
1855 struct net_device *netdev; 1838 struct net_device *netdev;
1856 struct igb_adapter *adapter; 1839 struct igb_adapter *adapter;
@@ -1861,7 +1844,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1861 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1844 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1862 unsigned long mmio_start, mmio_len; 1845 unsigned long mmio_start, mmio_len;
1863 int err, pci_using_dac; 1846 int err, pci_using_dac;
1864 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1865 u8 part_str[E1000_PBANUM_LENGTH]; 1847 u8 part_str[E1000_PBANUM_LENGTH];
1866 1848
1867 /* Catch broken hardware that put the wrong VF device ID in 1849 /* Catch broken hardware that put the wrong VF device ID in
@@ -2069,28 +2051,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2069 2051
2070 igb_validate_mdi_setting(hw); 2052 igb_validate_mdi_setting(hw);
2071 2053
2072 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 2054 /* By default, support wake on port A */
2073 * enable the ACPI Magic Packet filter
2074 */
2075
2076 if (hw->bus.func == 0) 2055 if (hw->bus.func == 0)
2077 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 2056 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2078 else if (hw->mac.type >= e1000_82580) 2057
2058 /* Check the NVM for wake support on non-port A ports */
2059 if (hw->mac.type >= e1000_82580)
2079 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2060 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2080 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2061 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2081 &eeprom_data); 2062 &eeprom_data);
2082 else if (hw->bus.func == 1) 2063 else if (hw->bus.func == 1)
2083 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 2064 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2084 2065
2085 if (eeprom_data & eeprom_apme_mask) 2066 if (eeprom_data & IGB_EEPROM_APME)
2086 adapter->eeprom_wol |= E1000_WUFC_MAG; 2067 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2087 2068
2088 /* now that we have the eeprom settings, apply the special cases where 2069 /* now that we have the eeprom settings, apply the special cases where
2089 * the eeprom may be wrong or the board simply won't support wake on 2070 * the eeprom may be wrong or the board simply won't support wake on
2090 * lan on a particular port */ 2071 * lan on a particular port */
2091 switch (pdev->device) { 2072 switch (pdev->device) {
2092 case E1000_DEV_ID_82575GB_QUAD_COPPER: 2073 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2093 adapter->eeprom_wol = 0; 2074 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2094 break; 2075 break;
2095 case E1000_DEV_ID_82575EB_FIBER_SERDES: 2076 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2096 case E1000_DEV_ID_82576_FIBER: 2077 case E1000_DEV_ID_82576_FIBER:
@@ -2098,24 +2079,38 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2098 /* Wake events only supported on port A for dual fiber 2079 /* Wake events only supported on port A for dual fiber
2099 * regardless of eeprom setting */ 2080 * regardless of eeprom setting */
2100 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 2081 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2101 adapter->eeprom_wol = 0; 2082 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2102 break; 2083 break;
2103 case E1000_DEV_ID_82576_QUAD_COPPER: 2084 case E1000_DEV_ID_82576_QUAD_COPPER:
2104 case E1000_DEV_ID_82576_QUAD_COPPER_ET2: 2085 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
2105 /* if quad port adapter, disable WoL on all but port A */ 2086 /* if quad port adapter, disable WoL on all but port A */
2106 if (global_quad_port_a != 0) 2087 if (global_quad_port_a != 0)
2107 adapter->eeprom_wol = 0; 2088 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2108 else 2089 else
2109 adapter->flags |= IGB_FLAG_QUAD_PORT_A; 2090 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2110 /* Reset for multiple quad port adapters */ 2091 /* Reset for multiple quad port adapters */
2111 if (++global_quad_port_a == 4) 2092 if (++global_quad_port_a == 4)
2112 global_quad_port_a = 0; 2093 global_quad_port_a = 0;
2113 break; 2094 break;
2095 default:
2096 /* If the device can't wake, don't set software support */
2097 if (!device_can_wakeup(&adapter->pdev->dev))
2098 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2114 } 2099 }
2115 2100
2116 /* initialize the wol settings based on the eeprom settings */ 2101 /* initialize the wol settings based on the eeprom settings */
2117 adapter->wol = adapter->eeprom_wol; 2102 if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
2118 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 2103 adapter->wol |= E1000_WUFC_MAG;
2104
2105 /* Some vendors want WoL disabled by default, but still supported */
2106 if ((hw->mac.type == e1000_i350) &&
2107 (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
2108 adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
2109 adapter->wol = 0;
2110 }
2111
2112 device_set_wakeup_enable(&adapter->pdev->dev,
2113 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
2119 2114
2120 /* reset the hardware with the new settings */ 2115 /* reset the hardware with the new settings */
2121 igb_reset(adapter); 2116 igb_reset(adapter);
@@ -2141,10 +2136,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2141 2136
2142#endif 2137#endif
2143 2138
2144#ifdef CONFIG_IGB_PTP
2145 /* do hw tstamp init after resetting */ 2139 /* do hw tstamp init after resetting */
2146 igb_ptp_init(adapter); 2140 igb_ptp_init(adapter);
2147#endif /* CONFIG_IGB_PTP */
2148 2141
2149 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2142 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2150 /* print bus type/speed/width info */ 2143 /* print bus type/speed/width info */
@@ -2212,16 +2205,14 @@ err_dma:
2212 * Hot-Plug event, or because the driver is going to be removed from 2205 * Hot-Plug event, or because the driver is going to be removed from
2213 * memory. 2206 * memory.
2214 **/ 2207 **/
2215static void __devexit igb_remove(struct pci_dev *pdev) 2208static void igb_remove(struct pci_dev *pdev)
2216{ 2209{
2217 struct net_device *netdev = pci_get_drvdata(pdev); 2210 struct net_device *netdev = pci_get_drvdata(pdev);
2218 struct igb_adapter *adapter = netdev_priv(netdev); 2211 struct igb_adapter *adapter = netdev_priv(netdev);
2219 struct e1000_hw *hw = &adapter->hw; 2212 struct e1000_hw *hw = &adapter->hw;
2220 2213
2221 pm_runtime_get_noresume(&pdev->dev); 2214 pm_runtime_get_noresume(&pdev->dev);
2222#ifdef CONFIG_IGB_PTP
2223 igb_ptp_stop(adapter); 2215 igb_ptp_stop(adapter);
2224#endif /* CONFIG_IGB_PTP */
2225 2216
2226 /* 2217 /*
2227 * The watchdog timer may be rescheduled, so explicitly 2218 * The watchdog timer may be rescheduled, so explicitly
@@ -2294,7 +2285,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2294 * mor expensive time wise to disable SR-IOV than it is to allocate and free 2285 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2295 * the memory for the VFs. 2286 * the memory for the VFs.
2296 **/ 2287 **/
2297static void __devinit igb_probe_vfs(struct igb_adapter * adapter) 2288static void igb_probe_vfs(struct igb_adapter *adapter)
2298{ 2289{
2299#ifdef CONFIG_PCI_IOV 2290#ifdef CONFIG_PCI_IOV
2300 struct pci_dev *pdev = adapter->pdev; 2291 struct pci_dev *pdev = adapter->pdev;
@@ -2355,7 +2346,7 @@ out:
2355 * Fields are initialized based on PCI device information and 2346 * Fields are initialized based on PCI device information and
2356 * OS network device settings (MTU size). 2347 * OS network device settings (MTU size).
2357 **/ 2348 **/
2358static int __devinit igb_sw_init(struct igb_adapter *adapter) 2349static int igb_sw_init(struct igb_adapter *adapter)
2359{ 2350{
2360 struct e1000_hw *hw = &adapter->hw; 2351 struct e1000_hw *hw = &adapter->hw;
2361 struct net_device *netdev = adapter->netdev; 2352 struct net_device *netdev = adapter->netdev;
@@ -2461,7 +2452,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2461 GFP_ATOMIC); 2452 GFP_ATOMIC);
2462 2453
2463 /* This call may decrease the number of queues */ 2454 /* This call may decrease the number of queues */
2464 if (igb_init_interrupt_scheme(adapter)) { 2455 if (igb_init_interrupt_scheme(adapter, true)) {
2465 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 2456 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2466 return -ENOMEM; 2457 return -ENOMEM;
2467 } 2458 }
@@ -2531,6 +2522,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2531 if (err) 2522 if (err)
2532 goto err_req_irq; 2523 goto err_req_irq;
2533 2524
2525 /* Notify the stack of the actual queue counts. */
2526 err = netif_set_real_num_tx_queues(adapter->netdev,
2527 adapter->num_tx_queues);
2528 if (err)
2529 goto err_set_queues;
2530
2531 err = netif_set_real_num_rx_queues(adapter->netdev,
2532 adapter->num_rx_queues);
2533 if (err)
2534 goto err_set_queues;
2535
2534 /* From here on the code is the same as igb_up() */ 2536 /* From here on the code is the same as igb_up() */
2535 clear_bit(__IGB_DOWN, &adapter->state); 2537 clear_bit(__IGB_DOWN, &adapter->state);
2536 2538
@@ -2560,6 +2562,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2560 2562
2561 return 0; 2563 return 0;
2562 2564
2565err_set_queues:
2566 igb_free_irq(adapter);
2563err_req_irq: 2567err_req_irq:
2564 igb_release_hw_control(adapter); 2568 igb_release_hw_control(adapter);
2565 igb_power_down_link(adapter); 2569 igb_power_down_link(adapter);
@@ -2637,10 +2641,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2637 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2641 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2638 tx_ring->size = ALIGN(tx_ring->size, 4096); 2642 tx_ring->size = ALIGN(tx_ring->size, 4096);
2639 2643
2640 tx_ring->desc = dma_alloc_coherent(dev, 2644 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
2641 tx_ring->size, 2645 &tx_ring->dma, GFP_KERNEL);
2642 &tx_ring->dma,
2643 GFP_KERNEL);
2644 if (!tx_ring->desc) 2646 if (!tx_ring->desc)
2645 goto err; 2647 goto err;
2646 2648
@@ -2777,18 +2779,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2777 if (!rx_ring->rx_buffer_info) 2779 if (!rx_ring->rx_buffer_info)
2778 goto err; 2780 goto err;
2779 2781
2780
2781 /* Round up to nearest 4K */ 2782 /* Round up to nearest 4K */
2782 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 2783 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
2783 rx_ring->size = ALIGN(rx_ring->size, 4096); 2784 rx_ring->size = ALIGN(rx_ring->size, 4096);
2784 2785
2785 rx_ring->desc = dma_alloc_coherent(dev, 2786 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
2786 rx_ring->size, 2787 &rx_ring->dma, GFP_KERNEL);
2787 &rx_ring->dma,
2788 GFP_KERNEL);
2789 if (!rx_ring->desc) 2788 if (!rx_ring->desc)
2790 goto err; 2789 goto err;
2791 2790
2791 rx_ring->next_to_alloc = 0;
2792 rx_ring->next_to_clean = 0; 2792 rx_ring->next_to_clean = 0;
2793 rx_ring->next_to_use = 0; 2793 rx_ring->next_to_use = 0;
2794 2794
@@ -2893,18 +2893,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2893 2893
2894 /* Don't need to set TUOFL or IPOFL, they default to 1 */ 2894 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2895 wr32(E1000_RXCSUM, rxcsum); 2895 wr32(E1000_RXCSUM, rxcsum);
2896 /*
2897 * Generate RSS hash based on TCP port numbers and/or
2898 * IPv4/v6 src and dst addresses since UDP cannot be
2899 * hashed reliably due to IP fragmentation
2900 */
2901 2896
2897 /* Generate RSS hash based on packet types, TCP/UDP
2898 * port numbers and/or IPv4/v6 src and dst addresses
2899 */
2902 mrqc = E1000_MRQC_RSS_FIELD_IPV4 | 2900 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2903 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2901 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2904 E1000_MRQC_RSS_FIELD_IPV6 | 2902 E1000_MRQC_RSS_FIELD_IPV6 |
2905 E1000_MRQC_RSS_FIELD_IPV6_TCP | 2903 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2906 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; 2904 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
2907 2905
2906 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
2907 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
2908 if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
2909 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
2910
2908 /* If VMDq is enabled then we set the appropriate mode for that, else 2911 /* If VMDq is enabled then we set the appropriate mode for that, else
2909 * we default to RSS so that an RSS hash is calculated per packet even 2912 * we default to RSS so that an RSS hash is calculated per packet even
2910 * if we are only using one queue */ 2913 * if we are only using one queue */
@@ -3106,16 +3109,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3106 3109
3107 /* set descriptor configuration */ 3110 /* set descriptor configuration */
3108 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 3111 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
3109#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3112 srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3110 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 3113 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
3111#else
3112 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
3113#endif
3114 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3115#ifdef CONFIG_IGB_PTP
3116 if (hw->mac.type >= e1000_82580) 3114 if (hw->mac.type >= e1000_82580)
3117 srrctl |= E1000_SRRCTL_TIMESTAMP; 3115 srrctl |= E1000_SRRCTL_TIMESTAMP;
3118#endif /* CONFIG_IGB_PTP */
3119 /* Only set Drop Enable if we are supporting multiple queues */ 3116 /* Only set Drop Enable if we are supporting multiple queues */
3120 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 3117 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3121 srrctl |= E1000_SRRCTL_DROP_EN; 3118 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3302,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3305 unsigned long size; 3302 unsigned long size;
3306 u16 i; 3303 u16 i;
3307 3304
3305 if (rx_ring->skb)
3306 dev_kfree_skb(rx_ring->skb);
3307 rx_ring->skb = NULL;
3308
3308 if (!rx_ring->rx_buffer_info) 3309 if (!rx_ring->rx_buffer_info)
3309 return; 3310 return;
3310 3311
3311 /* Free all the Rx ring sk_buffs */ 3312 /* Free all the Rx ring sk_buffs */
3312 for (i = 0; i < rx_ring->count; i++) { 3313 for (i = 0; i < rx_ring->count; i++) {
3313 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 3314 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
3314 if (buffer_info->dma) {
3315 dma_unmap_single(rx_ring->dev,
3316 buffer_info->dma,
3317 IGB_RX_HDR_LEN,
3318 DMA_FROM_DEVICE);
3319 buffer_info->dma = 0;
3320 }
3321 3315
3322 if (buffer_info->skb) { 3316 if (!buffer_info->page)
3323 dev_kfree_skb(buffer_info->skb); 3317 continue;
3324 buffer_info->skb = NULL; 3318
3325 } 3319 dma_unmap_page(rx_ring->dev,
3326 if (buffer_info->page_dma) { 3320 buffer_info->dma,
3327 dma_unmap_page(rx_ring->dev, 3321 PAGE_SIZE,
3328 buffer_info->page_dma, 3322 DMA_FROM_DEVICE);
3329 PAGE_SIZE / 2, 3323 __free_page(buffer_info->page);
3330 DMA_FROM_DEVICE); 3324
3331 buffer_info->page_dma = 0; 3325 buffer_info->page = NULL;
3332 }
3333 if (buffer_info->page) {
3334 put_page(buffer_info->page);
3335 buffer_info->page = NULL;
3336 buffer_info->page_offset = 0;
3337 }
3338 } 3326 }
3339 3327
3340 size = sizeof(struct igb_rx_buffer) * rx_ring->count; 3328 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3331,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3343 /* Zero out the descriptor ring */ 3331 /* Zero out the descriptor ring */
3344 memset(rx_ring->desc, 0, rx_ring->size); 3332 memset(rx_ring->desc, 0, rx_ring->size);
3345 3333
3334 rx_ring->next_to_alloc = 0;
3346 rx_ring->next_to_clean = 0; 3335 rx_ring->next_to_clean = 0;
3347 rx_ring->next_to_use = 0; 3336 rx_ring->next_to_use = 0;
3348} 3337}
@@ -4028,6 +4017,9 @@ static int igb_tso(struct igb_ring *tx_ring,
4028 u32 vlan_macip_lens, type_tucmd; 4017 u32 vlan_macip_lens, type_tucmd;
4029 u32 mss_l4len_idx, l4len; 4018 u32 mss_l4len_idx, l4len;
4030 4019
4020 if (skb->ip_summed != CHECKSUM_PARTIAL)
4021 return 0;
4022
4031 if (!skb_is_gso(skb)) 4023 if (!skb_is_gso(skb))
4032 return 0; 4024 return 0;
4033 4025
@@ -4148,26 +4140,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4148 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); 4140 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
4149} 4141}
4150 4142
4151static __le32 igb_tx_cmd_type(u32 tx_flags) 4143#define IGB_SET_FLAG(_input, _flag, _result) \
4144 ((_flag <= _result) ? \
4145 ((u32)(_input & _flag) * (_result / _flag)) : \
4146 ((u32)(_input & _flag) / (_flag / _result)))
4147
4148static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
4152{ 4149{
4153 /* set type for advanced descriptor with frame checksum insertion */ 4150 /* set type for advanced descriptor with frame checksum insertion */
4154 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA | 4151 u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
4155 E1000_ADVTXD_DCMD_IFCS | 4152 E1000_ADVTXD_DCMD_DEXT |
4156 E1000_ADVTXD_DCMD_DEXT); 4153 E1000_ADVTXD_DCMD_IFCS;
4157 4154
4158 /* set HW vlan bit if vlan is present */ 4155 /* set HW vlan bit if vlan is present */
4159 if (tx_flags & IGB_TX_FLAGS_VLAN) 4156 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
4160 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); 4157 (E1000_ADVTXD_DCMD_VLE));
4158
4159 /* set segmentation bits for TSO */
4160 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
4161 (E1000_ADVTXD_DCMD_TSE));
4161 4162
4162#ifdef CONFIG_IGB_PTP
4163 /* set timestamp bit if present */ 4163 /* set timestamp bit if present */
4164 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) 4164 cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
4165 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); 4165 (E1000_ADVTXD_MAC_TSTAMP));
4166#endif /* CONFIG_IGB_PTP */
4167 4166
4168 /* set segmentation bits for TSO */ 4167 /* insert frame checksum */
4169 if (tx_flags & IGB_TX_FLAGS_TSO) 4168 cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
4170 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4171 4169
4172 return cmd_type; 4170 return cmd_type;
4173} 4171}
@@ -4178,19 +4176,19 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4178{ 4176{
4179 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT; 4177 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4180 4178
4181 /* 82575 requires a unique index per ring if any offload is enabled */ 4179 /* 82575 requires a unique index per ring */
4182 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) && 4180 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4183 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
4184 olinfo_status |= tx_ring->reg_idx << 4; 4181 olinfo_status |= tx_ring->reg_idx << 4;
4185 4182
4186 /* insert L4 checksum */ 4183 /* insert L4 checksum */
4187 if (tx_flags & IGB_TX_FLAGS_CSUM) { 4184 olinfo_status |= IGB_SET_FLAG(tx_flags,
4188 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 4185 IGB_TX_FLAGS_CSUM,
4186 (E1000_TXD_POPTS_TXSM << 8));
4189 4187
4190 /* insert IPv4 checksum */ 4188 /* insert IPv4 checksum */
4191 if (tx_flags & IGB_TX_FLAGS_IPV4) 4189 olinfo_status |= IGB_SET_FLAG(tx_flags,
4192 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 4190 IGB_TX_FLAGS_IPV4,
4193 } 4191 (E1000_TXD_POPTS_IXSM << 8));
4194 4192
4195 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 4193 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4196} 4194}
@@ -4209,33 +4207,37 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4209 struct sk_buff *skb = first->skb; 4207 struct sk_buff *skb = first->skb;
4210 struct igb_tx_buffer *tx_buffer; 4208 struct igb_tx_buffer *tx_buffer;
4211 union e1000_adv_tx_desc *tx_desc; 4209 union e1000_adv_tx_desc *tx_desc;
4210 struct skb_frag_struct *frag;
4212 dma_addr_t dma; 4211 dma_addr_t dma;
4213 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; 4212 unsigned int data_len, size;
4214 unsigned int data_len = skb->data_len;
4215 unsigned int size = skb_headlen(skb);
4216 unsigned int paylen = skb->len - hdr_len;
4217 __le32 cmd_type;
4218 u32 tx_flags = first->tx_flags; 4213 u32 tx_flags = first->tx_flags;
4214 u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
4219 u16 i = tx_ring->next_to_use; 4215 u16 i = tx_ring->next_to_use;
4220 4216
4221 tx_desc = IGB_TX_DESC(tx_ring, i); 4217 tx_desc = IGB_TX_DESC(tx_ring, i);
4222 4218
4223 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen); 4219 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
4224 cmd_type = igb_tx_cmd_type(tx_flags); 4220
4221 size = skb_headlen(skb);
4222 data_len = skb->data_len;
4225 4223
4226 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); 4224 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4227 if (dma_mapping_error(tx_ring->dev, dma))
4228 goto dma_error;
4229 4225
4230 /* record length, and DMA address */ 4226 tx_buffer = first;
4231 dma_unmap_len_set(first, len, size); 4227
4232 dma_unmap_addr_set(first, dma, dma); 4228 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
4233 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4229 if (dma_mapping_error(tx_ring->dev, dma))
4230 goto dma_error;
4231
4232 /* record length, and DMA address */
4233 dma_unmap_len_set(tx_buffer, len, size);
4234 dma_unmap_addr_set(tx_buffer, dma, dma);
4235
4236 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4234 4237
4235 for (;;) {
4236 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) { 4238 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4237 tx_desc->read.cmd_type_len = 4239 tx_desc->read.cmd_type_len =
4238 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD); 4240 cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
4239 4241
4240 i++; 4242 i++;
4241 tx_desc++; 4243 tx_desc++;
@@ -4243,18 +4245,18 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4243 tx_desc = IGB_TX_DESC(tx_ring, 0); 4245 tx_desc = IGB_TX_DESC(tx_ring, 0);
4244 i = 0; 4246 i = 0;
4245 } 4247 }
4248 tx_desc->read.olinfo_status = 0;
4246 4249
4247 dma += IGB_MAX_DATA_PER_TXD; 4250 dma += IGB_MAX_DATA_PER_TXD;
4248 size -= IGB_MAX_DATA_PER_TXD; 4251 size -= IGB_MAX_DATA_PER_TXD;
4249 4252
4250 tx_desc->read.olinfo_status = 0;
4251 tx_desc->read.buffer_addr = cpu_to_le64(dma); 4253 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4252 } 4254 }
4253 4255
4254 if (likely(!data_len)) 4256 if (likely(!data_len))
4255 break; 4257 break;
4256 4258
4257 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); 4259 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
4258 4260
4259 i++; 4261 i++;
4260 tx_desc++; 4262 tx_desc++;
@@ -4262,32 +4264,22 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4262 tx_desc = IGB_TX_DESC(tx_ring, 0); 4264 tx_desc = IGB_TX_DESC(tx_ring, 0);
4263 i = 0; 4265 i = 0;
4264 } 4266 }
4267 tx_desc->read.olinfo_status = 0;
4265 4268
4266 size = skb_frag_size(frag); 4269 size = skb_frag_size(frag);
4267 data_len -= size; 4270 data_len -= size;
4268 4271
4269 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, 4272 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4270 size, DMA_TO_DEVICE); 4273 size, DMA_TO_DEVICE);
4271 if (dma_mapping_error(tx_ring->dev, dma))
4272 goto dma_error;
4273 4274
4274 tx_buffer = &tx_ring->tx_buffer_info[i]; 4275 tx_buffer = &tx_ring->tx_buffer_info[i];
4275 dma_unmap_len_set(tx_buffer, len, size);
4276 dma_unmap_addr_set(tx_buffer, dma, dma);
4277
4278 tx_desc->read.olinfo_status = 0;
4279 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4280
4281 frag++;
4282 } 4276 }
4283 4277
4284 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4285
4286 /* write last descriptor with RS and EOP bits */ 4278 /* write last descriptor with RS and EOP bits */
4287 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD); 4279 cmd_type |= size | IGB_TXD_DCMD;
4288 if (unlikely(skb->no_fcs)) 4280 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
4289 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS)); 4281
4290 tx_desc->read.cmd_type_len = cmd_type; 4282 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4291 4283
4292 /* set the timestamp */ 4284 /* set the timestamp */
4293 first->time_stamp = jiffies; 4285 first->time_stamp = jiffies;
@@ -4372,9 +4364,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4372netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, 4364netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4373 struct igb_ring *tx_ring) 4365 struct igb_ring *tx_ring)
4374{ 4366{
4375#ifdef CONFIG_IGB_PTP
4376 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); 4367 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4377#endif /* CONFIG_IGB_PTP */
4378 struct igb_tx_buffer *first; 4368 struct igb_tx_buffer *first;
4379 int tso; 4369 int tso;
4380 u32 tx_flags = 0; 4370 u32 tx_flags = 0;
@@ -4397,7 +4387,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4397 first->bytecount = skb->len; 4387 first->bytecount = skb->len;
4398 first->gso_segs = 1; 4388 first->gso_segs = 1;
4399 4389
4400#ifdef CONFIG_IGB_PTP
4401 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 4390 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4402 !(adapter->ptp_tx_skb))) { 4391 !(adapter->ptp_tx_skb))) {
4403 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 4392 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4396,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4407 if (adapter->hw.mac.type == e1000_82576) 4396 if (adapter->hw.mac.type == e1000_82576)
4408 schedule_work(&adapter->ptp_tx_work); 4397 schedule_work(&adapter->ptp_tx_work);
4409 } 4398 }
4410#endif /* CONFIG_IGB_PTP */
4411 4399
4412 if (vlan_tx_tag_present(skb)) { 4400 if (vlan_tx_tag_present(skb)) {
4413 tx_flags |= IGB_TX_FLAGS_VLAN; 4401 tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4455,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4467 * The minimum packet size with TCTL.PSP set is 17 so pad the skb 4455 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4468 * in order to meet this minimum size requirement. 4456 * in order to meet this minimum size requirement.
4469 */ 4457 */
4470 if (skb->len < 17) { 4458 if (unlikely(skb->len < 17)) {
4471 if (skb_padto(skb, 17)) 4459 if (skb_pad(skb, 17 - skb->len))
4472 return NETDEV_TX_OK; 4460 return NETDEV_TX_OK;
4473 skb->len = 17; 4461 skb->len = 17;
4462 skb_set_tail_pointer(skb, 17);
4474 } 4463 }
4475 4464
4476 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); 4465 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4789,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4800 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4789 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4801 } 4790 }
4802 4791
4803#ifdef CONFIG_IGB_PTP
4804 if (icr & E1000_ICR_TS) { 4792 if (icr & E1000_ICR_TS) {
4805 u32 tsicr = rd32(E1000_TSICR); 4793 u32 tsicr = rd32(E1000_TSICR);
4806 4794
@@ -4811,7 +4799,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4811 schedule_work(&adapter->ptp_tx_work); 4799 schedule_work(&adapter->ptp_tx_work);
4812 } 4800 }
4813 } 4801 }
4814#endif /* CONFIG_IGB_PTP */
4815 4802
4816 wr32(E1000_EIMS, adapter->eims_other); 4803 wr32(E1000_EIMS, adapter->eims_other);
4817 4804
@@ -4851,45 +4838,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
4851} 4838}
4852 4839
4853#ifdef CONFIG_IGB_DCA 4840#ifdef CONFIG_IGB_DCA
4841static void igb_update_tx_dca(struct igb_adapter *adapter,
4842 struct igb_ring *tx_ring,
4843 int cpu)
4844{
4845 struct e1000_hw *hw = &adapter->hw;
4846 u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
4847
4848 if (hw->mac.type != e1000_82575)
4849 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
4850
4851 /*
4852 * We can enable relaxed ordering for reads, but not writes when
4853 * DCA is enabled. This is due to a known issue in some chipsets
4854 * which will cause the DCA tag to be cleared.
4855 */
4856 txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
4857 E1000_DCA_TXCTRL_DATA_RRO_EN |
4858 E1000_DCA_TXCTRL_DESC_DCA_EN;
4859
4860 wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
4861}
4862
4863static void igb_update_rx_dca(struct igb_adapter *adapter,
4864 struct igb_ring *rx_ring,
4865 int cpu)
4866{
4867 struct e1000_hw *hw = &adapter->hw;
4868 u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
4869
4870 if (hw->mac.type != e1000_82575)
4871 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
4872
4873 /*
4874 * We can enable relaxed ordering for reads, but not writes when
4875 * DCA is enabled. This is due to a known issue in some chipsets
4876 * which will cause the DCA tag to be cleared.
4877 */
4878 rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
4879 E1000_DCA_RXCTRL_DESC_DCA_EN;
4880
4881 wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
4882}
4883
4854static void igb_update_dca(struct igb_q_vector *q_vector) 4884static void igb_update_dca(struct igb_q_vector *q_vector)
4855{ 4885{
4856 struct igb_adapter *adapter = q_vector->adapter; 4886 struct igb_adapter *adapter = q_vector->adapter;
4857 struct e1000_hw *hw = &adapter->hw;
4858 int cpu = get_cpu(); 4887 int cpu = get_cpu();
4859 4888
4860 if (q_vector->cpu == cpu) 4889 if (q_vector->cpu == cpu)
4861 goto out_no_update; 4890 goto out_no_update;
4862 4891
4863 if (q_vector->tx.ring) { 4892 if (q_vector->tx.ring)
4864 int q = q_vector->tx.ring->reg_idx; 4893 igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
4865 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q)); 4894
4866 if (hw->mac.type == e1000_82575) { 4895 if (q_vector->rx.ring)
4867 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK; 4896 igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
4868 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4897
4869 } else {
4870 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4871 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4872 E1000_DCA_TXCTRL_CPUID_SHIFT;
4873 }
4874 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4875 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4876 }
4877 if (q_vector->rx.ring) {
4878 int q = q_vector->rx.ring->reg_idx;
4879 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4880 if (hw->mac.type == e1000_82575) {
4881 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
4882 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4883 } else {
4884 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4885 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4886 E1000_DCA_RXCTRL_CPUID_SHIFT;
4887 }
4888 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
4889 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
4890 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
4891 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
4892 }
4893 q_vector->cpu = cpu; 4898 q_vector->cpu = cpu;
4894out_no_update: 4899out_no_update:
4895 put_cpu(); 4900 put_cpu();
@@ -5545,7 +5550,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5545 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5550 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5546 } 5551 }
5547 5552
5548#ifdef CONFIG_IGB_PTP
5549 if (icr & E1000_ICR_TS) { 5553 if (icr & E1000_ICR_TS) {
5550 u32 tsicr = rd32(E1000_TSICR); 5554 u32 tsicr = rd32(E1000_TSICR);
5551 5555
@@ -5556,7 +5560,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5556 schedule_work(&adapter->ptp_tx_work); 5560 schedule_work(&adapter->ptp_tx_work);
5557 } 5561 }
5558 } 5562 }
5559#endif /* CONFIG_IGB_PTP */
5560 5563
5561 napi_schedule(&q_vector->napi); 5564 napi_schedule(&q_vector->napi);
5562 5565
@@ -5599,7 +5602,6 @@ static irqreturn_t igb_intr(int irq, void *data)
5599 mod_timer(&adapter->watchdog_timer, jiffies + 1); 5602 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5600 } 5603 }
5601 5604
5602#ifdef CONFIG_IGB_PTP
5603 if (icr & E1000_ICR_TS) { 5605 if (icr & E1000_ICR_TS) {
5604 u32 tsicr = rd32(E1000_TSICR); 5606 u32 tsicr = rd32(E1000_TSICR);
5605 5607
@@ -5610,7 +5612,6 @@ static irqreturn_t igb_intr(int irq, void *data)
5610 schedule_work(&adapter->ptp_tx_work); 5612 schedule_work(&adapter->ptp_tx_work);
5611 } 5613 }
5612 } 5614 }
5613#endif /* CONFIG_IGB_PTP */
5614 5615
5615 napi_schedule(&q_vector->napi); 5616 napi_schedule(&q_vector->napi);
5616 5617
@@ -5840,6 +5841,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5840 return !!budget; 5841 return !!budget;
5841} 5842}
5842 5843
5844/**
5845 * igb_reuse_rx_page - page flip buffer and store it back on the ring
5846 * @rx_ring: rx descriptor ring to store buffers on
5847 * @old_buff: donor buffer to have page reused
5848 *
5849 * Synchronizes page for reuse by the adapter
5850 **/
5851static void igb_reuse_rx_page(struct igb_ring *rx_ring,
5852 struct igb_rx_buffer *old_buff)
5853{
5854 struct igb_rx_buffer *new_buff;
5855 u16 nta = rx_ring->next_to_alloc;
5856
5857 new_buff = &rx_ring->rx_buffer_info[nta];
5858
5859 /* update, and store next to alloc */
5860 nta++;
5861 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
5862
5863 /* transfer page from old buffer to new buffer */
5864 memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
5865
5866 /* sync the buffer for use by the device */
5867 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
5868 old_buff->page_offset,
5869 IGB_RX_BUFSZ,
5870 DMA_FROM_DEVICE);
5871}
5872
5873/**
5874 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
5875 * @rx_ring: rx descriptor ring to transact packets on
5876 * @rx_buffer: buffer containing page to add
5877 * @rx_desc: descriptor containing length of buffer written by hardware
5878 * @skb: sk_buff to place the data into
5879 *
5880 * This function will add the data contained in rx_buffer->page to the skb.
5881 * This is done either through a direct copy if the data in the buffer is
5882 * less than the skb header size, otherwise it will just attach the page as
5883 * a frag to the skb.
5884 *
5885 * The function will then update the page offset if necessary and return
5886 * true if the buffer can be reused by the adapter.
5887 **/
5888static bool igb_add_rx_frag(struct igb_ring *rx_ring,
5889 struct igb_rx_buffer *rx_buffer,
5890 union e1000_adv_rx_desc *rx_desc,
5891 struct sk_buff *skb)
5892{
5893 struct page *page = rx_buffer->page;
5894 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
5895
5896 if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
5897 unsigned char *va = page_address(page) + rx_buffer->page_offset;
5898
5899 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
5900 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
5901 va += IGB_TS_HDR_LEN;
5902 size -= IGB_TS_HDR_LEN;
5903 }
5904
5905 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
5906
5907 /* we can reuse buffer as-is, just make sure it is local */
5908 if (likely(page_to_nid(page) == numa_node_id()))
5909 return true;
5910
5911 /* this page cannot be reused so discard it */
5912 put_page(page);
5913 return false;
5914 }
5915
5916 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
5917 rx_buffer->page_offset, size, IGB_RX_BUFSZ);
5918
5919 /* avoid re-using remote pages */
5920 if (unlikely(page_to_nid(page) != numa_node_id()))
5921 return false;
5922
5923#if (PAGE_SIZE < 8192)
5924 /* if we are only owner of page we can reuse it */
5925 if (unlikely(page_count(page) != 1))
5926 return false;
5927
5928 /* flip page offset to other buffer */
5929 rx_buffer->page_offset ^= IGB_RX_BUFSZ;
5930
5931 /*
5932 * since we are the only owner of the page and we need to
5933 * increment it, just set the value to 2 in order to avoid
5934 * an unnecessary locked operation
5935 */
5936 atomic_set(&page->_count, 2);
5937#else
5938 /* move offset up to the next cache line */
5939 rx_buffer->page_offset += SKB_DATA_ALIGN(size);
5940
5941 if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
5942 return false;
5943
5944 /* bump ref count on page before it is given to the stack */
5945 get_page(page);
5946#endif
5947
5948 return true;
5949}
5950
5951static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
5952 union e1000_adv_rx_desc *rx_desc,
5953 struct sk_buff *skb)
5954{
5955 struct igb_rx_buffer *rx_buffer;
5956 struct page *page;
5957
5958 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
5959
5960 /*
5961 * This memory barrier is needed to keep us from reading
5962 * any other fields out of the rx_desc until we know the
5963 * RXD_STAT_DD bit is set
5964 */
5965 rmb();
5966
5967 page = rx_buffer->page;
5968 prefetchw(page);
5969
5970 if (likely(!skb)) {
5971 void *page_addr = page_address(page) +
5972 rx_buffer->page_offset;
5973
5974 /* prefetch first cache line of first page */
5975 prefetch(page_addr);
5976#if L1_CACHE_BYTES < 128
5977 prefetch(page_addr + L1_CACHE_BYTES);
5978#endif
5979
5980 /* allocate a skb to store the frags */
5981 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
5982 IGB_RX_HDR_LEN);
5983 if (unlikely(!skb)) {
5984 rx_ring->rx_stats.alloc_failed++;
5985 return NULL;
5986 }
5987
5988 /*
5989 * we will be copying header into skb->data in
5990 * pskb_may_pull so it is in our interest to prefetch
5991 * it now to avoid a possible cache miss
5992 */
5993 prefetchw(skb->data);
5994 }
5995
5996 /* we are reusing so sync this buffer for CPU use */
5997 dma_sync_single_range_for_cpu(rx_ring->dev,
5998 rx_buffer->dma,
5999 rx_buffer->page_offset,
6000 IGB_RX_BUFSZ,
6001 DMA_FROM_DEVICE);
6002
6003 /* pull page into skb */
6004 if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
6005 /* hand second half of page back to the ring */
6006 igb_reuse_rx_page(rx_ring, rx_buffer);
6007 } else {
6008 /* we are not reusing the buffer so unmap it */
6009 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
6010 PAGE_SIZE, DMA_FROM_DEVICE);
6011 }
6012
6013 /* clear contents of rx_buffer */
6014 rx_buffer->page = NULL;
6015
6016 return skb;
6017}
6018
5843static inline void igb_rx_checksum(struct igb_ring *ring, 6019static inline void igb_rx_checksum(struct igb_ring *ring,
5844 union e1000_adv_rx_desc *rx_desc, 6020 union e1000_adv_rx_desc *rx_desc,
5845 struct sk_buff *skb) 6021 struct sk_buff *skb)
@@ -5889,224 +6065,389 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5889 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 6065 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5890} 6066}
5891 6067
5892static void igb_rx_vlan(struct igb_ring *ring, 6068/**
5893 union e1000_adv_rx_desc *rx_desc, 6069 * igb_is_non_eop - process handling of non-EOP buffers
5894 struct sk_buff *skb) 6070 * @rx_ring: Rx ring being processed
6071 * @rx_desc: Rx descriptor for current buffer
6072 * @skb: current socket buffer containing buffer in progress
6073 *
6074 * This function updates next to clean. If the buffer is an EOP buffer
6075 * this function exits returning false, otherwise it will place the
6076 * sk_buff in the next buffer to be chained and return true indicating
6077 * that this is in fact a non-EOP buffer.
6078 **/
6079static bool igb_is_non_eop(struct igb_ring *rx_ring,
6080 union e1000_adv_rx_desc *rx_desc)
5895{ 6081{
5896 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) { 6082 u32 ntc = rx_ring->next_to_clean + 1;
5897 u16 vid;
5898 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5899 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5900 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5901 else
5902 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5903 6083
5904 __vlan_hwaccel_put_tag(skb, vid); 6084 /* fetch, update, and store next to clean */
5905 } 6085 ntc = (ntc < rx_ring->count) ? ntc : 0;
6086 rx_ring->next_to_clean = ntc;
6087
6088 prefetch(IGB_RX_DESC(rx_ring, ntc));
6089
6090 if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
6091 return false;
6092
6093 return true;
5906} 6094}
5907 6095
5908static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc) 6096/**
5909{ 6097 * igb_get_headlen - determine size of header for LRO/GRO
5910 /* HW will not DMA in data larger than the given buffer, even if it 6098 * @data: pointer to the start of the headers
5911 * parses the (NFS, of course) header to be larger. In that case, it 6099 * @max_len: total length of section to find headers in
5912 * fills the header buffer and spills the rest into the page. 6100 *
6101 * This function is meant to determine the length of headers that will
6102 * be recognized by hardware for LRO, and GRO offloads. The main
6103 * motivation of doing this is to only perform one pull for IPv4 TCP
6104 * packets so that we can do basic things like calculating the gso_size
6105 * based on the average data per packet.
6106 **/
6107static unsigned int igb_get_headlen(unsigned char *data,
6108 unsigned int max_len)
6109{
6110 union {
6111 unsigned char *network;
6112 /* l2 headers */
6113 struct ethhdr *eth;
6114 struct vlan_hdr *vlan;
6115 /* l3 headers */
6116 struct iphdr *ipv4;
6117 struct ipv6hdr *ipv6;
6118 } hdr;
6119 __be16 protocol;
6120 u8 nexthdr = 0; /* default to not TCP */
6121 u8 hlen;
6122
6123 /* this should never happen, but better safe than sorry */
6124 if (max_len < ETH_HLEN)
6125 return max_len;
6126
6127 /* initialize network frame pointer */
6128 hdr.network = data;
6129
6130 /* set first protocol and move network header forward */
6131 protocol = hdr.eth->h_proto;
6132 hdr.network += ETH_HLEN;
6133
6134 /* handle any vlan tag if present */
6135 if (protocol == __constant_htons(ETH_P_8021Q)) {
6136 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6137 return max_len;
6138
6139 protocol = hdr.vlan->h_vlan_encapsulated_proto;
6140 hdr.network += VLAN_HLEN;
6141 }
6142
6143 /* handle L3 protocols */
6144 if (protocol == __constant_htons(ETH_P_IP)) {
6145 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6146 return max_len;
6147
6148 /* access ihl as a u8 to avoid unaligned access on ia64 */
6149 hlen = (hdr.network[0] & 0x0F) << 2;
6150
6151 /* verify hlen meets minimum size requirements */
6152 if (hlen < sizeof(struct iphdr))
6153 return hdr.network - data;
6154
6155 /* record next protocol if header is present */
6156 if (!hdr.ipv4->frag_off)
6157 nexthdr = hdr.ipv4->protocol;
6158 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
6159 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6160 return max_len;
6161
6162 /* record next protocol */
6163 nexthdr = hdr.ipv6->nexthdr;
6164 hlen = sizeof(struct ipv6hdr);
6165 } else {
6166 return hdr.network - data;
6167 }
6168
6169 /* relocate pointer to start of L4 header */
6170 hdr.network += hlen;
6171
6172 /* finally sort out TCP */
6173 if (nexthdr == IPPROTO_TCP) {
6174 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
6175 return max_len;
6176
6177 /* access doff as a u8 to avoid unaligned access on ia64 */
6178 hlen = (hdr.network[12] & 0xF0) >> 2;
6179
6180 /* verify hlen meets minimum size requirements */
6181 if (hlen < sizeof(struct tcphdr))
6182 return hdr.network - data;
6183
6184 hdr.network += hlen;
6185 } else if (nexthdr == IPPROTO_UDP) {
6186 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
6187 return max_len;
6188
6189 hdr.network += sizeof(struct udphdr);
6190 }
6191
6192 /*
6193 * If everything has gone correctly hdr.network should be the
6194 * data section of the packet and will be the end of the header.
6195 * If not then it probably represents the end of the last recognized
6196 * header.
5913 */ 6197 */
5914 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 6198 if ((hdr.network - data) < max_len)
5915 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 6199 return hdr.network - data;
5916 if (hlen > IGB_RX_HDR_LEN) 6200 else
5917 hlen = IGB_RX_HDR_LEN; 6201 return max_len;
5918 return hlen;
5919} 6202}
5920 6203
5921static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) 6204/**
6205 * igb_pull_tail - igb specific version of skb_pull_tail
6206 * @rx_ring: rx descriptor ring packet is being transacted on
6207 * @rx_desc: pointer to the EOP Rx descriptor
6208 * @skb: pointer to current skb being adjusted
6209 *
6210 * This function is an igb specific version of __pskb_pull_tail. The
6211 * main difference between this version and the original function is that
6212 * this function can make several assumptions about the state of things
6213 * that allow for significant optimizations versus the standard function.
6214 * As a result we can do things like drop a frag and maintain an accurate
6215 * truesize for the skb.
6216 */
6217static void igb_pull_tail(struct igb_ring *rx_ring,
6218 union e1000_adv_rx_desc *rx_desc,
6219 struct sk_buff *skb)
5922{ 6220{
5923 struct igb_ring *rx_ring = q_vector->rx.ring; 6221 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
5924 union e1000_adv_rx_desc *rx_desc; 6222 unsigned char *va;
5925 const int current_node = numa_node_id(); 6223 unsigned int pull_len;
5926 unsigned int total_bytes = 0, total_packets = 0;
5927 u16 cleaned_count = igb_desc_unused(rx_ring);
5928 u16 i = rx_ring->next_to_clean;
5929 6224
5930 rx_desc = IGB_RX_DESC(rx_ring, i); 6225 /*
6226 * it is valid to use page_address instead of kmap since we are
6227 * working with pages allocated out of the lomem pool per
6228 * alloc_page(GFP_ATOMIC)
6229 */
6230 va = skb_frag_address(frag);
5931 6231
5932 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) { 6232 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
5933 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; 6233 /* retrieve timestamp from buffer */
5934 struct sk_buff *skb = buffer_info->skb; 6234 igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
5935 union e1000_adv_rx_desc *next_rxd;
5936 6235
5937 buffer_info->skb = NULL; 6236 /* update pointers to remove timestamp header */
5938 prefetch(skb->data); 6237 skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
6238 frag->page_offset += IGB_TS_HDR_LEN;
6239 skb->data_len -= IGB_TS_HDR_LEN;
6240 skb->len -= IGB_TS_HDR_LEN;
5939 6241
5940 i++; 6242 /* move va to start of packet data */
5941 if (i == rx_ring->count) 6243 va += IGB_TS_HDR_LEN;
5942 i = 0; 6244 }
5943 6245
5944 next_rxd = IGB_RX_DESC(rx_ring, i); 6246 /*
5945 prefetch(next_rxd); 6247 * we need the header to contain the greater of either ETH_HLEN or
6248 * 60 bytes if the skb->len is less than 60 for skb_pad.
6249 */
6250 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
5946 6251
5947 /* 6252 /* align pull length to size of long to optimize memcpy performance */
5948 * This memory barrier is needed to keep us from reading 6253 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
5949 * any other fields out of the rx_desc until we know the
5950 * RXD_STAT_DD bit is set
5951 */
5952 rmb();
5953 6254
5954 if (!skb_is_nonlinear(skb)) { 6255 /* update all of the pointers */
5955 __skb_put(skb, igb_get_hlen(rx_desc)); 6256 skb_frag_size_sub(frag, pull_len);
5956 dma_unmap_single(rx_ring->dev, buffer_info->dma, 6257 frag->page_offset += pull_len;
5957 IGB_RX_HDR_LEN, 6258 skb->data_len -= pull_len;
5958 DMA_FROM_DEVICE); 6259 skb->tail += pull_len;
5959 buffer_info->dma = 0; 6260}
6261
6262/**
6263 * igb_cleanup_headers - Correct corrupted or empty headers
6264 * @rx_ring: rx descriptor ring packet is being transacted on
6265 * @rx_desc: pointer to the EOP Rx descriptor
6266 * @skb: pointer to current skb being fixed
6267 *
6268 * Address the case where we are pulling data in on pages only
6269 * and as such no data is present in the skb header.
6270 *
6271 * In addition if skb is not at least 60 bytes we need to pad it so that
6272 * it is large enough to qualify as a valid Ethernet frame.
6273 *
6274 * Returns true if an error was encountered and skb was freed.
6275 **/
6276static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6277 union e1000_adv_rx_desc *rx_desc,
6278 struct sk_buff *skb)
6279{
6280
6281 if (unlikely((igb_test_staterr(rx_desc,
6282 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6283 struct net_device *netdev = rx_ring->netdev;
6284 if (!(netdev->features & NETIF_F_RXALL)) {
6285 dev_kfree_skb_any(skb);
6286 return true;
5960 } 6287 }
6288 }
5961 6289
5962 if (rx_desc->wb.upper.length) { 6290 /* place header in linear portion of buffer */
5963 u16 length = le16_to_cpu(rx_desc->wb.upper.length); 6291 if (skb_is_nonlinear(skb))
6292 igb_pull_tail(rx_ring, rx_desc, skb);
5964 6293
5965 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 6294 /* if skb_pad returns an error the skb was freed */
5966 buffer_info->page, 6295 if (unlikely(skb->len < 60)) {
5967 buffer_info->page_offset, 6296 int pad_len = 60 - skb->len;
5968 length);
5969 6297
5970 skb->len += length; 6298 if (skb_pad(skb, pad_len))
5971 skb->data_len += length; 6299 return true;
5972 skb->truesize += PAGE_SIZE / 2; 6300 __skb_put(skb, pad_len);
6301 }
5973 6302
5974 if ((page_count(buffer_info->page) != 1) || 6303 return false;
5975 (page_to_nid(buffer_info->page) != current_node)) 6304}
5976 buffer_info->page = NULL;
5977 else
5978 get_page(buffer_info->page);
5979 6305
5980 dma_unmap_page(rx_ring->dev, buffer_info->page_dma, 6306/**
5981 PAGE_SIZE / 2, DMA_FROM_DEVICE); 6307 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
5982 buffer_info->page_dma = 0; 6308 * @rx_ring: rx descriptor ring packet is being transacted on
5983 } 6309 * @rx_desc: pointer to the EOP Rx descriptor
6310 * @skb: pointer to current skb being populated
6311 *
6312 * This function checks the ring, descriptor, and packet information in
6313 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6314 * other fields within the skb.
6315 **/
6316static void igb_process_skb_fields(struct igb_ring *rx_ring,
6317 union e1000_adv_rx_desc *rx_desc,
6318 struct sk_buff *skb)
6319{
6320 struct net_device *dev = rx_ring->netdev;
5984 6321
5985 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) { 6322 igb_rx_hash(rx_ring, rx_desc, skb);
5986 struct igb_rx_buffer *next_buffer;
5987 next_buffer = &rx_ring->rx_buffer_info[i];
5988 buffer_info->skb = next_buffer->skb;
5989 buffer_info->dma = next_buffer->dma;
5990 next_buffer->skb = skb;
5991 next_buffer->dma = 0;
5992 goto next_desc;
5993 }
5994 6323
5995 if (unlikely((igb_test_staterr(rx_desc, 6324 igb_rx_checksum(rx_ring, rx_desc, skb);
5996 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
5997 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
5998 dev_kfree_skb_any(skb);
5999 goto next_desc;
6000 }
6001 6325
6002#ifdef CONFIG_IGB_PTP 6326 igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
6003 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
6004#endif /* CONFIG_IGB_PTP */
6005 igb_rx_hash(rx_ring, rx_desc, skb);
6006 igb_rx_checksum(rx_ring, rx_desc, skb);
6007 igb_rx_vlan(rx_ring, rx_desc, skb);
6008 6327
6009 total_bytes += skb->len; 6328 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
6010 total_packets++; 6329 igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
6330 u16 vid;
6331 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
6332 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
6333 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
6334 else
6335 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
6011 6336
6012 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 6337 __vlan_hwaccel_put_tag(skb, vid);
6338 }
6013 6339
6014 napi_gro_receive(&q_vector->napi, skb); 6340 skb_record_rx_queue(skb, rx_ring->queue_index);
6015 6341
6016 budget--; 6342 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6017next_desc: 6343}
6018 if (!budget) 6344
6019 break; 6345static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6346{
6347 struct igb_ring *rx_ring = q_vector->rx.ring;
6348 struct sk_buff *skb = rx_ring->skb;
6349 unsigned int total_bytes = 0, total_packets = 0;
6350 u16 cleaned_count = igb_desc_unused(rx_ring);
6351
6352 do {
6353 union e1000_adv_rx_desc *rx_desc;
6020 6354
6021 cleaned_count++;
6022 /* return some buffers to hardware, one at a time is too slow */ 6355 /* return some buffers to hardware, one at a time is too slow */
6023 if (cleaned_count >= IGB_RX_BUFFER_WRITE) { 6356 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
6024 igb_alloc_rx_buffers(rx_ring, cleaned_count); 6357 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6025 cleaned_count = 0; 6358 cleaned_count = 0;
6026 } 6359 }
6027 6360
6028 /* use prefetched values */ 6361 rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
6029 rx_desc = next_rxd;
6030 }
6031 6362
6032 rx_ring->next_to_clean = i; 6363 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
6033 u64_stats_update_begin(&rx_ring->rx_syncp); 6364 break;
6034 rx_ring->rx_stats.packets += total_packets;
6035 rx_ring->rx_stats.bytes += total_bytes;
6036 u64_stats_update_end(&rx_ring->rx_syncp);
6037 q_vector->rx.total_packets += total_packets;
6038 q_vector->rx.total_bytes += total_bytes;
6039 6365
6040 if (cleaned_count) 6366 /* retrieve a buffer from the ring */
6041 igb_alloc_rx_buffers(rx_ring, cleaned_count); 6367 skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
6042 6368
6043 return !!budget; 6369 /* exit if we failed to retrieve a buffer */
6044} 6370 if (!skb)
6371 break;
6045 6372
6046static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring, 6373 cleaned_count++;
6047 struct igb_rx_buffer *bi)
6048{
6049 struct sk_buff *skb = bi->skb;
6050 dma_addr_t dma = bi->dma;
6051 6374
6052 if (dma) 6375 /* fetch next buffer in frame if non-eop */
6053 return true; 6376 if (igb_is_non_eop(rx_ring, rx_desc))
6377 continue;
6054 6378
6055 if (likely(!skb)) { 6379 /* verify the packet layout is correct */
6056 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 6380 if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
6057 IGB_RX_HDR_LEN); 6381 skb = NULL;
6058 bi->skb = skb; 6382 continue;
6059 if (!skb) {
6060 rx_ring->rx_stats.alloc_failed++;
6061 return false;
6062 } 6383 }
6063 6384
6064 /* initialize skb for ring */ 6385 /* probably a little skewed due to removing CRC */
6065 skb_record_rx_queue(skb, rx_ring->queue_index); 6386 total_bytes += skb->len;
6066 }
6067 6387
6068 dma = dma_map_single(rx_ring->dev, skb->data, 6388 /* populate checksum, timestamp, VLAN, and protocol */
6069 IGB_RX_HDR_LEN, DMA_FROM_DEVICE); 6389 igb_process_skb_fields(rx_ring, rx_desc, skb);
6070 6390
6071 if (dma_mapping_error(rx_ring->dev, dma)) { 6391 napi_gro_receive(&q_vector->napi, skb);
6072 rx_ring->rx_stats.alloc_failed++;
6073 return false;
6074 }
6075 6392
6076 bi->dma = dma; 6393 /* reset skb pointer */
6077 return true; 6394 skb = NULL;
6395
6396 /* update budget accounting */
6397 total_packets++;
6398 } while (likely(total_packets < budget));
6399
6400 /* place incomplete frames back on ring for completion */
6401 rx_ring->skb = skb;
6402
6403 u64_stats_update_begin(&rx_ring->rx_syncp);
6404 rx_ring->rx_stats.packets += total_packets;
6405 rx_ring->rx_stats.bytes += total_bytes;
6406 u64_stats_update_end(&rx_ring->rx_syncp);
6407 q_vector->rx.total_packets += total_packets;
6408 q_vector->rx.total_bytes += total_bytes;
6409
6410 if (cleaned_count)
6411 igb_alloc_rx_buffers(rx_ring, cleaned_count);
6412
6413 return (total_packets < budget);
6078} 6414}
6079 6415
6080static bool igb_alloc_mapped_page(struct igb_ring *rx_ring, 6416static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6081 struct igb_rx_buffer *bi) 6417 struct igb_rx_buffer *bi)
6082{ 6418{
6083 struct page *page = bi->page; 6419 struct page *page = bi->page;
6084 dma_addr_t page_dma = bi->page_dma; 6420 dma_addr_t dma;
6085 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6086 6421
6087 if (page_dma) 6422 /* since we are recycling buffers we should seldom need to alloc */
6423 if (likely(page))
6088 return true; 6424 return true;
6089 6425
6090 if (!page) { 6426 /* alloc new page for storage */
6091 page = __skb_alloc_page(GFP_ATOMIC, bi->skb); 6427 page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
6092 bi->page = page; 6428 if (unlikely(!page)) {
6093 if (unlikely(!page)) { 6429 rx_ring->rx_stats.alloc_failed++;
6094 rx_ring->rx_stats.alloc_failed++; 6430 return false;
6095 return false;
6096 }
6097 } 6431 }
6098 6432
6099 page_dma = dma_map_page(rx_ring->dev, page, 6433 /* map page for use */
6100 page_offset, PAGE_SIZE / 2, 6434 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6101 DMA_FROM_DEVICE); 6435
6436 /*
6437 * if mapping failed free memory back to system since
6438 * there isn't much point in holding memory we can't use
6439 */
6440 if (dma_mapping_error(rx_ring->dev, dma)) {
6441 __free_page(page);
6102 6442
6103 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6104 rx_ring->rx_stats.alloc_failed++; 6443 rx_ring->rx_stats.alloc_failed++;
6105 return false; 6444 return false;
6106 } 6445 }
6107 6446
6108 bi->page_dma = page_dma; 6447 bi->dma = dma;
6109 bi->page_offset = page_offset; 6448 bi->page = page;
6449 bi->page_offset = 0;
6450
6110 return true; 6451 return true;
6111} 6452}
6112 6453
@@ -6120,22 +6461,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6120 struct igb_rx_buffer *bi; 6461 struct igb_rx_buffer *bi;
6121 u16 i = rx_ring->next_to_use; 6462 u16 i = rx_ring->next_to_use;
6122 6463
6464 /* nothing to do */
6465 if (!cleaned_count)
6466 return;
6467
6123 rx_desc = IGB_RX_DESC(rx_ring, i); 6468 rx_desc = IGB_RX_DESC(rx_ring, i);
6124 bi = &rx_ring->rx_buffer_info[i]; 6469 bi = &rx_ring->rx_buffer_info[i];
6125 i -= rx_ring->count; 6470 i -= rx_ring->count;
6126 6471
6127 while (cleaned_count--) { 6472 do {
6128 if (!igb_alloc_mapped_skb(rx_ring, bi))
6129 break;
6130
6131 /* Refresh the desc even if buffer_addrs didn't change
6132 * because each write-back erases this info. */
6133 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
6134
6135 if (!igb_alloc_mapped_page(rx_ring, bi)) 6473 if (!igb_alloc_mapped_page(rx_ring, bi))
6136 break; 6474 break;
6137 6475
6138 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 6476 /*
6477 * Refresh the desc even if buffer_addrs didn't change
6478 * because each write-back erases this info.
6479 */
6480 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
6139 6481
6140 rx_desc++; 6482 rx_desc++;
6141 bi++; 6483 bi++;
@@ -6148,17 +6490,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6148 6490
6149 /* clear the hdr_addr for the next_to_use descriptor */ 6491 /* clear the hdr_addr for the next_to_use descriptor */
6150 rx_desc->read.hdr_addr = 0; 6492 rx_desc->read.hdr_addr = 0;
6151 } 6493
6494 cleaned_count--;
6495 } while (cleaned_count);
6152 6496
6153 i += rx_ring->count; 6497 i += rx_ring->count;
6154 6498
6155 if (rx_ring->next_to_use != i) { 6499 if (rx_ring->next_to_use != i) {
6500 /* record the next descriptor to use */
6156 rx_ring->next_to_use = i; 6501 rx_ring->next_to_use = i;
6157 6502
6158 /* Force memory writes to complete before letting h/w 6503 /* update next to alloc since we have filled the ring */
6504 rx_ring->next_to_alloc = i;
6505
6506 /*
6507 * Force memory writes to complete before letting h/w
6159 * know there are new descriptors to fetch. (Only 6508 * know there are new descriptors to fetch. (Only
6160 * applicable for weak-ordered memory model archs, 6509 * applicable for weak-ordered memory model archs,
6161 * such as IA-64). */ 6510 * such as IA-64).
6511 */
6162 wmb(); 6512 wmb();
6163 writel(i, rx_ring->tail); 6513 writel(i, rx_ring->tail);
6164 } 6514 }
@@ -6207,10 +6557,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6207 case SIOCGMIIREG: 6557 case SIOCGMIIREG:
6208 case SIOCSMIIREG: 6558 case SIOCSMIIREG:
6209 return igb_mii_ioctl(netdev, ifr, cmd); 6559 return igb_mii_ioctl(netdev, ifr, cmd);
6210#ifdef CONFIG_IGB_PTP
6211 case SIOCSHWTSTAMP: 6560 case SIOCSHWTSTAMP:
6212 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); 6561 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
6213#endif /* CONFIG_IGB_PTP */
6214 default: 6562 default:
6215 return -EOPNOTSUPP; 6563 return -EOPNOTSUPP;
6216 } 6564 }
@@ -6478,7 +6826,7 @@ static int igb_resume(struct device *dev)
6478 pci_enable_wake(pdev, PCI_D3hot, 0); 6826 pci_enable_wake(pdev, PCI_D3hot, 0);
6479 pci_enable_wake(pdev, PCI_D3cold, 0); 6827 pci_enable_wake(pdev, PCI_D3cold, 0);
6480 6828
6481 if (igb_init_interrupt_scheme(adapter)) { 6829 if (igb_init_interrupt_scheme(adapter, true)) {
6482 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 6830 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6483 return -ENOMEM; 6831 return -ENOMEM;
6484 } 6832 }
@@ -6492,7 +6840,9 @@ static int igb_resume(struct device *dev)
6492 wr32(E1000_WUS, ~0); 6840 wr32(E1000_WUS, ~0);
6493 6841
6494 if (netdev->flags & IFF_UP) { 6842 if (netdev->flags & IFF_UP) {
6843 rtnl_lock();
6495 err = __igb_open(netdev, true); 6844 err = __igb_open(netdev, true);
6845 rtnl_unlock();
6496 if (err) 6846 if (err)
6497 return err; 6847 return err;
6498 } 6848 }
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445157a3..ab3429729bde 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
441 adapter->ptp_tx_skb = NULL; 441 adapter->ptp_tx_skb = NULL;
442} 442}
443 443
444void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, 444/**
445 union e1000_adv_rx_desc *rx_desc, 445 * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
446 * @q_vector: Pointer to interrupt specific structure
447 * @va: Pointer to address containing Rx buffer
448 * @skb: Buffer containing timestamp and packet
449 *
450 * This function is meant to retrieve a timestamp from the first buffer of an
451 * incoming frame. The value is stored in little endian format starting on
452 * byte 8.
453 */
454void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
455 unsigned char *va,
456 struct sk_buff *skb)
457{
458 __le64 *regval = (__le64 *)va;
459
460 /*
461 * The timestamp is recorded in little endian format.
462 * DWORD: 0 1 2 3
463 * Field: Reserved Reserved SYSTIML SYSTIMH
464 */
465 igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
466 le64_to_cpu(regval[1]));
467}
468
469/**
470 * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
471 * @q_vector: Pointer to interrupt specific structure
472 * @skb: Buffer containing timestamp and packet
473 *
474 * This function is meant to retrieve a timestamp from the internal registers
475 * of the adapter and store it in the skb.
476 */
477void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
446 struct sk_buff *skb) 478 struct sk_buff *skb)
447{ 479{
448 struct igb_adapter *adapter = q_vector->adapter; 480 struct igb_adapter *adapter = q_vector->adapter;
449 struct e1000_hw *hw = &adapter->hw; 481 struct e1000_hw *hw = &adapter->hw;
450 u64 regval; 482 u64 regval;
451 483
452 if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
453 E1000_RXDADV_STAT_TS))
454 return;
455
456 /* 484 /*
457 * If this bit is set, then the RX registers contain the time stamp. No 485 * If this bit is set, then the RX registers contain the time stamp. No
458 * other packet will be time stamped until we read these registers, so 486 * other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
464 * If nothing went wrong, then it should have a shared tx_flags that we 492 * If nothing went wrong, then it should have a shared tx_flags that we
465 * can turn into a skb_shared_hwtstamps. 493 * can turn into a skb_shared_hwtstamps.
466 */ 494 */
467 if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { 495 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
468 u32 *stamp = (u32 *)skb->data; 496 return;
469 regval = le32_to_cpu(*(stamp + 2));
470 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
471 skb_pull(skb, IGB_TS_HDR_LEN);
472 } else {
473 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
474 return;
475 497
476 regval = rd32(E1000_RXSTMPL); 498 regval = rd32(E1000_RXSTMPL);
477 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 499 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
478 }
479 500
480 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 501 igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
481} 502}
@@ -532,18 +553,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
532 case HWTSTAMP_FILTER_NONE: 553 case HWTSTAMP_FILTER_NONE:
533 tsync_rx_ctl = 0; 554 tsync_rx_ctl = 0;
534 break; 555 break;
535 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
536 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
537 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
538 case HWTSTAMP_FILTER_ALL:
539 /*
540 * register TSYNCRXCFG must be set, therefore it is not
541 * possible to time stamp both Sync and Delay_Req messages
542 * => fall back to time stamping all packets
543 */
544 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
545 config.rx_filter = HWTSTAMP_FILTER_ALL;
546 break;
547 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 556 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
548 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; 557 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
549 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 558 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
@@ -554,31 +563,33 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
554 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 563 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
555 is_l4 = true; 564 is_l4 = true;
556 break; 565 break;
566 case HWTSTAMP_FILTER_PTP_V2_EVENT:
567 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
568 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
569 case HWTSTAMP_FILTER_PTP_V2_SYNC:
557 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 570 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
558 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 571 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
559 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 572 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
560 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
561 is_l2 = true;
562 is_l4 = true;
563 config.rx_filter = HWTSTAMP_FILTER_SOME;
564 break;
565 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 573 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
566 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 574 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
567 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
568 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
569 is_l2 = true;
570 is_l4 = true;
571 config.rx_filter = HWTSTAMP_FILTER_SOME;
572 break;
573 case HWTSTAMP_FILTER_PTP_V2_EVENT:
574 case HWTSTAMP_FILTER_PTP_V2_SYNC:
575 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
576 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; 575 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
577 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 576 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
578 is_l2 = true; 577 is_l2 = true;
579 is_l4 = true; 578 is_l4 = true;
580 break; 579 break;
580 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
581 case HWTSTAMP_FILTER_ALL:
582 /* 82576 cannot timestamp all packets, which it needs to do to
583 * support both V1 Sync and Delay_Req messages
584 */
585 if (hw->mac.type != e1000_82576) {
586 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
587 config.rx_filter = HWTSTAMP_FILTER_ALL;
588 break;
589 }
590 /* fall through */
581 default: 591 default:
592 config.rx_filter = HWTSTAMP_FILTER_NONE;
582 return -ERANGE; 593 return -ERANGE;
583 } 594 }
584 595
@@ -596,6 +607,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
596 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { 607 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
597 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 608 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
598 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 609 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
610 config.rx_filter = HWTSTAMP_FILTER_ALL;
611 is_l2 = true;
612 is_l4 = true;
599 613
600 if ((hw->mac.type == e1000_i210) || 614 if ((hw->mac.type == e1000_i210) ||
601 (hw->mac.type == e1000_i211)) { 615 (hw->mac.type == e1000_i211)) {
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 3e18045d8f89..d9fa999b1685 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -46,6 +46,7 @@
46#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ 46#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
47#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ 47#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
48 48
49#define E1000_RXDEXT_STATERR_LB 0x00040000
49#define E1000_RXDEXT_STATERR_CE 0x01000000 50#define E1000_RXDEXT_STATERR_CE 0x01000000
50#define E1000_RXDEXT_STATERR_SE 0x02000000 51#define E1000_RXDEXT_STATERR_SE 0x02000000
51#define E1000_RXDEXT_STATERR_SEQ 0x04000000 52#define E1000_RXDEXT_STATERR_SEQ 0x04000000
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a895e2f7b34d..fdca7b672776 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -295,7 +295,7 @@ struct igbvf_info {
295 295
296/* hardware capability, feature, and workaround flags */ 296/* hardware capability, feature, and workaround flags */
297#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) 297#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
298 298#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
299#define IGBVF_RX_DESC_ADV(R, i) \ 299#define IGBVF_RX_DESC_ADV(R, i) \
300 (&((((R).desc))[i].rx_desc)) 300 (&((((R).desc))[i].rx_desc))
301#define IGBVF_TX_DESC_ADV(R, i) \ 301#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f527a84..277f5dfe3d90 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
47 47
48#include "igbvf.h" 48#include "igbvf.h"
49 49
50#define DRV_VERSION "2.0.1-k" 50#define DRV_VERSION "2.0.2-k"
51char igbvf_driver_name[] = "igbvf"; 51char igbvf_driver_name[] = "igbvf";
52const char igbvf_driver_version[] = DRV_VERSION; 52const char igbvf_driver_version[] = DRV_VERSION;
53static const char igbvf_driver_string[] = 53static const char igbvf_driver_string[] =
@@ -107,12 +107,19 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
107 struct sk_buff *skb, 107 struct sk_buff *skb,
108 u32 status, u16 vlan) 108 u32 status, u16 vlan)
109{ 109{
110 u16 vid;
111
110 if (status & E1000_RXD_STAT_VP) { 112 if (status & E1000_RXD_STAT_VP) {
111 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; 113 if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
114 (status & E1000_RXDEXT_STATERR_LB))
115 vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
116 else
117 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
112 if (test_bit(vid, adapter->active_vlans)) 118 if (test_bit(vid, adapter->active_vlans))
113 __vlan_hwaccel_put_tag(skb, vid); 119 __vlan_hwaccel_put_tag(skb, vid);
114 } 120 }
115 netif_receive_skb(skb); 121
122 napi_gro_receive(&adapter->rx_ring->napi, skb);
116} 123}
117 124
118static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, 125static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -184,6 +191,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
184 buffer_info->page_offset, 191 buffer_info->page_offset,
185 PAGE_SIZE / 2, 192 PAGE_SIZE / 2,
186 DMA_FROM_DEVICE); 193 DMA_FROM_DEVICE);
194 if (dma_mapping_error(&pdev->dev,
195 buffer_info->page_dma)) {
196 __free_page(buffer_info->page);
197 buffer_info->page = NULL;
198 dev_err(&pdev->dev, "RX DMA map failed\n");
199 break;
200 }
187 } 201 }
188 202
189 if (!buffer_info->skb) { 203 if (!buffer_info->skb) {
@@ -197,6 +211,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
197 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, 211 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
198 bufsz, 212 bufsz,
199 DMA_FROM_DEVICE); 213 DMA_FROM_DEVICE);
214 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
215 dev_kfree_skb(buffer_info->skb);
216 buffer_info->skb = NULL;
217 dev_err(&pdev->dev, "RX DMA map failed\n");
218 goto no_buffers;
219 }
200 } 220 }
201 /* Refresh the desc even if buffer_addrs didn't change because 221 /* Refresh the desc even if buffer_addrs didn't change because
202 * each write-back erases this info. */ 222 * each write-back erases this info. */
@@ -1078,7 +1098,7 @@ out:
1078 * igbvf_alloc_queues - Allocate memory for all rings 1098 * igbvf_alloc_queues - Allocate memory for all rings
1079 * @adapter: board private structure to initialize 1099 * @adapter: board private structure to initialize
1080 **/ 1100 **/
1081static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) 1101static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
1082{ 1102{
1083 struct net_device *netdev = adapter->netdev; 1103 struct net_device *netdev = adapter->netdev;
1084 1104
@@ -1530,7 +1550,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1530 * Fields are initialized based on PCI device information and 1550 * Fields are initialized based on PCI device information and
1531 * OS network device settings (MTU size). 1551 * OS network device settings (MTU size).
1532 **/ 1552 **/
1533static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) 1553static int igbvf_sw_init(struct igbvf_adapter *adapter)
1534{ 1554{
1535 struct net_device *netdev = adapter->netdev; 1555 struct net_device *netdev = adapter->netdev;
1536 s32 rc; 1556 s32 rc;
@@ -2598,8 +2618,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
2598 * The OS initialization, configuring of the adapter private structure, 2618 * The OS initialization, configuring of the adapter private structure,
2599 * and a hardware reset occur. 2619 * and a hardware reset occur.
2600 **/ 2620 **/
2601static int __devinit igbvf_probe(struct pci_dev *pdev, 2621static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2602 const struct pci_device_id *ent)
2603{ 2622{
2604 struct net_device *netdev; 2623 struct net_device *netdev;
2605 struct igbvf_adapter *adapter; 2624 struct igbvf_adapter *adapter;
@@ -2754,6 +2773,10 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2754 /* reset the hardware with the new settings */ 2773 /* reset the hardware with the new settings */
2755 igbvf_reset(adapter); 2774 igbvf_reset(adapter);
2756 2775
2776 /* set hardware-specific flags */
2777 if (adapter->hw.mac.type == e1000_vfadapt_i350)
2778 adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
2779
2757 strcpy(netdev->name, "eth%d"); 2780 strcpy(netdev->name, "eth%d");
2758 err = register_netdev(netdev); 2781 err = register_netdev(netdev);
2759 if (err) 2782 if (err)
@@ -2794,7 +2817,7 @@ err_dma:
2794 * Hot-Plug event, or because the driver is going to be removed from 2817 * Hot-Plug event, or because the driver is going to be removed from
2795 * memory. 2818 * memory.
2796 **/ 2819 **/
2797static void __devexit igbvf_remove(struct pci_dev *pdev) 2820static void igbvf_remove(struct pci_dev *pdev)
2798{ 2821{
2799 struct net_device *netdev = pci_get_drvdata(pdev); 2822 struct net_device *netdev = pci_get_drvdata(pdev);
2800 struct igbvf_adapter *adapter = netdev_priv(netdev); 2823 struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2851,7 +2874,7 @@ static struct pci_driver igbvf_driver = {
2851 .name = igbvf_driver_name, 2874 .name = igbvf_driver_name,
2852 .id_table = igbvf_pci_tbl, 2875 .id_table = igbvf_pci_tbl,
2853 .probe = igbvf_probe, 2876 .probe = igbvf_probe,
2854 .remove = __devexit_p(igbvf_remove), 2877 .remove = igbvf_remove,
2855#ifdef CONFIG_PM 2878#ifdef CONFIG_PM
2856 /* Power Management Hooks */ 2879 /* Power Management Hooks */
2857 .suspend = igbvf_suspend, 2880 .suspend = igbvf_suspend,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d99a2d51b948..ae96c10251be 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
73static int ixgb_init_module(void); 73static int ixgb_init_module(void);
74static void ixgb_exit_module(void); 74static void ixgb_exit_module(void);
75static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 75static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
76static void __devexit ixgb_remove(struct pci_dev *pdev); 76static void ixgb_remove(struct pci_dev *pdev);
77static int ixgb_sw_init(struct ixgb_adapter *adapter); 77static int ixgb_sw_init(struct ixgb_adapter *adapter);
78static int ixgb_open(struct net_device *netdev); 78static int ixgb_open(struct net_device *netdev);
79static int ixgb_close(struct net_device *netdev); 79static int ixgb_close(struct net_device *netdev);
@@ -125,7 +125,7 @@ static struct pci_driver ixgb_driver = {
125 .name = ixgb_driver_name, 125 .name = ixgb_driver_name,
126 .id_table = ixgb_pci_tbl, 126 .id_table = ixgb_pci_tbl,
127 .probe = ixgb_probe, 127 .probe = ixgb_probe,
128 .remove = __devexit_p(ixgb_remove), 128 .remove = ixgb_remove,
129 .err_handler = &ixgb_err_handler 129 .err_handler = &ixgb_err_handler
130}; 130};
131 131
@@ -391,7 +391,7 @@ static const struct net_device_ops ixgb_netdev_ops = {
391 * and a hardware reset occur. 391 * and a hardware reset occur.
392 **/ 392 **/
393 393
394static int __devinit 394static int
395ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 395ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
396{ 396{
397 struct net_device *netdev = NULL; 397 struct net_device *netdev = NULL;
@@ -558,7 +558,7 @@ err_dma_mask:
558 * memory. 558 * memory.
559 **/ 559 **/
560 560
561static void __devexit 561static void
562ixgb_remove(struct pci_dev *pdev) 562ixgb_remove(struct pci_dev *pdev)
563{ 563{
564 struct net_device *netdev = pci_get_drvdata(pdev); 564 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -584,7 +584,7 @@ ixgb_remove(struct pci_dev *pdev)
584 * OS network device settings (MTU size). 584 * OS network device settings (MTU size).
585 **/ 585 **/
586 586
587static int __devinit 587static int
588ixgb_sw_init(struct ixgb_adapter *adapter) 588ixgb_sw_init(struct ixgb_adapter *adapter)
589{ 589{
590 struct ixgb_hw *hw = &adapter->hw; 590 struct ixgb_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 07d83ab46e21..04a60640ddda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -47,7 +47,7 @@
47 47
48#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET } 48#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
49#define IXGB_PARAM(X, desc) \ 49#define IXGB_PARAM(X, desc) \
50 static int __devinitdata X[IXGB_MAX_NIC+1] \ 50 static int X[IXGB_MAX_NIC+1] \
51 = IXGB_PARAM_INIT; \ 51 = IXGB_PARAM_INIT; \
52 static unsigned int num_##X = 0; \ 52 static unsigned int num_##X = 0; \
53 module_param_array_named(X, X, int, &num_##X, 0); \ 53 module_param_array_named(X, X, int, &num_##X, 0); \
@@ -199,7 +199,7 @@ struct ixgb_option {
199 } arg; 199 } arg;
200}; 200};
201 201
202static int __devinit 202static int
203ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt) 203ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
204{ 204{
205 if (*value == OPTION_UNSET) { 205 if (*value == OPTION_UNSET) {
@@ -257,7 +257,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
257 * in a variable in the adapter structure. 257 * in a variable in the adapter structure.
258 **/ 258 **/
259 259
260void __devinit 260void
261ixgb_check_options(struct ixgb_adapter *adapter) 261ixgb_check_options(struct ixgb_adapter *adapter)
262{ 262{
263 int bd = adapter->bd_number; 263 int bd = adapter->bd_number;
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e51fc13..f3a632bf8d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
41 41
42ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
43ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o 42ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
44ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 43ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f0f47a..8e786764c60e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
36#include <linux/aer.h> 36#include <linux/aer.h>
37#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
38 38
39#ifdef CONFIG_IXGBE_PTP
40#include <linux/clocksource.h> 39#include <linux/clocksource.h>
41#include <linux/net_tstamp.h> 40#include <linux/net_tstamp.h>
42#include <linux/ptp_clock_kernel.h> 41#include <linux/ptp_clock_kernel.h>
43#endif /* CONFIG_IXGBE_PTP */
44 42
45#include "ixgbe_type.h" 43#include "ixgbe_type.h"
46#include "ixgbe_common.h" 44#include "ixgbe_common.h"
@@ -135,6 +133,7 @@ struct vf_data_storage {
135 u16 tx_rate; 133 u16 tx_rate;
136 u16 vlan_count; 134 u16 vlan_count;
137 u8 spoofchk_enabled; 135 u8 spoofchk_enabled;
136 unsigned int vf_api;
138}; 137};
139 138
140struct vf_macvlans { 139struct vf_macvlans {
@@ -482,8 +481,9 @@ struct ixgbe_adapter {
482#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) 481#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
483#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) 482#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
484#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) 483#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
485#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10) 484#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
486#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) 485#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
486#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
487 487
488 /* Tx fast path data */ 488 /* Tx fast path data */
489 int num_tx_queues; 489 int num_tx_queues;
@@ -571,7 +571,6 @@ struct ixgbe_adapter {
571 u32 interrupt_event; 571 u32 interrupt_event;
572 u32 led_reg; 572 u32 led_reg;
573 573
574#ifdef CONFIG_IXGBE_PTP
575 struct ptp_clock *ptp_clock; 574 struct ptp_clock *ptp_clock;
576 struct ptp_clock_info ptp_caps; 575 struct ptp_clock_info ptp_caps;
577 unsigned long last_overflow_check; 576 unsigned long last_overflow_check;
@@ -580,8 +579,6 @@ struct ixgbe_adapter {
580 struct timecounter tc; 579 struct timecounter tc;
581 int rx_hwtstamp_filter; 580 int rx_hwtstamp_filter;
582 u32 base_incval; 581 u32 base_incval;
583 u32 cycle_speed;
584#endif /* CONFIG_IXGBE_PTP */
585 582
586 /* SR-IOV */ 583 /* SR-IOV */
587 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 584 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +597,8 @@ struct ixgbe_adapter {
600#ifdef CONFIG_DEBUG_FS 597#ifdef CONFIG_DEBUG_FS
601 struct dentry *ixgbe_dbg_adapter; 598 struct dentry *ixgbe_dbg_adapter;
602#endif /*CONFIG_DEBUG_FS*/ 599#endif /*CONFIG_DEBUG_FS*/
600
601 u8 default_up;
603}; 602};
604 603
605struct ixgbe_fdir_filter { 604struct ixgbe_fdir_filter {
@@ -691,6 +690,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
691 u16 soft_id); 690 u16 soft_id);
692extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 691extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
693 union ixgbe_atr_input *mask); 692 union ixgbe_atr_input *mask);
693extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
694extern void ixgbe_set_rx_mode(struct net_device *netdev); 694extern void ixgbe_set_rx_mode(struct net_device *netdev);
695#ifdef CONFIG_IXGBE_DCB 695#ifdef CONFIG_IXGBE_DCB
696extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 696extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +739,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
739 return netdev_get_tx_queue(ring->netdev, ring->queue_index); 739 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
740} 740}
741 741
742#ifdef CONFIG_IXGBE_PTP
743extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter); 742extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
744extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); 743extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
745extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); 744extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +750,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
751extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, 750extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
752 struct ifreq *ifr, int cmd); 751 struct ifreq *ifr, int cmd);
753extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 752extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
753extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
754extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); 754extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
755#endif /* CONFIG_IXGBE_PTP */
756 755
757#endif /* _IXGBE_H_ */ 756#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2b38db..1073aea5da40 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
62 bool autoneg, 62 bool autoneg,
63 bool autoneg_wait_to_complete); 63 bool autoneg_wait_to_complete);
64static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 64static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
65static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
66 65
67static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 66static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
68{ 67{
@@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
99static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 98static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
100{ 99{
101 s32 ret_val = 0; 100 s32 ret_val = 0;
102 u32 reg_anlp1 = 0;
103 u32 i = 0;
104 u16 list_offset, data_offset, data_value; 101 u16 list_offset, data_offset, data_value;
102 bool got_lock = false;
105 103
106 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 104 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
107 ixgbe_init_mac_link_ops_82599(hw); 105 ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
137 usleep_range(hw->eeprom.semaphore_delay * 1000, 135 usleep_range(hw->eeprom.semaphore_delay * 1000,
138 hw->eeprom.semaphore_delay * 2000); 136 hw->eeprom.semaphore_delay * 2000);
139 137
140 /* Now restart DSP by setting Restart_AN and clearing LMS */ 138 /* Need SW/FW semaphore around AUTOC writes if LESM on,
141 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, 139 * likewise reset_pipeline requires lock as it also writes
142 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | 140 * AUTOC.
143 IXGBE_AUTOC_AN_RESTART)); 141 */
144 142 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
145 /* Wait for AN to leave state 0 */ 143 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
146 for (i = 0; i < 10; i++) { 144 IXGBE_GSSR_MAC_CSR_SM);
147 usleep_range(4000, 8000); 145 if (ret_val)
148 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); 146 goto setup_sfp_out;
149 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) 147
150 break; 148 got_lock = true;
149 }
150
151 /* Restart DSP and set SFI mode */
152 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
153 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
154
155 ret_val = ixgbe_reset_pipeline_82599(hw);
156
157 if (got_lock) {
158 hw->mac.ops.release_swfw_sync(hw,
159 IXGBE_GSSR_MAC_CSR_SM);
160 got_lock = false;
151 } 161 }
152 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { 162
153 hw_dbg(hw, "sfp module setup not complete\n"); 163 if (ret_val) {
164 hw_dbg(hw, " sfp module setup not complete\n");
154 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 165 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
155 goto setup_sfp_out; 166 goto setup_sfp_out;
156 } 167 }
157
158 /* Restart DSP by setting Restart_AN and return to SFI mode */
159 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
160 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
161 IXGBE_AUTOC_AN_RESTART));
162 } 168 }
163 169
164setup_sfp_out: 170setup_sfp_out:
@@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
394 u32 links_reg; 400 u32 links_reg;
395 u32 i; 401 u32 i;
396 s32 status = 0; 402 s32 status = 0;
403 bool got_lock = false;
404
405 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
406 status = hw->mac.ops.acquire_swfw_sync(hw,
407 IXGBE_GSSR_MAC_CSR_SM);
408 if (status)
409 goto out;
410
411 got_lock = true;
412 }
397 413
398 /* Restart link */ 414 /* Restart link */
399 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 415 ixgbe_reset_pipeline_82599(hw);
400 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 416
401 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 417 if (got_lock)
418 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
402 419
403 /* Only poll for autoneg to complete if specified to do so */ 420 /* Only poll for autoneg to complete if specified to do so */
404 if (autoneg_wait_to_complete) { 421 if (autoneg_wait_to_complete) {
422 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
405 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 423 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
406 IXGBE_AUTOC_LMS_KX4_KX_KR || 424 IXGBE_AUTOC_LMS_KX4_KX_KR ||
407 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 425 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
425 /* Add delay to filter out noises during initial link setup */ 443 /* Add delay to filter out noises during initial link setup */
426 msleep(50); 444 msleep(50);
427 445
446out:
428 return status; 447 return status;
429} 448}
430 449
@@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
779 u32 links_reg; 798 u32 links_reg;
780 u32 i; 799 u32 i;
781 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 800 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
801 bool got_lock = false;
782 802
783 /* Check to see if speed passed in is supported. */ 803 /* Check to see if speed passed in is supported. */
784 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 804 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
836 } 856 }
837 857
838 if (autoc != start_autoc) { 858 if (autoc != start_autoc) {
859 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
860 * likewise reset_pipeline requires us to hold this lock as
861 * it also writes to AUTOC.
862 */
863 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
864 status = hw->mac.ops.acquire_swfw_sync(hw,
865 IXGBE_GSSR_MAC_CSR_SM);
866 if (status != 0)
867 goto out;
868
869 got_lock = true;
870 }
871
839 /* Restart link */ 872 /* Restart link */
840 autoc |= IXGBE_AUTOC_AN_RESTART;
841 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 873 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
874 ixgbe_reset_pipeline_82599(hw);
875
876 if (got_lock)
877 hw->mac.ops.release_swfw_sync(hw,
878 IXGBE_GSSR_MAC_CSR_SM);
842 879
843 /* Only poll for autoneg to complete if specified to do so */ 880 /* Only poll for autoneg to complete if specified to do so */
844 if (autoneg_wait_to_complete) { 881 if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@ mac_reset_top:
994 hw->mac.orig_autoc2 = autoc2; 1031 hw->mac.orig_autoc2 = autoc2;
995 hw->mac.orig_link_settings_stored = true; 1032 hw->mac.orig_link_settings_stored = true;
996 } else { 1033 } else {
997 if (autoc != hw->mac.orig_autoc) 1034 if (autoc != hw->mac.orig_autoc) {
998 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | 1035 /* Need SW/FW semaphore around AUTOC writes if LESM is
999 IXGBE_AUTOC_AN_RESTART)); 1036 * on, likewise reset_pipeline requires us to hold
1037 * this lock as it also writes to AUTOC.
1038 */
1039 bool got_lock = false;
1040 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1041 status = hw->mac.ops.acquire_swfw_sync(hw,
1042 IXGBE_GSSR_MAC_CSR_SM);
1043 if (status)
1044 goto reset_hw_out;
1045
1046 got_lock = true;
1047 }
1048
1049 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1050 ixgbe_reset_pipeline_82599(hw);
1051
1052 if (got_lock)
1053 hw->mac.ops.release_swfw_sync(hw,
1054 IXGBE_GSSR_MAC_CSR_SM);
1055 }
1000 1056
1001 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1057 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1002 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1058 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1022,7 +1078,7 @@ mac_reset_top:
1022 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 1078 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1023 1079
1024 /* Add the SAN MAC address to the RAR only if it's a valid address */ 1080 /* Add the SAN MAC address to the RAR only if it's a valid address */
1025 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 1081 if (is_valid_ether_addr(hw->mac.san_addr)) {
1026 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 1082 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1027 hw->mac.san_addr, 0, IXGBE_RAH_AV); 1083 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1028 1084
@@ -1983,7 +2039,7 @@ fw_version_out:
1983 * Returns true if the LESM FW module is present and enabled. Otherwise 2039 * Returns true if the LESM FW module is present and enabled. Otherwise
1984 * returns false. Smart Speed must be disabled if LESM FW module is enabled. 2040 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
1985 **/ 2041 **/
1986static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2042bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
1987{ 2043{
1988 bool lesm_enabled = false; 2044 bool lesm_enabled = false;
1989 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2045 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2080 return ret_val; 2136 return ret_val;
2081} 2137}
2082 2138
2139/**
2140 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2141 *
2142 * @hw: pointer to hardware structure
2143 *
2144 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2145 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
2146 * to AUTOC, so this function assumes the semaphore is held.
2147 **/
2148s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2149{
2150 s32 i, autoc_reg, ret_val;
2151 s32 anlp1_reg = 0;
2152
2153 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2154 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2155
2156 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2157 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2158
2159 /* Wait for AN to leave state 0 */
2160 for (i = 0; i < 10; i++) {
2161 usleep_range(4000, 8000);
2162 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2163 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2164 break;
2165 }
2166
2167 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2168 hw_dbg(hw, "auto negotiation not completed\n");
2169 ret_val = IXGBE_ERR_RESET_FAILED;
2170 goto reset_pipeline_out;
2171 }
2172
2173 ret_val = 0;
2174
2175reset_pipeline_out:
2176 /* Write AUTOC register with original LMS field and Restart_AN */
2177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2178 IXGBE_WRITE_FLUSH(hw);
2179
2180 return ret_val;
2181}
2182
2083static struct ixgbe_mac_operations mac_ops_82599 = { 2183static struct ixgbe_mac_operations mac_ops_82599 = {
2084 .init_hw = &ixgbe_init_hw_generic, 2184 .init_hw = &ixgbe_init_hw_generic,
2085 .reset_hw = &ixgbe_reset_hw_82599, 2185 .reset_hw = &ixgbe_reset_hw_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4a45fd..5e68afdd502a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,13 +65,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
65 * function check the device id to see if the associated phy supports 65 * function check the device id to see if the associated phy supports
66 * autoneg flow control. 66 * autoneg flow control.
67 **/ 67 **/
68static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) 68s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{ 69{
70 70
71 switch (hw->device_id) { 71 switch (hw->device_id) {
72 case IXGBE_DEV_ID_X540T: 72 case IXGBE_DEV_ID_X540T:
73 case IXGBE_DEV_ID_X540T1: 73 case IXGBE_DEV_ID_X540T1:
74 return 0;
75 case IXGBE_DEV_ID_82599_T3_LOM: 74 case IXGBE_DEV_ID_82599_T3_LOM:
76 return 0; 75 return 0;
77 default: 76 default:
@@ -90,6 +89,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
90 s32 ret_val = 0; 89 s32 ret_val = 0;
91 u32 reg = 0, reg_bp = 0; 90 u32 reg = 0, reg_bp = 0;
92 u16 reg_cu = 0; 91 u16 reg_cu = 0;
92 bool got_lock = false;
93 93
94 /* 94 /*
95 * Validate the requested mode. Strict IEEE mode does not allow 95 * Validate the requested mode. Strict IEEE mode does not allow
@@ -210,8 +210,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
210 * 210 *
211 */ 211 */
212 if (hw->phy.media_type == ixgbe_media_type_backplane) { 212 if (hw->phy.media_type == ixgbe_media_type_backplane) {
213 reg_bp |= IXGBE_AUTOC_AN_RESTART; 213 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
214 * LESM is on, likewise reset_pipeline requries the lock as
215 * it also writes AUTOC.
216 */
217 if ((hw->mac.type == ixgbe_mac_82599EB) &&
218 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
219 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
220 IXGBE_GSSR_MAC_CSR_SM);
221 if (ret_val)
222 goto out;
223
224 got_lock = true;
225 }
226
214 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); 227 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
228
229 if (hw->mac.type == ixgbe_mac_82599EB)
230 ixgbe_reset_pipeline_82599(hw);
231
232 if (got_lock)
233 hw->mac.ops.release_swfw_sync(hw,
234 IXGBE_GSSR_MAC_CSR_SM);
235
215 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 236 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
216 (ixgbe_device_supports_autoneg_fc(hw) == 0)) { 237 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
217 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 238 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1762,30 +1783,6 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1762} 1783}
1763 1784
1764/** 1785/**
1765 * ixgbe_validate_mac_addr - Validate MAC address
1766 * @mac_addr: pointer to MAC address.
1767 *
1768 * Tests a MAC address to ensure it is a valid Individual Address
1769 **/
1770s32 ixgbe_validate_mac_addr(u8 *mac_addr)
1771{
1772 s32 status = 0;
1773
1774 /* Make sure it is not a multicast address */
1775 if (IXGBE_IS_MULTICAST(mac_addr))
1776 status = IXGBE_ERR_INVALID_MAC_ADDR;
1777 /* Not a broadcast address */
1778 else if (IXGBE_IS_BROADCAST(mac_addr))
1779 status = IXGBE_ERR_INVALID_MAC_ADDR;
1780 /* Reject the zero address */
1781 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
1782 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
1783 status = IXGBE_ERR_INVALID_MAC_ADDR;
1784
1785 return status;
1786}
1787
1788/**
1789 * ixgbe_set_rar_generic - Set Rx address register 1786 * ixgbe_set_rar_generic - Set Rx address register
1790 * @hw: pointer to hardware structure 1787 * @hw: pointer to hardware structure
1791 * @index: Receive address register to write 1788 * @index: Receive address register to write
@@ -1889,8 +1886,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
1889 * to the permanent address. 1886 * to the permanent address.
1890 * Otherwise, use the permanent address from the eeprom. 1887 * Otherwise, use the permanent address from the eeprom.
1891 */ 1888 */
1892 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1889 if (!is_valid_ether_addr(hw->mac.addr)) {
1893 IXGBE_ERR_INVALID_MAC_ADDR) {
1894 /* Get the MAC address from the RAR0 for later reference */ 1890 /* Get the MAC address from the RAR0 for later reference */
1895 hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1891 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
1896 1892
@@ -2617,6 +2613,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2617 bool link_up = false; 2613 bool link_up = false;
2618 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2614 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2619 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2615 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2616 s32 ret_val = 0;
2620 2617
2621 /* 2618 /*
2622 * Link must be up to auto-blink the LEDs; 2619 * Link must be up to auto-blink the LEDs;
@@ -2625,10 +2622,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2625 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2622 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2626 2623
2627 if (!link_up) { 2624 if (!link_up) {
2625 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
2626 * LESM is on.
2627 */
2628 bool got_lock = false;
2629
2630 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2631 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2632 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2633 IXGBE_GSSR_MAC_CSR_SM);
2634 if (ret_val)
2635 goto out;
2636
2637 got_lock = true;
2638 }
2628 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2639 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2629 autoc_reg |= IXGBE_AUTOC_FLU; 2640 autoc_reg |= IXGBE_AUTOC_FLU;
2630 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2641 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2631 IXGBE_WRITE_FLUSH(hw); 2642 IXGBE_WRITE_FLUSH(hw);
2643
2644 if (got_lock)
2645 hw->mac.ops.release_swfw_sync(hw,
2646 IXGBE_GSSR_MAC_CSR_SM);
2632 usleep_range(10000, 20000); 2647 usleep_range(10000, 20000);
2633 } 2648 }
2634 2649
@@ -2637,7 +2652,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2637 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2652 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2638 IXGBE_WRITE_FLUSH(hw); 2653 IXGBE_WRITE_FLUSH(hw);
2639 2654
2640 return 0; 2655out:
2656 return ret_val;
2641} 2657}
2642 2658
2643/** 2659/**
@@ -2649,18 +2665,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2649{ 2665{
2650 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2666 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2651 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2667 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2668 s32 ret_val = 0;
2669 bool got_lock = false;
2670
2671 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
2672 * LESM is on.
2673 */
2674 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2675 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2676 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2677 IXGBE_GSSR_MAC_CSR_SM);
2678 if (ret_val)
2679 goto out;
2680
2681 got_lock = true;
2682 }
2652 2683
2653 autoc_reg &= ~IXGBE_AUTOC_FLU; 2684 autoc_reg &= ~IXGBE_AUTOC_FLU;
2654 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2685 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2655 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2686 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2656 2687
2688 if (hw->mac.type == ixgbe_mac_82599EB)
2689 ixgbe_reset_pipeline_82599(hw);
2690
2691 if (got_lock)
2692 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
2693
2657 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2694 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2658 led_reg &= ~IXGBE_LED_BLINK(index); 2695 led_reg &= ~IXGBE_LED_BLINK(index);
2659 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 2696 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
2660 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 2697 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
2661 IXGBE_WRITE_FLUSH(hw); 2698 IXGBE_WRITE_FLUSH(hw);
2662 2699
2663 return 0; 2700out:
2701 return ret_val;
2664} 2702}
2665 2703
2666/** 2704/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d1188c36..f7a0970a251c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -78,9 +78,9 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); 80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
81s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
81void ixgbe_fc_autoneg(struct ixgbe_hw *hw); 82void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
82 83
83s32 ixgbe_validate_mac_addr(u8 *mac_addr);
84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
85void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 85void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
86s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); 86s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
107 107
108void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 108void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
109 u32 headroom, int strategy); 109 u32 headroom, int strategy);
110s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
110 111
111#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 112#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
112#define IXGBE_EMC_INTERNAL_DATA 0x00 113#define IXGBE_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 8d3a21889099..50aa546b8c7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -37,20 +37,6 @@ static struct dentry *ixgbe_dbg_root;
37static char ixgbe_dbg_reg_ops_buf[256] = ""; 37static char ixgbe_dbg_reg_ops_buf[256] = "";
38 38
39/** 39/**
40 * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
41 * @inode: inode that was opened
42 * @filp: file info
43 *
44 * Stash the adapter pointer hiding in the inode into the file pointer where
45 * we can find it later in the read and write calls
46 **/
47static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
48{
49 filp->private_data = inode->i_private;
50 return 0;
51}
52
53/**
54 * ixgbe_dbg_reg_ops_read - read for reg_ops datum 40 * ixgbe_dbg_reg_ops_read - read for reg_ops datum
55 * @filp: the opened file 41 * @filp: the opened file
56 * @buffer: where to write the data for the user to read 42 * @buffer: where to write the data for the user to read
@@ -61,23 +47,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
61 size_t count, loff_t *ppos) 47 size_t count, loff_t *ppos)
62{ 48{
63 struct ixgbe_adapter *adapter = filp->private_data; 49 struct ixgbe_adapter *adapter = filp->private_data;
64 char buf[256]; 50 char *buf;
65 int bytes_not_copied;
66 int len; 51 int len;
67 52
68 /* don't allow partial reads */ 53 /* don't allow partial reads */
69 if (*ppos != 0) 54 if (*ppos != 0)
70 return 0; 55 return 0;
71 56
72 len = snprintf(buf, sizeof(buf), "%s: %s\n", 57 buf = kasprintf(GFP_KERNEL, "%s: %s\n",
73 adapter->netdev->name, ixgbe_dbg_reg_ops_buf); 58 adapter->netdev->name,
74 if (count < len) 59 ixgbe_dbg_reg_ops_buf);
60 if (!buf)
61 return -ENOMEM;
62
63 if (count < strlen(buf)) {
64 kfree(buf);
75 return -ENOSPC; 65 return -ENOSPC;
76 bytes_not_copied = copy_to_user(buffer, buf, len); 66 }
77 if (bytes_not_copied < 0) 67
78 return bytes_not_copied; 68 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79 69
80 *ppos = len; 70 kfree(buf);
81 return len; 71 return len;
82} 72}
83 73
@@ -93,7 +83,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
93 size_t count, loff_t *ppos) 83 size_t count, loff_t *ppos)
94{ 84{
95 struct ixgbe_adapter *adapter = filp->private_data; 85 struct ixgbe_adapter *adapter = filp->private_data;
96 int bytes_not_copied; 86 int len;
97 87
98 /* don't allow partial writes */ 88 /* don't allow partial writes */
99 if (*ppos != 0) 89 if (*ppos != 0)
@@ -101,14 +91,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
101 if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) 91 if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
102 return -ENOSPC; 92 return -ENOSPC;
103 93
104 bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); 94 len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf,
105 if (bytes_not_copied < 0) 95 sizeof(ixgbe_dbg_reg_ops_buf)-1,
106 return bytes_not_copied; 96 ppos,
107 else if (bytes_not_copied < count) 97 buffer,
108 count -= bytes_not_copied; 98 count);
109 else 99 if (len < 0)
110 return -ENOSPC; 100 return len;
111 ixgbe_dbg_reg_ops_buf[count] = '\0'; 101
102 ixgbe_dbg_reg_ops_buf[len] = '\0';
112 103
113 if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { 104 if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
114 u32 reg, value; 105 u32 reg, value;
@@ -142,7 +133,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
142 133
143static const struct file_operations ixgbe_dbg_reg_ops_fops = { 134static const struct file_operations ixgbe_dbg_reg_ops_fops = {
144 .owner = THIS_MODULE, 135 .owner = THIS_MODULE,
145 .open = ixgbe_dbg_reg_ops_open, 136 .open = simple_open,
146 .read = ixgbe_dbg_reg_ops_read, 137 .read = ixgbe_dbg_reg_ops_read,
147 .write = ixgbe_dbg_reg_ops_write, 138 .write = ixgbe_dbg_reg_ops_write,
148}; 139};
@@ -150,20 +141,6 @@ static const struct file_operations ixgbe_dbg_reg_ops_fops = {
150static char ixgbe_dbg_netdev_ops_buf[256] = ""; 141static char ixgbe_dbg_netdev_ops_buf[256] = "";
151 142
152/** 143/**
153 * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
154 * @inode: inode that was opened
155 * @filp: file info
156 *
157 * Stash the adapter pointer hiding in the inode into the file pointer
158 * where we can find it later in the read and write calls
159 **/
160static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
161{
162 filp->private_data = inode->i_private;
163 return 0;
164}
165
166/**
167 * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum 144 * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
168 * @filp: the opened file 145 * @filp: the opened file
169 * @buffer: where to write the data for the user to read 146 * @buffer: where to write the data for the user to read
@@ -175,23 +152,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
175 size_t count, loff_t *ppos) 152 size_t count, loff_t *ppos)
176{ 153{
177 struct ixgbe_adapter *adapter = filp->private_data; 154 struct ixgbe_adapter *adapter = filp->private_data;
178 char buf[256]; 155 char *buf;
179 int bytes_not_copied;
180 int len; 156 int len;
181 157
182 /* don't allow partial reads */ 158 /* don't allow partial reads */
183 if (*ppos != 0) 159 if (*ppos != 0)
184 return 0; 160 return 0;
185 161
186 len = snprintf(buf, sizeof(buf), "%s: %s\n", 162 buf = kasprintf(GFP_KERNEL, "%s: %s\n",
187 adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); 163 adapter->netdev->name,
188 if (count < len) 164 ixgbe_dbg_netdev_ops_buf);
165 if (!buf)
166 return -ENOMEM;
167
168 if (count < strlen(buf)) {
169 kfree(buf);
189 return -ENOSPC; 170 return -ENOSPC;
190 bytes_not_copied = copy_to_user(buffer, buf, len); 171 }
191 if (bytes_not_copied < 0) 172
192 return bytes_not_copied; 173 len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
193 174
194 *ppos = len; 175 kfree(buf);
195 return len; 176 return len;
196} 177}
197 178
@@ -207,7 +188,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
207 size_t count, loff_t *ppos) 188 size_t count, loff_t *ppos)
208{ 189{
209 struct ixgbe_adapter *adapter = filp->private_data; 190 struct ixgbe_adapter *adapter = filp->private_data;
210 int bytes_not_copied; 191 int len;
211 192
212 /* don't allow partial writes */ 193 /* don't allow partial writes */
213 if (*ppos != 0) 194 if (*ppos != 0)
@@ -215,15 +196,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
215 if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) 196 if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
216 return -ENOSPC; 197 return -ENOSPC;
217 198
218 bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, 199 len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf,
219 buffer, count); 200 sizeof(ixgbe_dbg_netdev_ops_buf)-1,
220 if (bytes_not_copied < 0) 201 ppos,
221 return bytes_not_copied; 202 buffer,
222 else if (bytes_not_copied < count) 203 count);
223 count -= bytes_not_copied; 204 if (len < 0)
224 else 205 return len;
225 return -ENOSPC; 206
226 ixgbe_dbg_netdev_ops_buf[count] = '\0'; 207 ixgbe_dbg_netdev_ops_buf[len] = '\0';
227 208
228 if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { 209 if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
229 adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); 210 adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
@@ -238,7 +219,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
238 219
239static const struct file_operations ixgbe_dbg_netdev_ops_fops = { 220static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
240 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
241 .open = ixgbe_dbg_netdev_ops_open, 222 .open = simple_open,
242 .read = ixgbe_dbg_netdev_ops_read, 223 .read = ixgbe_dbg_netdev_ops_read,
243 .write = ixgbe_dbg_netdev_ops_write, 224 .write = ixgbe_dbg_netdev_ops_write,
244}; 225};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e901bee..326858424345 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -383,6 +383,11 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
383 (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) 383 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
384 return -EINVAL; 384 return -EINVAL;
385 385
386 /* some devices do not support autoneg of link flow control */
387 if ((pause->autoneg == AUTONEG_ENABLE) &&
388 (ixgbe_device_supports_autoneg_fc(hw) != 0))
389 return -EINVAL;
390
386 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); 391 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
387 392
388 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) 393 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
@@ -887,24 +892,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
887 struct ethtool_ringparam *ring) 892 struct ethtool_ringparam *ring)
888{ 893{
889 struct ixgbe_adapter *adapter = netdev_priv(netdev); 894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
890 struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; 895 struct ixgbe_ring *temp_ring;
891 int i, err = 0; 896 int i, err = 0;
892 u32 new_rx_count, new_tx_count; 897 u32 new_rx_count, new_tx_count;
893 bool need_update = false;
894 898
895 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 899 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
896 return -EINVAL; 900 return -EINVAL;
897 901
898 new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD); 902 new_tx_count = clamp_t(u32, ring->tx_pending,
899 new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD); 903 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
900 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
901
902 new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
903 new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
904 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); 904 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
905 905
906 if ((new_tx_count == adapter->tx_ring[0]->count) && 906 new_rx_count = clamp_t(u32, ring->rx_pending,
907 (new_rx_count == adapter->rx_ring[0]->count)) { 907 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
908 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
909
910 if ((new_tx_count == adapter->tx_ring_count) &&
911 (new_rx_count == adapter->rx_ring_count)) {
908 /* nothing to do */ 912 /* nothing to do */
909 return 0; 913 return 0;
910 } 914 }
@@ -922,81 +926,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
922 goto clear_reset; 926 goto clear_reset;
923 } 927 }
924 928
925 temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 929 /* allocate temporary buffer to store rings in */
926 if (!temp_tx_ring) { 930 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
931 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
932
933 if (!temp_ring) {
927 err = -ENOMEM; 934 err = -ENOMEM;
928 goto clear_reset; 935 goto clear_reset;
929 } 936 }
930 937
938 ixgbe_down(adapter);
939
940 /*
941 * Setup new Tx resources and free the old Tx resources in that order.
942 * We can then assign the new resources to the rings via a memcpy.
943 * The advantage to this approach is that we are guaranteed to still
944 * have resources even in the case of an allocation failure.
945 */
931 if (new_tx_count != adapter->tx_ring_count) { 946 if (new_tx_count != adapter->tx_ring_count) {
932 for (i = 0; i < adapter->num_tx_queues; i++) { 947 for (i = 0; i < adapter->num_tx_queues; i++) {
933 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 948 memcpy(&temp_ring[i], adapter->tx_ring[i],
934 sizeof(struct ixgbe_ring)); 949 sizeof(struct ixgbe_ring));
935 temp_tx_ring[i].count = new_tx_count; 950
936 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]); 951 temp_ring[i].count = new_tx_count;
952 err = ixgbe_setup_tx_resources(&temp_ring[i]);
937 if (err) { 953 if (err) {
938 while (i) { 954 while (i) {
939 i--; 955 i--;
940 ixgbe_free_tx_resources(&temp_tx_ring[i]); 956 ixgbe_free_tx_resources(&temp_ring[i]);
941 } 957 }
942 goto clear_reset; 958 goto err_setup;
943 } 959 }
944 } 960 }
945 need_update = true;
946 }
947 961
948 temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 962 for (i = 0; i < adapter->num_tx_queues; i++) {
949 if (!temp_rx_ring) { 963 ixgbe_free_tx_resources(adapter->tx_ring[i]);
950 err = -ENOMEM; 964
951 goto err_setup; 965 memcpy(adapter->tx_ring[i], &temp_ring[i],
966 sizeof(struct ixgbe_ring));
967 }
968
969 adapter->tx_ring_count = new_tx_count;
952 } 970 }
953 971
972 /* Repeat the process for the Rx rings if needed */
954 if (new_rx_count != adapter->rx_ring_count) { 973 if (new_rx_count != adapter->rx_ring_count) {
955 for (i = 0; i < adapter->num_rx_queues; i++) { 974 for (i = 0; i < adapter->num_rx_queues; i++) {
956 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 975 memcpy(&temp_ring[i], adapter->rx_ring[i],
957 sizeof(struct ixgbe_ring)); 976 sizeof(struct ixgbe_ring));
958 temp_rx_ring[i].count = new_rx_count; 977
959 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]); 978 temp_ring[i].count = new_rx_count;
979 err = ixgbe_setup_rx_resources(&temp_ring[i]);
960 if (err) { 980 if (err) {
961 while (i) { 981 while (i) {
962 i--; 982 i--;
963 ixgbe_free_rx_resources(&temp_rx_ring[i]); 983 ixgbe_free_rx_resources(&temp_ring[i]);
964 } 984 }
965 goto err_setup; 985 goto err_setup;
966 } 986 }
987
967 } 988 }
968 need_update = true;
969 }
970 989
971 /* if rings need to be updated, here's the place to do it in one shot */ 990 for (i = 0; i < adapter->num_rx_queues; i++) {
972 if (need_update) { 991 ixgbe_free_rx_resources(adapter->rx_ring[i]);
973 ixgbe_down(adapter);
974 992
975 /* tx */ 993 memcpy(adapter->rx_ring[i], &temp_ring[i],
976 if (new_tx_count != adapter->tx_ring_count) { 994 sizeof(struct ixgbe_ring));
977 for (i = 0; i < adapter->num_tx_queues; i++) {
978 ixgbe_free_tx_resources(adapter->tx_ring[i]);
979 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
980 sizeof(struct ixgbe_ring));
981 }
982 adapter->tx_ring_count = new_tx_count;
983 } 995 }
984 996
985 /* rx */ 997 adapter->rx_ring_count = new_rx_count;
986 if (new_rx_count != adapter->rx_ring_count) {
987 for (i = 0; i < adapter->num_rx_queues; i++) {
988 ixgbe_free_rx_resources(adapter->rx_ring[i]);
989 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
990 sizeof(struct ixgbe_ring));
991 }
992 adapter->rx_ring_count = new_rx_count;
993 }
994 ixgbe_up(adapter);
995 } 998 }
996 999
997 vfree(temp_rx_ring);
998err_setup: 1000err_setup:
999 vfree(temp_tx_ring); 1001 ixgbe_up(adapter);
1002 vfree(temp_ring);
1000clear_reset: 1003clear_reset:
1001 clear_bit(__IXGBE_RESETTING, &adapter->state); 1004 clear_bit(__IXGBE_RESETTING, &adapter->state);
1002 return err; 1005 return err;
@@ -2669,7 +2672,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2669 struct ixgbe_adapter *adapter = netdev_priv(dev); 2672 struct ixgbe_adapter *adapter = netdev_priv(dev);
2670 2673
2671 switch (adapter->hw.mac.type) { 2674 switch (adapter->hw.mac.type) {
2672#ifdef CONFIG_IXGBE_PTP
2673 case ixgbe_mac_X540: 2675 case ixgbe_mac_X540:
2674 case ixgbe_mac_82599EB: 2676 case ixgbe_mac_82599EB:
2675 info->so_timestamping = 2677 info->so_timestamping =
@@ -2695,7 +2697,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
2695 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | 2697 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2696 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 2698 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2697 break; 2699 break;
2698#endif /* CONFIG_IXGBE_PTP */
2699 default: 2700 default:
2700 return ethtool_op_get_ts_info(dev, info); 2701 return ethtool_op_get_ts_info(dev, info);
2701 break; 2702 break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef14fdf3..252850d9a3e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
800 return -EINVAL; 800 return -EINVAL;
801 801
802 e_info(drv, "Enabling FCoE offload features.\n"); 802 e_info(drv, "Enabling FCoE offload features.\n");
803
804 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
805 e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
806
803 if (netif_running(netdev)) 807 if (netif_running(netdev))
804 netdev->netdev_ops->ndo_stop(netdev); 808 netdev->netdev_ops->ndo_stop(netdev);
805 809
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbcedd548..8c74f739011d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
802 /* setup affinity mask and node */ 802 /* setup affinity mask and node */
803 if (cpu != -1) 803 if (cpu != -1)
804 cpumask_set_cpu(cpu, &q_vector->affinity_mask); 804 cpumask_set_cpu(cpu, &q_vector->affinity_mask);
805 else
806 cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
807 q_vector->numa_node = node; 805 q_vector->numa_node = node;
808 806
807#ifdef CONFIG_IXGBE_DCA
808 /* initialize CPU for DCA */
809 q_vector->cpu = -1;
810
811#endif
809 /* initialize NAPI */ 812 /* initialize NAPI */
810 netif_napi_add(adapter->netdev, &q_vector->napi, 813 netif_napi_add(adapter->netdev, &q_vector->napi,
811 ixgbe_poll, 64); 814 ixgbe_poll, 64);
@@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
821 /* initialize pointer to rings */ 824 /* initialize pointer to rings */
822 ring = q_vector->ring; 825 ring = q_vector->ring;
823 826
827 /* intialize ITR */
828 if (txr_count && !rxr_count) {
829 /* tx only vector */
830 if (adapter->tx_itr_setting == 1)
831 q_vector->itr = IXGBE_10K_ITR;
832 else
833 q_vector->itr = adapter->tx_itr_setting;
834 } else {
835 /* rx or rx/tx vector */
836 if (adapter->rx_itr_setting == 1)
837 q_vector->itr = IXGBE_20K_ITR;
838 else
839 q_vector->itr = adapter->rx_itr_setting;
840 }
841
824 while (txr_count) { 842 while (txr_count) {
825 /* assign generic ring traits */ 843 /* assign generic ring traits */
826 ring->dev = &adapter->pdev->dev; 844 ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552e1f4a..20a5af6d87d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
44#include <linux/ethtool.h> 44#include <linux/ethtool.h>
45#include <linux/if.h> 45#include <linux/if.h>
46#include <linux/if_vlan.h> 46#include <linux/if_vlan.h>
47#include <linux/if_bridge.h>
47#include <linux/prefetch.h> 48#include <linux/prefetch.h>
48#include <scsi/fc/fc_fcoe.h> 49#include <scsi/fc/fc_fcoe.h>
49 50
@@ -62,11 +63,7 @@ char ixgbe_default_device_descr[] =
62static char ixgbe_default_device_descr[] = 63static char ixgbe_default_device_descr[] =
63 "Intel(R) 10 Gigabit Network Connection"; 64 "Intel(R) 10 Gigabit Network Connection";
64#endif 65#endif
65#define MAJ 3 66#define DRV_VERSION "3.11.33-k"
66#define MIN 9
67#define BUILD 15
68#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
69 __stringify(BUILD) "-k"
70const char ixgbe_driver_version[] = DRV_VERSION; 67const char ixgbe_driver_version[] = DRV_VERSION;
71static const char ixgbe_copyright[] = 68static const char ixgbe_copyright[] =
72 "Copyright (c) 1999-2012 Intel Corporation."; 69 "Copyright (c) 1999-2012 Intel Corporation.";
@@ -335,11 +332,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
335 goto exit; 332 goto exit;
336 333
337 dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); 334 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
338 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); 335 pr_info(" %s %s %s %s\n",
336 "Queue [NTU] [NTC] [bi(ntc)->dma ]",
337 "leng", "ntw", "timestamp");
339 for (n = 0; n < adapter->num_tx_queues; n++) { 338 for (n = 0; n < adapter->num_tx_queues; n++) {
340 tx_ring = adapter->tx_ring[n]; 339 tx_ring = adapter->tx_ring[n];
341 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; 340 tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
342 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", 341 pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
343 n, tx_ring->next_to_use, tx_ring->next_to_clean, 342 n, tx_ring->next_to_use, tx_ring->next_to_clean,
344 (u64)dma_unmap_addr(tx_buffer, dma), 343 (u64)dma_unmap_addr(tx_buffer, dma),
345 dma_unmap_len(tx_buffer, len), 344 dma_unmap_len(tx_buffer, len),
@@ -355,13 +354,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
355 354
356 /* Transmit Descriptor Formats 355 /* Transmit Descriptor Formats
357 * 356 *
358 * Advanced Transmit Descriptor 357 * 82598 Advanced Transmit Descriptor
359 * +--------------------------------------------------------------+ 358 * +--------------------------------------------------------------+
360 * 0 | Buffer Address [63:0] | 359 * 0 | Buffer Address [63:0] |
361 * +--------------------------------------------------------------+ 360 * +--------------------------------------------------------------+
362 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | 361 * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
363 * +--------------------------------------------------------------+ 362 * +--------------------------------------------------------------+
364 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 363 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
364 *
365 * 82598 Advanced Transmit Descriptor (Write-Back Format)
366 * +--------------------------------------------------------------+
367 * 0 | RSV [63:0] |
368 * +--------------------------------------------------------------+
369 * 8 | RSV | STA | NXTSEQ |
370 * +--------------------------------------------------------------+
371 * 63 36 35 32 31 0
372 *
373 * 82599+ Advanced Transmit Descriptor
374 * +--------------------------------------------------------------+
375 * 0 | Buffer Address [63:0] |
376 * +--------------------------------------------------------------+
377 * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
378 * +--------------------------------------------------------------+
379 * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
380 *
381 * 82599+ Advanced Transmit Descriptor (Write-Back Format)
382 * +--------------------------------------------------------------+
383 * 0 | RSV [63:0] |
384 * +--------------------------------------------------------------+
385 * 8 | RSV | STA | RSV |
386 * +--------------------------------------------------------------+
387 * 63 36 35 32 31 0
365 */ 388 */
366 389
367 for (n = 0; n < adapter->num_tx_queues; n++) { 390 for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -369,40 +392,43 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
369 pr_info("------------------------------------\n"); 392 pr_info("------------------------------------\n");
370 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); 393 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
371 pr_info("------------------------------------\n"); 394 pr_info("------------------------------------\n");
372 pr_info("T [desc] [address 63:0 ] " 395 pr_info("%s%s %s %s %s %s\n",
373 "[PlPOIdStDDt Ln] [bi->dma ] " 396 "T [desc] [address 63:0 ] ",
374 "leng ntw timestamp bi->skb\n"); 397 "[PlPOIdStDDt Ln] [bi->dma ] ",
398 "leng", "ntw", "timestamp", "bi->skb");
375 399
376 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { 400 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
377 tx_desc = IXGBE_TX_DESC(tx_ring, i); 401 tx_desc = IXGBE_TX_DESC(tx_ring, i);
378 tx_buffer = &tx_ring->tx_buffer_info[i]; 402 tx_buffer = &tx_ring->tx_buffer_info[i];
379 u0 = (struct my_u0 *)tx_desc; 403 u0 = (struct my_u0 *)tx_desc;
380 pr_info("T [0x%03X] %016llX %016llX %016llX" 404 if (dma_unmap_len(tx_buffer, len) > 0) {
381 " %04X %p %016llX %p", i, 405 pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
382 le64_to_cpu(u0->a), 406 i,
383 le64_to_cpu(u0->b), 407 le64_to_cpu(u0->a),
384 (u64)dma_unmap_addr(tx_buffer, dma), 408 le64_to_cpu(u0->b),
385 dma_unmap_len(tx_buffer, len), 409 (u64)dma_unmap_addr(tx_buffer, dma),
386 tx_buffer->next_to_watch,
387 (u64)tx_buffer->time_stamp,
388 tx_buffer->skb);
389 if (i == tx_ring->next_to_use &&
390 i == tx_ring->next_to_clean)
391 pr_cont(" NTC/U\n");
392 else if (i == tx_ring->next_to_use)
393 pr_cont(" NTU\n");
394 else if (i == tx_ring->next_to_clean)
395 pr_cont(" NTC\n");
396 else
397 pr_cont("\n");
398
399 if (netif_msg_pktdata(adapter) &&
400 tx_buffer->skb)
401 print_hex_dump(KERN_INFO, "",
402 DUMP_PREFIX_ADDRESS, 16, 1,
403 tx_buffer->skb->data,
404 dma_unmap_len(tx_buffer, len), 410 dma_unmap_len(tx_buffer, len),
405 true); 411 tx_buffer->next_to_watch,
412 (u64)tx_buffer->time_stamp,
413 tx_buffer->skb);
414 if (i == tx_ring->next_to_use &&
415 i == tx_ring->next_to_clean)
416 pr_cont(" NTC/U\n");
417 else if (i == tx_ring->next_to_use)
418 pr_cont(" NTU\n");
419 else if (i == tx_ring->next_to_clean)
420 pr_cont(" NTC\n");
421 else
422 pr_cont("\n");
423
424 if (netif_msg_pktdata(adapter) &&
425 tx_buffer->skb)
426 print_hex_dump(KERN_INFO, "",
427 DUMP_PREFIX_ADDRESS, 16, 1,
428 tx_buffer->skb->data,
429 dma_unmap_len(tx_buffer, len),
430 true);
431 }
406 } 432 }
407 } 433 }
408 434
@@ -422,7 +448,9 @@ rx_ring_summary:
422 448
423 dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); 449 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
424 450
425 /* Advanced Receive Descriptor (Read) Format 451 /* Receive Descriptor Formats
452 *
453 * 82598 Advanced Receive Descriptor (Read) Format
426 * 63 1 0 454 * 63 1 0
427 * +-----------------------------------------------------+ 455 * +-----------------------------------------------------+
428 * 0 | Packet Buffer Address [63:1] |A0/NSE| 456 * 0 | Packet Buffer Address [63:1] |A0/NSE|
@@ -431,27 +459,52 @@ rx_ring_summary:
431 * +-----------------------------------------------------+ 459 * +-----------------------------------------------------+
432 * 460 *
433 * 461 *
434 * Advanced Receive Descriptor (Write-Back) Format 462 * 82598 Advanced Receive Descriptor (Write-Back) Format
435 * 463 *
436 * 63 48 47 32 31 30 21 20 16 15 4 3 0 464 * 63 48 47 32 31 30 21 20 16 15 4 3 0
437 * +------------------------------------------------------+ 465 * +------------------------------------------------------+
438 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS | 466 * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
439 * | Checksum Ident | | | | Type | Type | 467 * | Packet | IP | | | | Type | Type |
468 * | Checksum | Ident | | | | | |
440 * +------------------------------------------------------+ 469 * +------------------------------------------------------+
441 * 8 | VLAN Tag | Length | Extended Error | Extended Status | 470 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
442 * +------------------------------------------------------+ 471 * +------------------------------------------------------+
443 * 63 48 47 32 31 20 19 0 472 * 63 48 47 32 31 20 19 0
473 *
474 * 82599+ Advanced Receive Descriptor (Read) Format
475 * 63 1 0
476 * +-----------------------------------------------------+
477 * 0 | Packet Buffer Address [63:1] |A0/NSE|
478 * +----------------------------------------------+------+
479 * 8 | Header Buffer Address [63:1] | DD |
480 * +-----------------------------------------------------+
481 *
482 *
483 * 82599+ Advanced Receive Descriptor (Write-Back) Format
484 *
485 * 63 48 47 32 31 30 21 20 17 16 4 3 0
486 * +------------------------------------------------------+
487 * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
488 * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
489 * |/ Flow Dir Flt ID | | | | | |
490 * +------------------------------------------------------+
491 * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
492 * +------------------------------------------------------+
493 * 63 48 47 32 31 20 19 0
444 */ 494 */
495
445 for (n = 0; n < adapter->num_rx_queues; n++) { 496 for (n = 0; n < adapter->num_rx_queues; n++) {
446 rx_ring = adapter->rx_ring[n]; 497 rx_ring = adapter->rx_ring[n];
447 pr_info("------------------------------------\n"); 498 pr_info("------------------------------------\n");
448 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); 499 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
449 pr_info("------------------------------------\n"); 500 pr_info("------------------------------------\n");
450 pr_info("R [desc] [ PktBuf A0] " 501 pr_info("%s%s%s",
451 "[ HeadBuf DD] [bi->dma ] [bi->skb] " 502 "R [desc] [ PktBuf A0] ",
503 "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
452 "<-- Adv Rx Read format\n"); 504 "<-- Adv Rx Read format\n");
453 pr_info("RWB[desc] [PcsmIpSHl PtRs] " 505 pr_info("%s%s%s",
454 "[vl er S cks ln] ---------------- [bi->skb] " 506 "RWB[desc] [PcsmIpSHl PtRs] ",
507 "[vl er S cks ln] ---------------- [bi->skb ] ",
455 "<-- Adv Rx Write-Back format\n"); 508 "<-- Adv Rx Write-Back format\n");
456 509
457 for (i = 0; i < rx_ring->count; i++) { 510 for (i = 0; i < rx_ring->count; i++) {
@@ -646,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
646 struct ixgbe_hw *hw = &adapter->hw; 699 struct ixgbe_hw *hw = &adapter->hw;
647 struct ixgbe_hw_stats *hwstats = &adapter->stats; 700 struct ixgbe_hw_stats *hwstats = &adapter->stats;
648 u32 xoff[8] = {0}; 701 u32 xoff[8] = {0};
702 u8 tc;
649 int i; 703 int i;
650 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; 704 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
651 705
@@ -659,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
659 713
660 /* update stats for each tc, only valid with PFC enabled */ 714 /* update stats for each tc, only valid with PFC enabled */
661 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { 715 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
716 u32 pxoffrxc;
717
662 switch (hw->mac.type) { 718 switch (hw->mac.type) {
663 case ixgbe_mac_82598EB: 719 case ixgbe_mac_82598EB:
664 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 720 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
665 break; 721 break;
666 default: 722 default:
667 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); 723 pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
668 } 724 }
669 hwstats->pxoffrxc[i] += xoff[i]; 725 hwstats->pxoffrxc[i] += pxoffrxc;
726 /* Get the TC for given UP */
727 tc = netdev_get_prio_tc_map(adapter->netdev, i);
728 xoff[tc] += pxoffrxc;
670 } 729 }
671 730
672 /* disarm tx queues that have received xoff frames */ 731 /* disarm tx queues that have received xoff frames */
673 for (i = 0; i < adapter->num_tx_queues; i++) { 732 for (i = 0; i < adapter->num_tx_queues; i++) {
674 struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; 733 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
675 u8 tc = tx_ring->dcb_tc;
676 734
735 tc = tx_ring->dcb_tc;
677 if (xoff[tc]) 736 if (xoff[tc])
678 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); 737 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
679 } 738 }
@@ -791,10 +850,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
791 total_bytes += tx_buffer->bytecount; 850 total_bytes += tx_buffer->bytecount;
792 total_packets += tx_buffer->gso_segs; 851 total_packets += tx_buffer->gso_segs;
793 852
794#ifdef CONFIG_IXGBE_PTP
795 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) 853 if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
796 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); 854 ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
797#endif
798 855
799 /* free the skb */ 856 /* free the skb */
800 dev_kfree_skb_any(tx_buffer->skb); 857 dev_kfree_skb_any(tx_buffer->skb);
@@ -967,7 +1024,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
967 * which will cause the DCA tag to be cleared. 1024 * which will cause the DCA tag to be cleared.
968 */ 1025 */
969 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | 1026 rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
970 IXGBE_DCA_RXCTRL_DATA_DCA_EN |
971 IXGBE_DCA_RXCTRL_DESC_DCA_EN; 1027 IXGBE_DCA_RXCTRL_DESC_DCA_EN;
972 1028
973 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); 1029 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1244,6 +1300,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1244 struct vlan_hdr *vlan; 1300 struct vlan_hdr *vlan;
1245 /* l3 headers */ 1301 /* l3 headers */
1246 struct iphdr *ipv4; 1302 struct iphdr *ipv4;
1303 struct ipv6hdr *ipv6;
1247 } hdr; 1304 } hdr;
1248 __be16 protocol; 1305 __be16 protocol;
1249 u8 nexthdr = 0; /* default to not TCP */ 1306 u8 nexthdr = 0; /* default to not TCP */
@@ -1281,20 +1338,30 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1281 if (hlen < sizeof(struct iphdr)) 1338 if (hlen < sizeof(struct iphdr))
1282 return hdr.network - data; 1339 return hdr.network - data;
1283 1340
1341 /* record next protocol if header is present */
1342 if (!hdr.ipv4->frag_off)
1343 nexthdr = hdr.ipv4->protocol;
1344 } else if (protocol == __constant_htons(ETH_P_IPV6)) {
1345 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
1346 return max_len;
1347
1284 /* record next protocol */ 1348 /* record next protocol */
1285 nexthdr = hdr.ipv4->protocol; 1349 nexthdr = hdr.ipv6->nexthdr;
1286 hdr.network += hlen; 1350 hlen = sizeof(struct ipv6hdr);
1287#ifdef IXGBE_FCOE 1351#ifdef IXGBE_FCOE
1288 } else if (protocol == __constant_htons(ETH_P_FCOE)) { 1352 } else if (protocol == __constant_htons(ETH_P_FCOE)) {
1289 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) 1353 if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
1290 return max_len; 1354 return max_len;
1291 hdr.network += FCOE_HEADER_LEN; 1355 hlen = FCOE_HEADER_LEN;
1292#endif 1356#endif
1293 } else { 1357 } else {
1294 return hdr.network - data; 1358 return hdr.network - data;
1295 } 1359 }
1296 1360
1297 /* finally sort out TCP */ 1361 /* relocate pointer to start of L4 header */
1362 hdr.network += hlen;
1363
1364 /* finally sort out TCP/UDP */
1298 if (nexthdr == IPPROTO_TCP) { 1365 if (nexthdr == IPPROTO_TCP) {
1299 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) 1366 if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
1300 return max_len; 1367 return max_len;
@@ -1307,6 +1374,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
1307 return hdr.network - data; 1374 return hdr.network - data;
1308 1375
1309 hdr.network += hlen; 1376 hdr.network += hlen;
1377 } else if (nexthdr == IPPROTO_UDP) {
1378 if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
1379 return max_len;
1380
1381 hdr.network += sizeof(struct udphdr);
1310 } 1382 }
1311 1383
1312 /* 1384 /*
@@ -1369,9 +1441,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1369 1441
1370 ixgbe_rx_checksum(rx_ring, rx_desc, skb); 1442 ixgbe_rx_checksum(rx_ring, rx_desc, skb);
1371 1443
1372#ifdef CONFIG_IXGBE_PTP
1373 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); 1444 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
1374#endif
1375 1445
1376 if ((dev->features & NETIF_F_HW_VLAN_RX) && 1446 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1377 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1447 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1851,7 @@ dma_sync:
1781 **/ 1851 **/
1782static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1852static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1783 struct ixgbe_ring *rx_ring, 1853 struct ixgbe_ring *rx_ring,
1784 int budget) 1854 const int budget)
1785{ 1855{
1786 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1856 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1787#ifdef IXGBE_FCOE 1857#ifdef IXGBE_FCOE
@@ -1832,7 +1902,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1832 1902
1833 /* probably a little skewed due to removing CRC */ 1903 /* probably a little skewed due to removing CRC */
1834 total_rx_bytes += skb->len; 1904 total_rx_bytes += skb->len;
1835 total_rx_packets++;
1836 1905
1837 /* populate checksum, timestamp, VLAN, and protocol */ 1906 /* populate checksum, timestamp, VLAN, and protocol */
1838 ixgbe_process_skb_fields(rx_ring, rx_desc, skb); 1907 ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1934,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1865 ixgbe_rx_skb(q_vector, skb); 1934 ixgbe_rx_skb(q_vector, skb);
1866 1935
1867 /* update budget accounting */ 1936 /* update budget accounting */
1868 budget--; 1937 total_rx_packets++;
1869 } while (likely(budget)); 1938 } while (likely(total_rx_packets < budget));
1870 1939
1871 u64_stats_update_begin(&rx_ring->syncp); 1940 u64_stats_update_begin(&rx_ring->syncp);
1872 rx_ring->stats.packets += total_rx_packets; 1941 rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1947,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1878 if (cleaned_count) 1947 if (cleaned_count)
1879 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); 1948 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1880 1949
1881 return !!budget; 1950 return (total_rx_packets < budget);
1882} 1951}
1883 1952
1884/** 1953/**
@@ -1914,20 +1983,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1914 ixgbe_for_each_ring(ring, q_vector->tx) 1983 ixgbe_for_each_ring(ring, q_vector->tx)
1915 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx); 1984 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1916 1985
1917 if (q_vector->tx.ring && !q_vector->rx.ring) {
1918 /* tx only vector */
1919 if (adapter->tx_itr_setting == 1)
1920 q_vector->itr = IXGBE_10K_ITR;
1921 else
1922 q_vector->itr = adapter->tx_itr_setting;
1923 } else {
1924 /* rx or rx/tx vector */
1925 if (adapter->rx_itr_setting == 1)
1926 q_vector->itr = IXGBE_20K_ITR;
1927 else
1928 q_vector->itr = adapter->rx_itr_setting;
1929 }
1930
1931 ixgbe_write_eitr(q_vector); 1986 ixgbe_write_eitr(q_vector);
1932 } 1987 }
1933 1988
@@ -2324,10 +2379,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2324 break; 2379 break;
2325 } 2380 }
2326 2381
2327#ifdef CONFIG_IXGBE_PTP
2328 if (adapter->hw.mac.type == ixgbe_mac_X540) 2382 if (adapter->hw.mac.type == ixgbe_mac_X540)
2329 mask |= IXGBE_EIMS_TIMESYNC; 2383 mask |= IXGBE_EIMS_TIMESYNC;
2330#endif
2331 2384
2332 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && 2385 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2333 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) 2386 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2446,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
2393 2446
2394 ixgbe_check_fan_failure(adapter, eicr); 2447 ixgbe_check_fan_failure(adapter, eicr);
2395 2448
2396#ifdef CONFIG_IXGBE_PTP
2397 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 2449 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2398 ixgbe_ptp_check_pps_event(adapter, eicr); 2450 ixgbe_ptp_check_pps_event(adapter, eicr);
2399#endif
2400 2451
2401 /* re-enable the original interrupt state, no lsc, no queues */ 2452 /* re-enable the original interrupt state, no lsc, no queues */
2402 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2453 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2639,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2588 } 2639 }
2589 2640
2590 ixgbe_check_fan_failure(adapter, eicr); 2641 ixgbe_check_fan_failure(adapter, eicr);
2591#ifdef CONFIG_IXGBE_PTP
2592 if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 2642 if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
2593 ixgbe_ptp_check_pps_event(adapter, eicr); 2643 ixgbe_ptp_check_pps_event(adapter, eicr);
2594#endif
2595 2644
2596 /* would disable interrupts here but EIAM disabled it */ 2645 /* would disable interrupts here but EIAM disabled it */
2597 napi_schedule(&q_vector->napi); 2646 napi_schedule(&q_vector->napi);
@@ -2699,12 +2748,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
2699{ 2748{
2700 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2749 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2701 2750
2702 /* rx/tx vector */
2703 if (adapter->rx_itr_setting == 1)
2704 q_vector->itr = IXGBE_20K_ITR;
2705 else
2706 q_vector->itr = adapter->rx_itr_setting;
2707
2708 ixgbe_write_eitr(q_vector); 2751 ixgbe_write_eitr(q_vector);
2709 2752
2710 ixgbe_set_ivar(adapter, 0, 0, 0); 2753 ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3132,14 +3175,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
3132 ixgbe_configure_srrctl(adapter, ring); 3175 ixgbe_configure_srrctl(adapter, ring);
3133 ixgbe_configure_rscctl(adapter, ring); 3176 ixgbe_configure_rscctl(adapter, ring);
3134 3177
3135 /* If operating in IOV mode set RLPML for X540 */
3136 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
3137 hw->mac.type == ixgbe_mac_X540) {
3138 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
3139 rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
3140 ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
3141 }
3142
3143 if (hw->mac.type == ixgbe_mac_82598EB) { 3178 if (hw->mac.type == ixgbe_mac_82598EB) {
3144 /* 3179 /*
3145 * enable cache line friendly hardware writes: 3180 * enable cache line friendly hardware writes:
@@ -3211,7 +3246,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3211 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); 3246 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
3212 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); 3247 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
3213 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); 3248 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
3214 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3249 if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
3250 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3215 3251
3216 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ 3252 /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
3217 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); 3253 hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3270,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
3234 3270
3235 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3271 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3236 3272
3237 /* enable Tx loopback for VF/PF communication */
3238 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3239 3273
3240 /* Enable MAC Anti-Spoofing */ 3274 /* Enable MAC Anti-Spoofing */
3241 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), 3275 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3297,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3263 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; 3297 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
3264 3298
3265#endif /* IXGBE_FCOE */ 3299#endif /* IXGBE_FCOE */
3300
3301 /* adjust max frame to be at least the size of a standard frame */
3302 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3303 max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
3304
3266 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 3305 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
3267 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 3306 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
3268 mhadd &= ~IXGBE_MHADD_MFS_MASK; 3307 mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
3271 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); 3310 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
3272 } 3311 }
3273 3312
3274 /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
3275 max_frame += VLAN_HLEN;
3276
3277 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 3313 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
3278 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ 3314 /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
3279 hlreg0 |= IXGBE_HLREG0_JUMBOEN; 3315 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4072,11 +4108,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
4072 else 4108 else
4073 ixgbe_configure_msi_and_legacy(adapter); 4109 ixgbe_configure_msi_and_legacy(adapter);
4074 4110
4075 /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */ 4111 /* enable the optics for 82599 SFP+ fiber */
4076 if (hw->mac.ops.enable_tx_laser && 4112 if (hw->mac.ops.enable_tx_laser)
4077 ((hw->phy.multispeed_fiber) ||
4078 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4079 (hw->mac.type == ixgbe_mac_82599EB))))
4080 hw->mac.ops.enable_tx_laser(hw); 4113 hw->mac.ops.enable_tx_laser(hw);
4081 4114
4082 clear_bit(__IXGBE_DOWN, &adapter->state); 4115 clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4225,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
4192 /* update SAN MAC vmdq pool selection */ 4225 /* update SAN MAC vmdq pool selection */
4193 if (hw->mac.san_mac_rar_index) 4226 if (hw->mac.san_mac_rar_index)
4194 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); 4227 hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
4228
4229 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
4230 ixgbe_ptp_reset(adapter);
4195} 4231}
4196 4232
4197/** 4233/**
@@ -4393,11 +4429,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
4393 if (!pci_channel_offline(adapter->pdev)) 4429 if (!pci_channel_offline(adapter->pdev))
4394 ixgbe_reset(adapter); 4430 ixgbe_reset(adapter);
4395 4431
4396 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ 4432 /* power down the optics for 82599 SFP+ fiber */
4397 if (hw->mac.ops.disable_tx_laser && 4433 if (hw->mac.ops.disable_tx_laser)
4398 ((hw->phy.multispeed_fiber) ||
4399 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4400 (hw->mac.type == ixgbe_mac_82599EB))))
4401 hw->mac.ops.disable_tx_laser(hw); 4434 hw->mac.ops.disable_tx_laser(hw);
4402 4435
4403 ixgbe_clean_all_tx_rings(adapter); 4436 ixgbe_clean_all_tx_rings(adapter);
@@ -4429,11 +4462,12 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
4429 * Fields are initialized based on PCI device information and 4462 * Fields are initialized based on PCI device information and
4430 * OS network device settings (MTU size). 4463 * OS network device settings (MTU size).
4431 **/ 4464 **/
4432static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) 4465static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
4433{ 4466{
4434 struct ixgbe_hw *hw = &adapter->hw; 4467 struct ixgbe_hw *hw = &adapter->hw;
4435 struct pci_dev *pdev = adapter->pdev; 4468 struct pci_dev *pdev = adapter->pdev;
4436 unsigned int rss; 4469 unsigned int rss;
4470 u32 fwsm;
4437#ifdef CONFIG_IXGBE_DCB 4471#ifdef CONFIG_IXGBE_DCB
4438 int j; 4472 int j;
4439 struct tc_configuration *tc; 4473 struct tc_configuration *tc;
@@ -4457,7 +4491,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4457 adapter->max_q_vectors = MAX_Q_VECTORS_82598; 4491 adapter->max_q_vectors = MAX_Q_VECTORS_82598;
4458 break; 4492 break;
4459 case ixgbe_mac_X540: 4493 case ixgbe_mac_X540:
4460 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 4494 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4495 if (fwsm & IXGBE_FWSM_TS_ENABLED)
4496 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4461 case ixgbe_mac_82599EB: 4497 case ixgbe_mac_82599EB:
4462 adapter->max_q_vectors = MAX_Q_VECTORS_82599; 4498 adapter->max_q_vectors = MAX_Q_VECTORS_82599;
4463 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 4499 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
@@ -4533,7 +4569,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4533 ixgbe_pbthresh_setup(adapter); 4569 ixgbe_pbthresh_setup(adapter);
4534 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 4570 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4535 hw->fc.send_xon = true; 4571 hw->fc.send_xon = true;
4536 hw->fc.disable_fc_autoneg = false; 4572 hw->fc.disable_fc_autoneg =
4573 (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
4537 4574
4538#ifdef CONFIG_PCI_IOV 4575#ifdef CONFIG_PCI_IOV
4539 /* assign number of SR-IOV VFs */ 4576 /* assign number of SR-IOV VFs */
@@ -4828,14 +4865,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4828 return -EINVAL; 4865 return -EINVAL;
4829 4866
4830 /* 4867 /*
4831 * For 82599EB we cannot allow PF to change MTU greater than 1500 4868 * For 82599EB we cannot allow legacy VFs to enable their receive
4832 * in SR-IOV mode as it may cause buffer overruns in guest VFs that 4869 * paths when MTU greater than 1500 is configured. So display a
4833 * don't allocate and chain buffers correctly. 4870 * warning that legacy VFs will be disabled.
4834 */ 4871 */
4835 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && 4872 if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
4836 (adapter->hw.mac.type == ixgbe_mac_82599EB) && 4873 (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
4837 (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) 4874 (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
4838 return -EINVAL; 4875 e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
4839 4876
4840 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 4877 e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4841 4878
@@ -4901,6 +4938,8 @@ static int ixgbe_open(struct net_device *netdev)
4901 if (err) 4938 if (err)
4902 goto err_set_queues; 4939 goto err_set_queues;
4903 4940
4941 ixgbe_ptp_init(adapter);
4942
4904 ixgbe_up_complete(adapter); 4943 ixgbe_up_complete(adapter);
4905 4944
4906 return 0; 4945 return 0;
@@ -4932,6 +4971,8 @@ static int ixgbe_close(struct net_device *netdev)
4932{ 4971{
4933 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4972 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4934 4973
4974 ixgbe_ptp_stop(adapter);
4975
4935 ixgbe_down(adapter); 4976 ixgbe_down(adapter);
4936 ixgbe_free_irq(adapter); 4977 ixgbe_free_irq(adapter);
4937 4978
@@ -5022,14 +5063,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5022 if (wufc) { 5063 if (wufc) {
5023 ixgbe_set_rx_mode(netdev); 5064 ixgbe_set_rx_mode(netdev);
5024 5065
5025 /* 5066 /* enable the optics for 82599 SFP+ fiber as we can WoL */
5026 * enable the optics for both mult-speed fiber and 5067 if (hw->mac.ops.enable_tx_laser)
5027 * 82599 SFP+ fiber as we can WoL.
5028 */
5029 if (hw->mac.ops.enable_tx_laser &&
5030 (hw->phy.multispeed_fiber ||
5031 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
5032 hw->mac.type == ixgbe_mac_82599EB)))
5033 hw->mac.ops.enable_tx_laser(hw); 5068 hw->mac.ops.enable_tx_laser(hw);
5034 5069
5035 /* turn on all-multi mode if wake on multicast is enabled */ 5070 /* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5477,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5442 adapter->link_speed = link_speed; 5477 adapter->link_speed = link_speed;
5443} 5478}
5444 5479
5480static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
5481{
5482#ifdef CONFIG_IXGBE_DCB
5483 struct net_device *netdev = adapter->netdev;
5484 struct dcb_app app = {
5485 .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
5486 .protocol = 0,
5487 };
5488 u8 up = 0;
5489
5490 if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
5491 up = dcb_ieee_getapp_mask(netdev, &app);
5492
5493 adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
5494#endif
5495}
5496
5445/** 5497/**
5446 * ixgbe_watchdog_link_is_up - update netif_carrier status and 5498 * ixgbe_watchdog_link_is_up - update netif_carrier status and
5447 * print link up message 5499 * print link up message
@@ -5482,9 +5534,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5482 break; 5534 break;
5483 } 5535 }
5484 5536
5485#ifdef CONFIG_IXGBE_PTP 5537 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5486 ixgbe_ptp_start_cyclecounter(adapter); 5538 ixgbe_ptp_start_cyclecounter(adapter);
5487#endif
5488 5539
5489 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 5540 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5490 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 5541 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5552,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
5501 netif_carrier_on(netdev); 5552 netif_carrier_on(netdev);
5502 ixgbe_check_vf_rate_limit(adapter); 5553 ixgbe_check_vf_rate_limit(adapter);
5503 5554
5555 /* update the default user priority for VFs */
5556 ixgbe_update_default_up(adapter);
5557
5504 /* ping all the active vfs to let them know link has changed */ 5558 /* ping all the active vfs to let them know link has changed */
5505 ixgbe_ping_all_vfs(adapter); 5559 ixgbe_ping_all_vfs(adapter);
5506} 5560}
@@ -5526,9 +5580,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
5526 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB) 5580 if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
5527 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; 5581 adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
5528 5582
5529#ifdef CONFIG_IXGBE_PTP 5583 if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
5530 ixgbe_ptp_start_cyclecounter(adapter); 5584 ixgbe_ptp_start_cyclecounter(adapter);
5531#endif
5532 5585
5533 e_info(drv, "NIC Link is Down\n"); 5586 e_info(drv, "NIC Link is Down\n");
5534 netif_carrier_off(netdev); 5587 netif_carrier_off(netdev);
@@ -5833,9 +5886,7 @@ static void ixgbe_service_task(struct work_struct *work)
5833 ixgbe_watchdog_subtask(adapter); 5886 ixgbe_watchdog_subtask(adapter);
5834 ixgbe_fdir_reinit_subtask(adapter); 5887 ixgbe_fdir_reinit_subtask(adapter);
5835 ixgbe_check_hang_subtask(adapter); 5888 ixgbe_check_hang_subtask(adapter);
5836#ifdef CONFIG_IXGBE_PTP
5837 ixgbe_ptp_overflow_check(adapter); 5889 ixgbe_ptp_overflow_check(adapter);
5838#endif
5839 5890
5840 ixgbe_service_event_complete(adapter); 5891 ixgbe_service_event_complete(adapter);
5841} 5892}
@@ -5988,10 +6039,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
5988 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN) 6039 if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
5989 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); 6040 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
5990 6041
5991#ifdef CONFIG_IXGBE_PTP
5992 if (tx_flags & IXGBE_TX_FLAGS_TSTAMP) 6042 if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
5993 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP); 6043 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
5994#endif
5995 6044
5996 /* set segmentation enable bits for TSO/FSO */ 6045 /* set segmentation enable bits for TSO/FSO */
5997#ifdef IXGBE_FCOE 6046#ifdef IXGBE_FCOE
@@ -6393,12 +6442,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6393 6442
6394 skb_tx_timestamp(skb); 6443 skb_tx_timestamp(skb);
6395 6444
6396#ifdef CONFIG_IXGBE_PTP
6397 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 6445 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
6398 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 6446 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
6399 tx_flags |= IXGBE_TX_FLAGS_TSTAMP; 6447 tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
6400 } 6448 }
6401#endif
6402 6449
6403#ifdef CONFIG_PCI_IOV 6450#ifdef CONFIG_PCI_IOV
6404 /* 6451 /*
@@ -6485,6 +6532,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
6485 if (skb_pad(skb, 17 - skb->len)) 6532 if (skb_pad(skb, 17 - skb->len))
6486 return NETDEV_TX_OK; 6533 return NETDEV_TX_OK;
6487 skb->len = 17; 6534 skb->len = 17;
6535 skb_set_tail_pointer(skb, 17);
6488 } 6536 }
6489 6537
6490 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6538 tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6595,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
6547 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6595 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6548 6596
6549 switch (cmd) { 6597 switch (cmd) {
6550#ifdef CONFIG_IXGBE_PTP
6551 case SIOCSHWTSTAMP: 6598 case SIOCSHWTSTAMP:
6552 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd); 6599 return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
6553#endif
6554 default: 6600 default:
6555 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); 6601 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
6556 } 6602 }
@@ -6910,13 +6956,16 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
6910 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 6956 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
6911 return -EOPNOTSUPP; 6957 return -EOPNOTSUPP;
6912 6958
6913 if (ndm->ndm_state & NUD_PERMANENT) { 6959 /* Hardware does not support aging addresses so if a
6960 * ndm_state is given only allow permanent addresses
6961 */
6962 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
6914 pr_info("%s: FDB only supports static addresses\n", 6963 pr_info("%s: FDB only supports static addresses\n",
6915 ixgbe_driver_name); 6964 ixgbe_driver_name);
6916 return -EINVAL; 6965 return -EINVAL;
6917 } 6966 }
6918 6967
6919 if (is_unicast_ether_addr(addr)) { 6968 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
6920 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; 6969 u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
6921 6970
6922 if (netdev_uc_count(dev) < rar_uc_entries) 6971 if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7023,61 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
6974 return idx; 7023 return idx;
6975} 7024}
6976 7025
7026static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7027 struct nlmsghdr *nlh)
7028{
7029 struct ixgbe_adapter *adapter = netdev_priv(dev);
7030 struct nlattr *attr, *br_spec;
7031 int rem;
7032
7033 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7034 return -EOPNOTSUPP;
7035
7036 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7037
7038 nla_for_each_nested(attr, br_spec, rem) {
7039 __u16 mode;
7040 u32 reg = 0;
7041
7042 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7043 continue;
7044
7045 mode = nla_get_u16(attr);
7046 if (mode == BRIDGE_MODE_VEPA) {
7047 reg = 0;
7048 adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
7049 } else if (mode == BRIDGE_MODE_VEB) {
7050 reg = IXGBE_PFDTXGSWC_VT_LBEN;
7051 adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
7052 } else
7053 return -EINVAL;
7054
7055 IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
7056
7057 e_info(drv, "enabling bridge mode: %s\n",
7058 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
7059 }
7060
7061 return 0;
7062}
7063
7064static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7065 struct net_device *dev)
7066{
7067 struct ixgbe_adapter *adapter = netdev_priv(dev);
7068 u16 mode;
7069
7070 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7071 return 0;
7072
7073 if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
7074 mode = BRIDGE_MODE_VEB;
7075 else
7076 mode = BRIDGE_MODE_VEPA;
7077
7078 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
7079}
7080
6977static const struct net_device_ops ixgbe_netdev_ops = { 7081static const struct net_device_ops ixgbe_netdev_ops = {
6978 .ndo_open = ixgbe_open, 7082 .ndo_open = ixgbe_open,
6979 .ndo_stop = ixgbe_close, 7083 .ndo_stop = ixgbe_close,
@@ -7013,6 +7117,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7013 .ndo_fdb_add = ixgbe_ndo_fdb_add, 7117 .ndo_fdb_add = ixgbe_ndo_fdb_add,
7014 .ndo_fdb_del = ixgbe_ndo_fdb_del, 7118 .ndo_fdb_del = ixgbe_ndo_fdb_del,
7015 .ndo_fdb_dump = ixgbe_ndo_fdb_dump, 7119 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
7120 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
7121 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7016}; 7122};
7017 7123
7018/** 7124/**
@@ -7042,6 +7148,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7042 break; 7148 break;
7043 case IXGBE_SUBDEV_ID_82599_SFP: 7149 case IXGBE_SUBDEV_ID_82599_SFP:
7044 case IXGBE_SUBDEV_ID_82599_RNDC: 7150 case IXGBE_SUBDEV_ID_82599_RNDC:
7151 case IXGBE_SUBDEV_ID_82599_ECNA_DP:
7045 is_wol_supported = 1; 7152 is_wol_supported = 1;
7046 break; 7153 break;
7047 } 7154 }
@@ -7079,8 +7186,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
7079 * The OS initialization, configuring of the adapter private structure, 7186 * The OS initialization, configuring of the adapter private structure,
7080 * and a hardware reset occur. 7187 * and a hardware reset occur.
7081 **/ 7188 **/
7082static int __devinit ixgbe_probe(struct pci_dev *pdev, 7189static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7083 const struct pci_device_id *ent)
7084{ 7190{
7085 struct net_device *netdev; 7191 struct net_device *netdev;
7086 struct ixgbe_adapter *adapter = NULL; 7192 struct ixgbe_adapter *adapter = NULL;
@@ -7340,7 +7446,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7340 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 7446 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
7341 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 7447 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
7342 7448
7343 if (ixgbe_validate_mac_addr(netdev->perm_addr)) { 7449 if (!is_valid_ether_addr(netdev->perm_addr)) {
7344 e_dev_err("invalid MAC address\n"); 7450 e_dev_err("invalid MAC address\n");
7345 err = -EIO; 7451 err = -EIO;
7346 goto err_sw_init; 7452 goto err_sw_init;
@@ -7364,10 +7470,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7364 7470
7365 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 7471 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7366 7472
7367#ifdef CONFIG_IXGBE_PTP
7368 ixgbe_ptp_init(adapter);
7369#endif /* CONFIG_IXGBE_PTP*/
7370
7371 /* save off EEPROM version number */ 7473 /* save off EEPROM version number */
7372 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); 7474 hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
7373 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); 7475 hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7522,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7420 if (err) 7522 if (err)
7421 goto err_register; 7523 goto err_register;
7422 7524
7423 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */ 7525 /* power down the optics for 82599 SFP+ fiber */
7424 if (hw->mac.ops.disable_tx_laser && 7526 if (hw->mac.ops.disable_tx_laser)
7425 ((hw->phy.multispeed_fiber) ||
7426 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7427 (hw->mac.type == ixgbe_mac_82599EB))))
7428 hw->mac.ops.disable_tx_laser(hw); 7527 hw->mac.ops.disable_tx_laser(hw);
7429 7528
7430 /* carrier off reporting is important to ethtool even BEFORE open */ 7529 /* carrier off reporting is important to ethtool even BEFORE open */
@@ -7493,7 +7592,7 @@ err_dma:
7493 * Hot-Plug event, or because the driver is going to be removed from 7592 * Hot-Plug event, or because the driver is going to be removed from
7494 * memory. 7593 * memory.
7495 **/ 7594 **/
7496static void __devexit ixgbe_remove(struct pci_dev *pdev) 7595static void ixgbe_remove(struct pci_dev *pdev)
7497{ 7596{
7498 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 7597 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7499 struct net_device *netdev = adapter->netdev; 7598 struct net_device *netdev = adapter->netdev;
@@ -7505,9 +7604,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7505 set_bit(__IXGBE_DOWN, &adapter->state); 7604 set_bit(__IXGBE_DOWN, &adapter->state);
7506 cancel_work_sync(&adapter->service_task); 7605 cancel_work_sync(&adapter->service_task);
7507 7606
7508#ifdef CONFIG_IXGBE_PTP
7509 ixgbe_ptp_stop(adapter);
7510#endif
7511 7607
7512#ifdef CONFIG_IXGBE_DCA 7608#ifdef CONFIG_IXGBE_DCA
7513 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7609 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7736,7 +7832,7 @@ static struct pci_driver ixgbe_driver = {
7736 .name = ixgbe_driver_name, 7832 .name = ixgbe_driver_name,
7737 .id_table = ixgbe_pci_tbl, 7833 .id_table = ixgbe_pci_tbl,
7738 .probe = ixgbe_probe, 7834 .probe = ixgbe_probe,
7739 .remove = __devexit_p(ixgbe_remove), 7835 .remove = ixgbe_remove,
7740#ifdef CONFIG_PM 7836#ifdef CONFIG_PM
7741 .suspend = ixgbe_suspend, 7837 .suspend = ixgbe_suspend,
7742 .resume = ixgbe_resume, 7838 .resume = ixgbe_resume,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd961075..42dd65e6ac97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
62/* bits 23:16 are used for exra info for certain messages */ 62/* bits 23:16 are used for exra info for certain messages */
63#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) 63#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
64 64
65/* definitions to support mailbox API version negotiation */
66
67/*
68 * Each element denotes a version of the API; existing numbers may not
69 * change; any additions must go at the end
70 */
71enum ixgbe_pfvf_api_rev {
72 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
73 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
74 ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
75 /* This value should always be last */
76 ixgbe_mbox_api_unknown, /* indicates that API version is not known */
77};
78
79/* mailbox API, legacy requests */
65#define IXGBE_VF_RESET 0x01 /* VF requests reset */ 80#define IXGBE_VF_RESET 0x01 /* VF requests reset */
66#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 81#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
67#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 82#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
68#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 83#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
69#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 84
70#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ 85/* mailbox API, version 1.0 VF requests */
86#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
87#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
88#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
89
90/* mailbox API, version 1.1 VF requests */
91#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
92
93/* GET_QUEUES return data indices within the mailbox */
94#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
95#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
96#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
97#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
71 98
72/* length of permanent address message returned from PF */ 99/* length of permanent address message returned from PF */
73#define IXGBE_VF_PERMADDR_MSG_LEN 4 100#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d9291316ee9f..1a751c9d09c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
387 struct ixgbe_hw *hw = &adapter->hw; 387 struct ixgbe_hw *hw = &adapter->hw;
388 struct ptp_clock_event event; 388 struct ptp_clock_event event;
389 389
390 event.type = PTP_CLOCK_PPS;
391
392 /* this check is necessary in case the interrupt was enabled via some
393 * alternative means (ex. debug_fs). Better to check here than
394 * everywhere that calls this function.
395 */
396 if (!adapter->ptp_clock)
397 return;
398
390 switch (hw->mac.type) { 399 switch (hw->mac.type) {
391 case ixgbe_mac_X540: 400 case ixgbe_mac_X540:
392 ptp_clock_event(adapter->ptp_clock, &event); 401 ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
411 unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies; 420 unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
412 struct timespec ts; 421 struct timespec ts;
413 422
414 if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) && 423 if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
415 (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) { 424 (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
416 ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); 425 ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
417 adapter->last_overflow_check = jiffies; 426 adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
554 adapter = q_vector->adapter; 563 adapter = q_vector->adapter;
555 hw = &adapter->hw; 564 hw = &adapter->hw;
556 565
566 if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
567 return;
568
557 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); 569 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
558 570
559 /* Check if we have a valid timestamp and make sure the skb should 571 /* Check if we have a valid timestamp and make sure the skb should
560 * have been timestamped */ 572 * have been timestamped */
561 if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || 573 if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
562 !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
563 return; 574 return;
564 575
565 /* 576 /*
@@ -622,8 +633,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
622 struct hwtstamp_config config; 633 struct hwtstamp_config config;
623 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; 634 u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
624 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED; 635 u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
625 u32 tsync_rx_mtrl = 0; 636 u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
626 bool is_l4 = false;
627 bool is_l2 = false; 637 bool is_l2 = false;
628 u32 regval; 638 u32 regval;
629 639
@@ -646,16 +656,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
646 switch (config.rx_filter) { 656 switch (config.rx_filter) {
647 case HWTSTAMP_FILTER_NONE: 657 case HWTSTAMP_FILTER_NONE:
648 tsync_rx_ctl = 0; 658 tsync_rx_ctl = 0;
659 tsync_rx_mtrl = 0;
649 break; 660 break;
650 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 661 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
651 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 662 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
652 tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG; 663 tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
653 is_l4 = true;
654 break; 664 break;
655 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 665 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
656 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; 666 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
657 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; 667 tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
658 is_l4 = true;
659 break; 668 break;
660 case HWTSTAMP_FILTER_PTP_V2_EVENT: 669 case HWTSTAMP_FILTER_PTP_V2_EVENT:
661 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 670 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -668,7 +677,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
668 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 677 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
669 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; 678 tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
670 is_l2 = true; 679 is_l2 = true;
671 is_l4 = true;
672 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 680 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
673 break; 681 break;
674 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 682 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -693,42 +701,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
693 /* Store filter value for later use */ 701 /* Store filter value for later use */
694 adapter->rx_hwtstamp_filter = config.rx_filter; 702 adapter->rx_hwtstamp_filter = config.rx_filter;
695 703
696 /* define ethertype filter for timestamped packets */ 704 /* define ethertype filter for timestamping L2 packets */
697 if (is_l2) 705 if (is_l2)
698 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 706 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
699 (IXGBE_ETQF_FILTER_EN | /* enable filter */ 707 (IXGBE_ETQF_FILTER_EN | /* enable filter */
700 IXGBE_ETQF_1588 | /* enable timestamping */ 708 IXGBE_ETQF_1588 | /* enable timestamping */
701 ETH_P_1588)); /* 1588 eth protocol type */ 709 ETH_P_1588)); /* 1588 eth protocol type */
702 else 710 else
703 IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0); 711 IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
704
705#define PTP_PORT 319
706 /* L4 Queue Filter[3]: filter by destination port and protocol */
707 if (is_l4) {
708 u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
709 | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
710 | IXGBE_FTQF_QUEUE_ENABLE);
711
712 ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
713 & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
714 & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
715 << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
716 712
717 IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
718 (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
719 IXGBE_IMIR_SIZE_BP_82599));
720
721 /* enable port check */
722 IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
723 (htons(PTP_PORT) |
724 htons(PTP_PORT) << 16));
725
726 IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
727
728 tsync_rx_mtrl |= PTP_PORT << 16;
729 } else {
730 IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
731 }
732 713
733 /* enable/disable TX */ 714 /* enable/disable TX */
734 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); 715 regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
@@ -759,58 +740,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
759 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw 740 * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
760 * @adapter: pointer to the adapter structure 741 * @adapter: pointer to the adapter structure
761 * 742 *
762 * this function initializes the timecounter and cyclecounter 743 * This function should be called to set the proper values for the TIMINCA
763 * structures for use in generated a ns counter from the arbitrary 744 * register and tell the cyclecounter structure what the tick rate of SYSTIME
764 * fixed point cycles registers in the hardware. 745 * is. It does not directly modify SYSTIME registers or the timecounter
765 * 746 * structure. It should be called whenever a new TIMINCA value is necessary,
766 * A change in link speed impacts the frequency of the DMA clock on 747 * such as during initialization or when the link speed changes.
767 * the device, which is used to generate the cycle counter
768 * registers. Therefor this function is called whenever the link speed
769 * changes.
770 *
771 * This function also turns on the SDP pin for clock out feature (X540
772 * only), because this is where the shift is first calculated.
773 */ 748 */
774void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) 749void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
775{ 750{
776 struct ixgbe_hw *hw = &adapter->hw; 751 struct ixgbe_hw *hw = &adapter->hw;
777 u32 incval = 0; 752 u32 incval = 0;
778 u32 timinca = 0;
779 u32 shift = 0; 753 u32 shift = 0;
780 u32 cycle_speed;
781 unsigned long flags; 754 unsigned long flags;
782 755
783 /** 756 /**
784 * Determine what speed we need to set the cyclecounter
785 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
786 * unknown speeds as 10Gb. (Hence why we can't just copy the
787 * link_speed.
788 */
789 switch (adapter->link_speed) {
790 case IXGBE_LINK_SPEED_100_FULL:
791 case IXGBE_LINK_SPEED_1GB_FULL:
792 case IXGBE_LINK_SPEED_10GB_FULL:
793 cycle_speed = adapter->link_speed;
794 break;
795 default:
796 /* cycle speed should be 10Gb when there is no link */
797 cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
798 break;
799 }
800
801 /*
802 * grab the current TIMINCA value from the register so that it can be
803 * double checked. If the register value has been cleared, it must be
804 * reset to the correct value for generating a cyclecounter. If
805 * TIMINCA is zero, the SYSTIME registers do not increment at all.
806 */
807 timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
808
809 /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
810 if (adapter->cycle_speed == cycle_speed && timinca)
811 return;
812
813 /**
814 * Scale the NIC cycle counter by a large factor so that 757 * Scale the NIC cycle counter by a large factor so that
815 * relatively small corrections to the frequency can be added 758 * relatively small corrections to the frequency can be added
816 * or subtracted. The drawbacks of a large factor include 759 * or subtracted. The drawbacks of a large factor include
@@ -819,8 +762,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
819 * to nanoseconds using only a multiplier and a right-shift, 762 * to nanoseconds using only a multiplier and a right-shift,
820 * and (c) the value must fit within the timinca register space 763 * and (c) the value must fit within the timinca register space
821 * => math based on internal DMA clock rate and available bits 764 * => math based on internal DMA clock rate and available bits
765 *
766 * Note that when there is no link, internal DMA clock is same as when
767 * link speed is 10Gb. Set the registers correctly even when link is
768 * down to preserve the clock setting
822 */ 769 */
823 switch (cycle_speed) { 770 switch (adapter->link_speed) {
824 case IXGBE_LINK_SPEED_100_FULL: 771 case IXGBE_LINK_SPEED_100_FULL:
825 incval = IXGBE_INCVAL_100; 772 incval = IXGBE_INCVAL_100;
826 shift = IXGBE_INCVAL_SHIFT_100; 773 shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +777,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
830 shift = IXGBE_INCVAL_SHIFT_1GB; 777 shift = IXGBE_INCVAL_SHIFT_1GB;
831 break; 778 break;
832 case IXGBE_LINK_SPEED_10GB_FULL: 779 case IXGBE_LINK_SPEED_10GB_FULL:
780 default:
833 incval = IXGBE_INCVAL_10GB; 781 incval = IXGBE_INCVAL_10GB;
834 shift = IXGBE_INCVAL_SHIFT_10GB; 782 shift = IXGBE_INCVAL_SHIFT_10GB;
835 break; 783 break;
@@ -857,18 +805,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
857 return; 805 return;
858 } 806 }
859 807
860 /* reset the system time registers */ 808 /* update the base incval used to calculate frequency adjustment */
861 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
862 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
863 IXGBE_WRITE_FLUSH(hw);
864
865 /* store the new cycle speed */
866 adapter->cycle_speed = cycle_speed;
867
868 ACCESS_ONCE(adapter->base_incval) = incval; 809 ACCESS_ONCE(adapter->base_incval) = incval;
869 smp_mb(); 810 smp_mb();
870 811
871 /* grab the ptp lock */ 812 /* need lock to prevent incorrect read while modifying cyclecounter */
872 spin_lock_irqsave(&adapter->tmreg_lock, flags); 813 spin_lock_irqsave(&adapter->tmreg_lock, flags);
873 814
874 memset(&adapter->cc, 0, sizeof(adapter->cc)); 815 memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +818,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
877 adapter->cc.shift = shift; 818 adapter->cc.shift = shift;
878 adapter->cc.mult = 1; 819 adapter->cc.mult = 1;
879 820
821 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
822}
823
824/**
825 * ixgbe_ptp_reset
826 * @adapter: the ixgbe private board structure
827 *
828 * When the MAC resets, all timesync features are reset. This function should be
829 * called to re-enable the PTP clock structure. It will re-init the timecounter
830 * structure based on the kernel time as well as setup the cycle counter data.
831 */
832void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
833{
834 struct ixgbe_hw *hw = &adapter->hw;
835 unsigned long flags;
836
837 /* set SYSTIME registers to 0 just in case */
838 IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
839 IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
840 IXGBE_WRITE_FLUSH(hw);
841
842 ixgbe_ptp_start_cyclecounter(adapter);
843
844 spin_lock_irqsave(&adapter->tmreg_lock, flags);
845
880 /* reset the ns time counter */ 846 /* reset the ns time counter */
881 timecounter_init(&adapter->tc, &adapter->cc, 847 timecounter_init(&adapter->tc, &adapter->cc,
882 ktime_to_ns(ktime_get_real())); 848 ktime_to_ns(ktime_get_real()));
@@ -904,7 +870,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
904 870
905 switch (adapter->hw.mac.type) { 871 switch (adapter->hw.mac.type) {
906 case ixgbe_mac_X540: 872 case ixgbe_mac_X540:
907 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 873 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
908 adapter->ptp_caps.owner = THIS_MODULE; 874 adapter->ptp_caps.owner = THIS_MODULE;
909 adapter->ptp_caps.max_adj = 250000000; 875 adapter->ptp_caps.max_adj = 250000000;
910 adapter->ptp_caps.n_alarm = 0; 876 adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +884,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
918 adapter->ptp_caps.enable = ixgbe_ptp_enable; 884 adapter->ptp_caps.enable = ixgbe_ptp_enable;
919 break; 885 break;
920 case ixgbe_mac_82599EB: 886 case ixgbe_mac_82599EB:
921 snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); 887 snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
922 adapter->ptp_caps.owner = THIS_MODULE; 888 adapter->ptp_caps.owner = THIS_MODULE;
923 adapter->ptp_caps.max_adj = 250000000; 889 adapter->ptp_caps.max_adj = 250000000;
924 adapter->ptp_caps.n_alarm = 0; 890 adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +908,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
942 908
943 spin_lock_init(&adapter->tmreg_lock); 909 spin_lock_init(&adapter->tmreg_lock);
944 910
945 ixgbe_ptp_start_cyclecounter(adapter);
946
947 /* (Re)start the overflow check */
948 adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
949
950 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, 911 adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
951 &adapter->pdev->dev); 912 &adapter->pdev->dev);
952 if (IS_ERR(adapter->ptp_clock)) { 913 if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +916,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
955 } else 916 } else
956 e_dev_info("registered PHC device on %s\n", netdev->name); 917 e_dev_info("registered PHC device on %s\n", netdev->name);
957 918
919 ixgbe_ptp_reset(adapter);
920
921 /* set the flag that PTP has been enabled */
922 adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
923
958 return; 924 return;
959} 925}
960 926
@@ -967,7 +933,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
967void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) 933void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
968{ 934{
969 /* stop the overflow check task */ 935 /* stop the overflow check task */
970 adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED | 936 adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
971 IXGBE_FLAG2_PTP_PPS_ENABLED); 937 IXGBE_FLAG2_PTP_PPS_ENABLED);
972 938
973 ixgbe_ptp_setup_sdp(adapter); 939 ixgbe_ptp_setup_sdp(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf64d96..85cddac673ef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
117 } 117 }
118 } 118 }
119 119
120 /* Initialize default switching mode VEB */
121 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
122 adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
123
120 /* If call to enable VFs succeeded then allocate memory 124 /* If call to enable VFs succeeded then allocate memory
121 * for per VF control structures. 125 * for per VF control structures.
122 */ 126 */
@@ -150,16 +154,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
150 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | 154 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
151 IXGBE_FLAG2_RSC_ENABLED); 155 IXGBE_FLAG2_RSC_ENABLED);
152 156
153#ifdef IXGBE_FCOE
154 /*
155 * When SR-IOV is enabled 82599 cannot support jumbo frames
156 * so we must disable FCoE because we cannot support FCoE MTU.
157 */
158 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
159 adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
160 IXGBE_FLAG_FCOE_CAPABLE);
161#endif
162
163 /* enable spoof checking for all VFs */ 157 /* enable spoof checking for all VFs */
164 for (i = 0; i < adapter->num_vfs; i++) 158 for (i = 0; i < adapter->num_vfs; i++)
165 adapter->vfinfo[i].spoofchk_enabled = true; 159 adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +259,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
265} 259}
266 260
267static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, 261static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
268 int entries, u16 *hash_list, u32 vf) 262 u32 *msgbuf, u32 vf)
269{ 263{
264 int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
265 >> IXGBE_VT_MSGINFO_SHIFT;
266 u16 *hash_list = (u16 *)&msgbuf[1];
270 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 267 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
271 struct ixgbe_hw *hw = &adapter->hw; 268 struct ixgbe_hw *hw = &adapter->hw;
272 int i; 269 int i;
@@ -353,31 +350,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
353 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 350 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
354} 351}
355 352
356static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) 353static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
357{ 354{
358 struct ixgbe_hw *hw = &adapter->hw; 355 struct ixgbe_hw *hw = &adapter->hw;
359 int new_mtu = msgbuf[1]; 356 int max_frame = msgbuf[1];
360 u32 max_frs; 357 u32 max_frs;
361 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
362 358
363 /* Only X540 supports jumbo frames in IOV mode */ 359 /*
364 if (adapter->hw.mac.type != ixgbe_mac_X540) 360 * For 82599EB we have to keep all PFs and VFs operating with
365 return; 361 * the same max_frame value in order to avoid sending an oversize
362 * frame to a VF. In order to guarantee this is handled correctly
363 * for all cases we have several special exceptions to take into
364 * account before we can enable the VF for receive
365 */
366 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
367 struct net_device *dev = adapter->netdev;
368 int pf_max_frame = dev->mtu + ETH_HLEN;
369 u32 reg_offset, vf_shift, vfre;
370 s32 err = 0;
371
372#ifdef CONFIG_FCOE
373 if (dev->features & NETIF_F_FCOE_MTU)
374 pf_max_frame = max_t(int, pf_max_frame,
375 IXGBE_FCOE_JUMBO_FRAME_SIZE);
376
377#endif /* CONFIG_FCOE */
378 switch (adapter->vfinfo[vf].vf_api) {
379 case ixgbe_mbox_api_11:
380 /*
381 * Version 1.1 supports jumbo frames on VFs if PF has
382 * jumbo frames enabled which means legacy VFs are
383 * disabled
384 */
385 if (pf_max_frame > ETH_FRAME_LEN)
386 break;
387 default:
388 /*
389 * If the PF or VF are running w/ jumbo frames enabled
390 * we need to shut down the VF Rx path as we cannot
391 * support jumbo frames on legacy VFs
392 */
393 if ((pf_max_frame > ETH_FRAME_LEN) ||
394 (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
395 err = -EINVAL;
396 break;
397 }
398
399 /* determine VF receive enable location */
400 vf_shift = vf % 32;
401 reg_offset = vf / 32;
402
403 /* enable or disable receive depending on error */
404 vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
405 if (err)
406 vfre &= ~(1 << vf_shift);
407 else
408 vfre |= 1 << vf_shift;
409 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
410
411 if (err) {
412 e_err(drv, "VF max_frame %d out of range\n", max_frame);
413 return err;
414 }
415 }
366 416
367 /* MTU < 68 is an error and causes problems on some kernels */ 417 /* MTU < 68 is an error and causes problems on some kernels */
368 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { 418 if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
369 e_err(drv, "VF mtu %d out of range\n", new_mtu); 419 e_err(drv, "VF max_frame %d out of range\n", max_frame);
370 return; 420 return -EINVAL;
371 } 421 }
372 422
373 max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & 423 /* pull current max frame size from hardware */
374 IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; 424 max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
375 if (max_frs < new_mtu) { 425 max_frs &= IXGBE_MHADD_MFS_MASK;
376 max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; 426 max_frs >>= IXGBE_MHADD_MFS_SHIFT;
427
428 if (max_frs < max_frame) {
429 max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
377 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); 430 IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
378 } 431 }
379 432
380 e_info(hw, "VF requests change max MTU to %d\n", new_mtu); 433 e_info(hw, "VF requests change max MTU to %d\n", max_frame);
434
435 return 0;
381} 436}
382 437
383static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) 438static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -392,35 +447,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
392 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); 447 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
393} 448}
394 449
395static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf) 450static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
451 u16 vid, u16 qos, u32 vf)
396{ 452{
397 struct ixgbe_hw *hw = &adapter->hw; 453 struct ixgbe_hw *hw = &adapter->hw;
454 u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
398 455
399 if (vid) 456 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
400 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
401 (vid | IXGBE_VMVIR_VLANA_DEFAULT));
402 else
403 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
404} 457}
405 458
459static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
460{
461 struct ixgbe_hw *hw = &adapter->hw;
462
463 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
464}
406static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 465static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
407{ 466{
408 struct ixgbe_hw *hw = &adapter->hw; 467 struct ixgbe_hw *hw = &adapter->hw;
468 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
409 int rar_entry = hw->mac.num_rar_entries - (vf + 1); 469 int rar_entry = hw->mac.num_rar_entries - (vf + 1);
470 u8 num_tcs = netdev_get_num_tc(adapter->netdev);
471
472 /* add PF assigned VLAN or VLAN 0 */
473 ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
410 474
411 /* reset offloads to defaults */ 475 /* reset offloads to defaults */
412 if (adapter->vfinfo[vf].pf_vlan) { 476 ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
413 ixgbe_set_vf_vlan(adapter, true, 477
414 adapter->vfinfo[vf].pf_vlan, vf); 478 /* set outgoing tags for VFs */
415 ixgbe_set_vmvir(adapter, 479 if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
416 (adapter->vfinfo[vf].pf_vlan | 480 ixgbe_clear_vmvir(adapter, vf);
417 (adapter->vfinfo[vf].pf_qos <<
418 VLAN_PRIO_SHIFT)), vf);
419 ixgbe_set_vmolr(hw, vf, false);
420 } else { 481 } else {
421 ixgbe_set_vf_vlan(adapter, true, 0, vf); 482 if (vfinfo->pf_qos || !num_tcs)
422 ixgbe_set_vmvir(adapter, 0, vf); 483 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
423 ixgbe_set_vmolr(hw, vf, true); 484 vfinfo->pf_qos, vf);
485 else
486 ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
487 adapter->default_up, vf);
488
489 if (vfinfo->spoofchk_enabled)
490 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
424 } 491 }
425 492
426 /* reset multicast table array for vf */ 493 /* reset multicast table array for vf */
@@ -430,6 +497,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
430 ixgbe_set_rx_mode(adapter->netdev); 497 ixgbe_set_rx_mode(adapter->netdev);
431 498
432 hw->mac.ops.clear_rar(hw, rar_entry); 499 hw->mac.ops.clear_rar(hw, rar_entry);
500
501 /* reset VF api back to unknown */
502 adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
433} 503}
434 504
435static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, 505static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +591,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
521 return 0; 591 return 0;
522} 592}
523 593
524static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) 594static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
525{ 595{
526 struct ixgbe_hw *hw = &adapter->hw; 596 struct ixgbe_hw *hw = &adapter->hw;
527 u32 reg; 597 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
598 u32 reg, msgbuf[4];
528 u32 reg_offset, vf_shift; 599 u32 reg_offset, vf_shift;
600 u8 *addr = (u8 *)(&msgbuf[1]);
601
602 e_info(probe, "VF Reset msg received from vf %d\n", vf);
603
604 /* reset the filters for the device */
605 ixgbe_vf_reset_event(adapter, vf);
606
607 /* set vf mac address */
608 ixgbe_set_vf_mac(adapter, vf, vf_mac);
529 609
530 vf_shift = vf % 32; 610 vf_shift = vf % 32;
531 reg_offset = vf / 32; 611 reg_offset = vf / 32;
532 612
533 /* enable transmit and receive for vf */ 613 /* enable transmit for vf */
534 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); 614 reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
535 reg |= (reg | (1 << vf_shift)); 615 reg |= 1 << vf_shift;
536 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); 616 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
537 617
618 /* enable receive for vf */
538 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); 619 reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
539 reg |= (reg | (1 << vf_shift)); 620 reg |= 1 << vf_shift;
621 /*
622 * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
623 * For more info take a look at ixgbe_set_vf_lpe
624 */
625 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
626 struct net_device *dev = adapter->netdev;
627 int pf_max_frame = dev->mtu + ETH_HLEN;
628
629#ifdef CONFIG_FCOE
630 if (dev->features & NETIF_F_FCOE_MTU)
631 pf_max_frame = max_t(int, pf_max_frame,
632 IXGBE_FCOE_JUMBO_FRAME_SIZE);
633
634#endif /* CONFIG_FCOE */
635 if (pf_max_frame > ETH_FRAME_LEN)
636 reg &= ~(1 << vf_shift);
637 }
540 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); 638 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
541 639
640 /* enable VF mailbox for further messages */
641 adapter->vfinfo[vf].clear_to_send = true;
642
542 /* Enable counting of spoofed packets in the SSVPC register */ 643 /* Enable counting of spoofed packets in the SSVPC register */
543 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); 644 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
544 reg |= (1 << vf_shift); 645 reg |= (1 << vf_shift);
545 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); 646 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
546 647
547 ixgbe_vf_reset_event(adapter, vf); 648 /* reply to reset with ack and vf mac address */
649 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
650 memcpy(addr, vf_mac, ETH_ALEN);
651
652 /*
653 * Piggyback the multicast filter type so VF can compute the
654 * correct vectors
655 */
656 msgbuf[3] = hw->mac.mc_filter_type;
657 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
658
659 return 0;
660}
661
662static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
663 u32 *msgbuf, u32 vf)
664{
665 u8 *new_mac = ((u8 *)(&msgbuf[1]));
666
667 if (!is_valid_ether_addr(new_mac)) {
668 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
669 return -1;
670 }
671
672 if (adapter->vfinfo[vf].pf_set_mac &&
673 memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
674 ETH_ALEN)) {
675 e_warn(drv,
676 "VF %d attempted to override administratively set MAC address\n"
677 "Reload the VF driver to resume operations\n",
678 vf);
679 return -1;
680 }
681
682 return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
683}
684
685static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
686 u32 *msgbuf, u32 vf)
687{
688 struct ixgbe_hw *hw = &adapter->hw;
689 int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
690 int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
691 int err;
692 u8 tcs = netdev_get_num_tc(adapter->netdev);
693
694 if (adapter->vfinfo[vf].pf_vlan || tcs) {
695 e_warn(drv,
696 "VF %d attempted to override administratively set VLAN configuration\n"
697 "Reload the VF driver to resume operations\n",
698 vf);
699 return -1;
700 }
701
702 if (add)
703 adapter->vfinfo[vf].vlan_count++;
704 else if (adapter->vfinfo[vf].vlan_count)
705 adapter->vfinfo[vf].vlan_count--;
706
707 err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
708 if (!err && adapter->vfinfo[vf].spoofchk_enabled)
709 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
710
711 return err;
712}
713
714static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
715 u32 *msgbuf, u32 vf)
716{
717 u8 *new_mac = ((u8 *)(&msgbuf[1]));
718 int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
719 IXGBE_VT_MSGINFO_SHIFT;
720 int err;
721
722 if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
723 e_warn(drv,
724 "VF %d requested MACVLAN filter but is administratively denied\n",
725 vf);
726 return -1;
727 }
728
729 /* An non-zero index indicates the VF is setting a filter */
730 if (index) {
731 if (!is_valid_ether_addr(new_mac)) {
732 e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
733 return -1;
734 }
735
736 /*
737 * If the VF is allowed to set MAC filters then turn off
738 * anti-spoofing to avoid false positives.
739 */
740 if (adapter->vfinfo[vf].spoofchk_enabled)
741 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
742 }
743
744 err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
745 if (err == -ENOSPC)
746 e_warn(drv,
747 "VF %d has requested a MACVLAN filter but there is no space for it\n",
748 vf);
749
750 return err < 0;
751}
752
753static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
754 u32 *msgbuf, u32 vf)
755{
756 int api = msgbuf[1];
757
758 switch (api) {
759 case ixgbe_mbox_api_10:
760 case ixgbe_mbox_api_11:
761 adapter->vfinfo[vf].vf_api = api;
762 return 0;
763 default:
764 break;
765 }
766
767 e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
768
769 return -1;
770}
771
772static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
773 u32 *msgbuf, u32 vf)
774{
775 struct net_device *dev = adapter->netdev;
776 struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
777 unsigned int default_tc = 0;
778 u8 num_tcs = netdev_get_num_tc(dev);
779
780 /* verify the PF is supporting the correct APIs */
781 switch (adapter->vfinfo[vf].vf_api) {
782 case ixgbe_mbox_api_20:
783 case ixgbe_mbox_api_11:
784 break;
785 default:
786 return -1;
787 }
788
789 /* only allow 1 Tx queue for bandwidth limiting */
790 msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
791 msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
792
793 /* if TCs > 1 determine which TC belongs to default user priority */
794 if (num_tcs > 1)
795 default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
796
797 /* notify VF of need for VLAN tag stripping, and correct queue */
798 if (num_tcs)
799 msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
800 else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
801 msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
802 else
803 msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
804
805 /* notify VF of default queue */
806 msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
807
808 return 0;
548} 809}
549 810
550static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) 811static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +814,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
553 u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; 814 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
554 struct ixgbe_hw *hw = &adapter->hw; 815 struct ixgbe_hw *hw = &adapter->hw;
555 s32 retval; 816 s32 retval;
556 int entries;
557 u16 *hash_list;
558 int add, vid, index;
559 u8 *new_mac;
560 817
561 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 818 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
562 819
@@ -572,39 +829,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
572 /* flush the ack before we write any messages back */ 829 /* flush the ack before we write any messages back */
573 IXGBE_WRITE_FLUSH(hw); 830 IXGBE_WRITE_FLUSH(hw);
574 831
832 if (msgbuf[0] == IXGBE_VF_RESET)
833 return ixgbe_vf_reset_msg(adapter, vf);
834
575 /* 835 /*
576 * until the vf completes a virtual function reset it should not be 836 * until the vf completes a virtual function reset it should not be
577 * allowed to start any configuration. 837 * allowed to start any configuration.
578 */ 838 */
579
580 if (msgbuf[0] == IXGBE_VF_RESET) {
581 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
582 new_mac = (u8 *)(&msgbuf[1]);
583 e_info(probe, "VF Reset msg received from vf %d\n", vf);
584 adapter->vfinfo[vf].clear_to_send = false;
585 ixgbe_vf_reset_msg(adapter, vf);
586 adapter->vfinfo[vf].clear_to_send = true;
587
588 if (is_valid_ether_addr(new_mac) &&
589 !adapter->vfinfo[vf].pf_set_mac)
590 ixgbe_set_vf_mac(adapter, vf, vf_mac);
591 else
592 ixgbe_set_vf_mac(adapter,
593 vf, adapter->vfinfo[vf].vf_mac_addresses);
594
595 /* reply to reset with ack and vf mac address */
596 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
597 memcpy(new_mac, vf_mac, ETH_ALEN);
598 /*
599 * Piggyback the multicast filter type so VF can compute the
600 * correct vectors
601 */
602 msgbuf[3] = hw->mac.mc_filter_type;
603 ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
604
605 return retval;
606 }
607
608 if (!adapter->vfinfo[vf].clear_to_send) { 839 if (!adapter->vfinfo[vf].clear_to_send) {
609 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; 840 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
610 ixgbe_write_mbx(hw, msgbuf, 1, vf); 841 ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +844,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
613 844
614 switch ((msgbuf[0] & 0xFFFF)) { 845 switch ((msgbuf[0] & 0xFFFF)) {
615 case IXGBE_VF_SET_MAC_ADDR: 846 case IXGBE_VF_SET_MAC_ADDR:
616 new_mac = ((u8 *)(&msgbuf[1])); 847 retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
617 if (is_valid_ether_addr(new_mac) &&
618 !adapter->vfinfo[vf].pf_set_mac) {
619 ixgbe_set_vf_mac(adapter, vf, new_mac);
620 } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
621 new_mac, ETH_ALEN)) {
622 e_warn(drv, "VF %d attempted to override "
623 "administratively set MAC address\nReload "
624 "the VF driver to resume operations\n", vf);
625 retval = -1;
626 }
627 break; 848 break;
628 case IXGBE_VF_SET_MULTICAST: 849 case IXGBE_VF_SET_MULTICAST:
629 entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 850 retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
630 >> IXGBE_VT_MSGINFO_SHIFT;
631 hash_list = (u16 *)&msgbuf[1];
632 retval = ixgbe_set_vf_multicasts(adapter, entries,
633 hash_list, vf);
634 break;
635 case IXGBE_VF_SET_LPE:
636 ixgbe_set_vf_lpe(adapter, msgbuf);
637 break; 851 break;
638 case IXGBE_VF_SET_VLAN: 852 case IXGBE_VF_SET_VLAN:
639 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 853 retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
640 >> IXGBE_VT_MSGINFO_SHIFT; 854 break;
641 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); 855 case IXGBE_VF_SET_LPE:
642 if (adapter->vfinfo[vf].pf_vlan) { 856 retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
643 e_warn(drv, "VF %d attempted to override "
644 "administratively set VLAN configuration\n"
645 "Reload the VF driver to resume operations\n",
646 vf);
647 retval = -1;
648 } else {
649 if (add)
650 adapter->vfinfo[vf].vlan_count++;
651 else if (adapter->vfinfo[vf].vlan_count)
652 adapter->vfinfo[vf].vlan_count--;
653 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
654 if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
655 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
656 }
657 break; 857 break;
658 case IXGBE_VF_SET_MACVLAN: 858 case IXGBE_VF_SET_MACVLAN:
659 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> 859 retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
660 IXGBE_VT_MSGINFO_SHIFT; 860 break;
661 if (adapter->vfinfo[vf].pf_set_mac && index > 0) { 861 case IXGBE_VF_API_NEGOTIATE:
662 e_warn(drv, "VF %d requested MACVLAN filter but is " 862 retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
663 "administratively denied\n", vf); 863 break;
664 retval = -1; 864 case IXGBE_VF_GET_QUEUES:
665 break; 865 retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
666 }
667 /*
668 * If the VF is allowed to set MAC filters then turn off
669 * anti-spoofing to avoid false positives. An index
670 * greater than 0 will indicate the VF is setting a
671 * macvlan MAC filter.
672 */
673 if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
674 ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
675 retval = ixgbe_set_vf_macvlan(adapter, vf, index,
676 (unsigned char *)(&msgbuf[1]));
677 if (retval == -ENOSPC)
678 e_warn(drv, "VF %d has requested a MACVLAN filter "
679 "but there is no space for it\n", vf);
680 break; 866 break;
681 default: 867 default:
682 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); 868 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +878,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
692 878
693 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS; 879 msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
694 880
695 ixgbe_write_mbx(hw, msgbuf, 1, vf); 881 ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
696 882
697 return retval; 883 return retval;
698} 884}
@@ -783,7 +969,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
783 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); 969 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
784 if (err) 970 if (err)
785 goto out; 971 goto out;
786 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); 972 ixgbe_set_vmvir(adapter, vlan, qos, vf);
787 ixgbe_set_vmolr(hw, vf, false); 973 ixgbe_set_vmolr(hw, vf, false);
788 if (adapter->vfinfo[vf].spoofchk_enabled) 974 if (adapter->vfinfo[vf].spoofchk_enabled)
789 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); 975 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +989,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
803 } else { 989 } else {
804 err = ixgbe_set_vf_vlan(adapter, false, 990 err = ixgbe_set_vf_vlan(adapter, false,
805 adapter->vfinfo[vf].pf_vlan, vf); 991 adapter->vfinfo[vf].pf_vlan, vf);
806 ixgbe_set_vmvir(adapter, vlan, vf); 992 ixgbe_clear_vmvir(adapter, vf);
807 ixgbe_set_vmolr(hw, vf, true); 993 ixgbe_set_vmolr(hw, vf, true);
808 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); 994 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
809 if (adapter->vfinfo[vf].vlan_count) 995 if (adapter->vfinfo[vf].vlan_count)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f3368092..9cd8a13711d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 56#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 57#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 58#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
59#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
59#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
60#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D 61#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
61#define IXGBE_DEV_ID_82599EN_SFP 0x1557 62#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -1833,15 +1834,6 @@ enum {
1833/* Number of 100 microseconds we wait for PCI Express master disable */ 1834/* Number of 100 microseconds we wait for PCI Express master disable */
1834#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 1835#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
1835 1836
1836/* Check whether address is multicast. This is little-endian specific check.*/
1837#define IXGBE_IS_MULTICAST(Address) \
1838 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
1839
1840/* Check whether an address is broadcast. */
1841#define IXGBE_IS_BROADCAST(Address) \
1842 ((((u8 *)(Address))[0] == ((u8)0xff)) && \
1843 (((u8 *)(Address))[1] == ((u8)0xff)))
1844
1845/* RAH */ 1837/* RAH */
1846#define IXGBE_RAH_VIND_MASK 0x003C0000 1838#define IXGBE_RAH_VIND_MASK 0x003C0000
1847#define IXGBE_RAH_VIND_SHIFT 18 1839#define IXGBE_RAH_VIND_SHIFT 18
@@ -1962,6 +1954,8 @@ enum {
1962#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 1954#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
1963#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 1955#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
1964 1956
1957#define IXGBE_FWSM_TS_ENABLED 0x1
1958
1965/* Queue Drop Enable */ 1959/* Queue Drop Enable */
1966#define IXGBE_QDE_ENABLE 0x00000001 1960#define IXGBE_QDE_ENABLE 0x00000001
1967#define IXGBE_QDE_IDX_MASK 0x00007F00 1961#define IXGBE_QDE_IDX_MASK 0x00007F00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de4da5219b71..c73b92993391 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -152,7 +152,7 @@ mac_reset_top:
152 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); 152 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
153 153
154 /* Add the SAN MAC address to the RAR only if it's a valid address */ 154 /* Add the SAN MAC address to the RAR only if it's a valid address */
155 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { 155 if (is_valid_ether_addr(hw->mac.san_addr)) {
156 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, 156 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
157 hw->mac.san_addr, 0, IXGBE_RAH_AV); 157 hw->mac.san_addr, 0, IXGBE_RAH_AV);
158 158
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf5c09d..3147795bd135 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
33#define IXGBE_DEV_ID_X540_VF 0x1515 33#define IXGBE_DEV_ID_X540_VF 0x1515
34 34
35#define IXGBE_VF_IRQ_CLEAR_MASK 7 35#define IXGBE_VF_IRQ_CLEAR_MASK 7
36#define IXGBE_VF_MAX_TX_QUEUES 1 36#define IXGBE_VF_MAX_TX_QUEUES 8
37#define IXGBE_VF_MAX_RX_QUEUES 1 37#define IXGBE_VF_MAX_RX_QUEUES 8
38
39/* DCB define */
40#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
38 41
39/* Link speed */ 42/* Link speed */
40typedef u32 ixgbe_link_speed; 43typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c285685..fc0af9a3bb35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
58 struct ixgbevf_ring *next; 58 struct ixgbevf_ring *next;
59 struct net_device *netdev; 59 struct net_device *netdev;
60 struct device *dev; 60 struct device *dev;
61 struct ixgbevf_adapter *adapter; /* backlink */
62 void *desc; /* descriptor ring memory */ 61 void *desc; /* descriptor ring memory */
63 dma_addr_t dma; /* phys. address of descriptor ring */ 62 dma_addr_t dma; /* phys. address of descriptor ring */
64 unsigned int size; /* length in bytes */ 63 unsigned int size; /* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
75 u64 total_bytes; 74 u64 total_bytes;
76 u64 total_packets; 75 u64 total_packets;
77 struct u64_stats_sync syncp; 76 struct u64_stats_sync syncp;
77 u64 hw_csum_rx_error;
78 u64 hw_csum_rx_good;
78 79
79 u16 head; 80 u16 head;
80 u16 tail; 81 u16 tail;
@@ -89,8 +90,8 @@ struct ixgbevf_ring {
89/* How many Rx Buffers do we bundle into one write to the hardware ? */ 90/* How many Rx Buffers do we bundle into one write to the hardware ? */
90#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ 91#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
91 92
92#define MAX_RX_QUEUES 1 93#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
93#define MAX_TX_QUEUES 1 94#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
94 95
95#define IXGBEVF_DEFAULT_TXD 1024 96#define IXGBEVF_DEFAULT_TXD 1024
96#define IXGBEVF_DEFAULT_RXD 512 97#define IXGBEVF_DEFAULT_RXD 512
@@ -101,10 +102,10 @@ struct ixgbevf_ring {
101 102
102/* Supported Rx Buffer Sizes */ 103/* Supported Rx Buffer Sizes */
103#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ 104#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
104#define IXGBEVF_RXBUFFER_3K 3072 105#define IXGBEVF_RXBUFFER_2K 2048
105#define IXGBEVF_RXBUFFER_7K 7168 106#define IXGBEVF_RXBUFFER_4K 4096
106#define IXGBEVF_RXBUFFER_15K 15360 107#define IXGBEVF_RXBUFFER_8K 8192
107#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ 108#define IXGBEVF_RXBUFFER_10K 10240
108 109
109#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 110#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
110 111
@@ -229,6 +230,7 @@ struct ixgbevf_adapter {
229 */ 230 */
230 u32 flags; 231 u32 flags;
231#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) 232#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
233#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
232 234
233 /* OS defined structs */ 235 /* OS defined structs */
234 struct net_device *netdev; 236 struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d..257357ae66c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
58static const char ixgbevf_driver_string[] = 58static const char ixgbevf_driver_string[] =
59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 59 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60 60
61#define DRV_VERSION "2.6.0-k" 61#define DRV_VERSION "2.7.12-k"
62const char ixgbevf_driver_version[] = DRV_VERSION; 62const char ixgbevf_driver_version[] = DRV_VERSION;
63static char ixgbevf_copyright[] = 63static char ixgbevf_copyright[] =
64 "Copyright (c) 2009 - 2012 Intel Corporation."; 64 "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
99 99
100/* forward decls */ 100/* forward decls */
101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 101static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
102static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
102 103
103static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 104static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
104 struct ixgbevf_ring *rx_ring, 105 struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
120 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 121 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
121 * @queue: queue to map the corresponding interrupt to 122 * @queue: queue to map the corresponding interrupt to
122 * @msix_vector: the vector to map to the corresponding queue 123 * @msix_vector: the vector to map to the corresponding queue
123 *
124 */ 124 */
125static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 125static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
126 u8 queue, u8 msix_vector) 126 u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) 287 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
288 __vlan_hwaccel_put_tag(skb, tag); 288 __vlan_hwaccel_put_tag(skb, tag);
289 289
290 napi_gro_receive(&q_vector->napi, skb); 290 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
291 napi_gro_receive(&q_vector->napi, skb);
292 else
293 netif_rx(skb);
291} 294}
292 295
293/** 296/**
294 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 297 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
295 * @adapter: address of board private structure 298 * @ring: pointer to Rx descriptor ring structure
296 * @status_err: hardware indication of status of receive 299 * @status_err: hardware indication of status of receive
297 * @skb: skb currently being received and modified 300 * @skb: skb currently being received and modified
298 **/ 301 **/
299static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 302static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
300 struct ixgbevf_ring *ring,
301 u32 status_err, struct sk_buff *skb) 303 u32 status_err, struct sk_buff *skb)
302{ 304{
303 skb_checksum_none_assert(skb); 305 skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
309 /* if IP and error */ 311 /* if IP and error */
310 if ((status_err & IXGBE_RXD_STAT_IPCS) && 312 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
311 (status_err & IXGBE_RXDADV_ERR_IPE)) { 313 (status_err & IXGBE_RXDADV_ERR_IPE)) {
312 adapter->hw_csum_rx_error++; 314 ring->hw_csum_rx_error++;
313 return; 315 return;
314 } 316 }
315 317
@@ -317,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
317 return; 319 return;
318 320
319 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 321 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
320 adapter->hw_csum_rx_error++; 322 ring->hw_csum_rx_error++;
321 return; 323 return;
322 } 324 }
323 325
324 /* It must be a TCP or UDP packet with a valid checksum */ 326 /* It must be a TCP or UDP packet with a valid checksum */
325 skb->ip_summed = CHECKSUM_UNNECESSARY; 327 skb->ip_summed = CHECKSUM_UNNECESSARY;
326 adapter->hw_csum_rx_good++; 328 ring->hw_csum_rx_good++;
327} 329}
328 330
329/** 331/**
@@ -337,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
337 struct pci_dev *pdev = adapter->pdev; 339 struct pci_dev *pdev = adapter->pdev;
338 union ixgbe_adv_rx_desc *rx_desc; 340 union ixgbe_adv_rx_desc *rx_desc;
339 struct ixgbevf_rx_buffer *bi; 341 struct ixgbevf_rx_buffer *bi;
340 struct sk_buff *skb;
341 unsigned int i = rx_ring->next_to_use; 342 unsigned int i = rx_ring->next_to_use;
342 343
343 bi = &rx_ring->rx_buffer_info[i]; 344 bi = &rx_ring->rx_buffer_info[i];
344 345
345 while (cleaned_count--) { 346 while (cleaned_count--) {
346 rx_desc = IXGBEVF_RX_DESC(rx_ring, i); 347 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
347 skb = bi->skb; 348
348 if (!skb) { 349 if (!bi->skb) {
350 struct sk_buff *skb;
351
349 skb = netdev_alloc_skb_ip_align(rx_ring->netdev, 352 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
350 rx_ring->rx_buf_len); 353 rx_ring->rx_buf_len);
351 if (!skb) { 354 if (!skb) {
@@ -353,11 +356,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
353 goto no_buffers; 356 goto no_buffers;
354 } 357 }
355 bi->skb = skb; 358 bi->skb = skb;
356 } 359
357 if (!bi->dma) {
358 bi->dma = dma_map_single(&pdev->dev, skb->data, 360 bi->dma = dma_map_single(&pdev->dev, skb->data,
359 rx_ring->rx_buf_len, 361 rx_ring->rx_buf_len,
360 DMA_FROM_DEVICE); 362 DMA_FROM_DEVICE);
363 if (dma_mapping_error(&pdev->dev, bi->dma)) {
364 dev_kfree_skb(skb);
365 bi->skb = NULL;
366 dev_err(&pdev->dev, "RX DMA map failed\n");
367 break;
368 }
361 } 369 }
362 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 370 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
363 371
@@ -370,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
370no_buffers: 378no_buffers:
371 if (rx_ring->next_to_use != i) { 379 if (rx_ring->next_to_use != i) {
372 rx_ring->next_to_use = i; 380 rx_ring->next_to_use = i;
373
374 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
375 } 382 }
376} 383}
@@ -454,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
454 goto next_desc; 461 goto next_desc;
455 } 462 }
456 463
457 ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb); 464 ixgbevf_rx_checksum(rx_ring, staterr, skb);
458 465
459 /* probably a little skewed due to removing CRC */ 466 /* probably a little skewed due to removing CRC */
460 total_rx_bytes += skb->len; 467 total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
471 } 478 }
472 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 479 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
473 480
481 /* Workaround hardware that can't do proper VEPA multicast
482 * source pruning.
483 */
484 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
485 !(compare_ether_addr(adapter->netdev->dev_addr,
486 eth_hdr(skb)->h_source))) {
487 dev_kfree_skb_irq(skb);
488 goto next_desc;
489 }
490
474 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); 491 ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
475 492
476next_desc: 493next_desc:
@@ -533,9 +550,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
533 else 550 else
534 per_ring_budget = budget; 551 per_ring_budget = budget;
535 552
553 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
536 ixgbevf_for_each_ring(ring, q_vector->rx) 554 ixgbevf_for_each_ring(ring, q_vector->rx)
537 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, 555 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
538 per_ring_budget); 556 per_ring_budget);
557 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
539 558
540 /* If all work not completed, return budget and keep polling */ 559 /* If all work not completed, return budget and keep polling */
541 if (!clean_complete) 560 if (!clean_complete)
@@ -743,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
743 return IRQ_HANDLED; 762 return IRQ_HANDLED;
744} 763}
745 764
746
747/** 765/**
748 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) 766 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
749 * @irq: unused 767 * @irq: unused
@@ -1065,20 +1083,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
1065 max_frame += VLAN_HLEN; 1083 max_frame += VLAN_HLEN;
1066 1084
1067 /* 1085 /*
1068 * Make best use of allocation by using all but 1K of a 1086 * Allocate buffer sizes that fit well into 32K and
1069 * power of 2 allocation that will be used for skb->head. 1087 * take into account max frame size of 9.5K
1070 */ 1088 */
1071 if ((hw->mac.type == ixgbe_mac_X540_vf) && 1089 if ((hw->mac.type == ixgbe_mac_X540_vf) &&
1072 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) 1090 (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
1073 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1091 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1074 else if (max_frame <= IXGBEVF_RXBUFFER_3K) 1092 else if (max_frame <= IXGBEVF_RXBUFFER_2K)
1075 rx_buf_len = IXGBEVF_RXBUFFER_3K; 1093 rx_buf_len = IXGBEVF_RXBUFFER_2K;
1076 else if (max_frame <= IXGBEVF_RXBUFFER_7K) 1094 else if (max_frame <= IXGBEVF_RXBUFFER_4K)
1077 rx_buf_len = IXGBEVF_RXBUFFER_7K; 1095 rx_buf_len = IXGBEVF_RXBUFFER_4K;
1078 else if (max_frame <= IXGBEVF_RXBUFFER_15K) 1096 else if (max_frame <= IXGBEVF_RXBUFFER_8K)
1079 rx_buf_len = IXGBEVF_RXBUFFER_15K; 1097 rx_buf_len = IXGBEVF_RXBUFFER_8K;
1080 else 1098 else
1081 rx_buf_len = IXGBEVF_MAX_RXBUFFER; 1099 rx_buf_len = IXGBEVF_RXBUFFER_10K;
1082 1100
1083 for (i = 0; i < adapter->num_rx_queues; i++) 1101 for (i = 0; i < adapter->num_rx_queues; i++)
1084 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1102 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
@@ -1128,15 +1146,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1128 struct ixgbe_hw *hw = &adapter->hw; 1146 struct ixgbe_hw *hw = &adapter->hw;
1129 int err; 1147 int err;
1130 1148
1131 if (!hw->mac.ops.set_vfta) 1149 spin_lock_bh(&adapter->mbx_lock);
1132 return -EOPNOTSUPP;
1133
1134 spin_lock(&adapter->mbx_lock);
1135 1150
1136 /* add VID to filter table */ 1151 /* add VID to filter table */
1137 err = hw->mac.ops.set_vfta(hw, vid, 0, true); 1152 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1138 1153
1139 spin_unlock(&adapter->mbx_lock); 1154 spin_unlock_bh(&adapter->mbx_lock);
1140 1155
1141 /* translate error return types so error makes sense */ 1156 /* translate error return types so error makes sense */
1142 if (err == IXGBE_ERR_MBX) 1157 if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1171,12 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1156 struct ixgbe_hw *hw = &adapter->hw; 1171 struct ixgbe_hw *hw = &adapter->hw;
1157 int err = -EOPNOTSUPP; 1172 int err = -EOPNOTSUPP;
1158 1173
1159 spin_lock(&adapter->mbx_lock); 1174 spin_lock_bh(&adapter->mbx_lock);
1160 1175
1161 /* remove VID from filter table */ 1176 /* remove VID from filter table */
1162 if (hw->mac.ops.set_vfta) 1177 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1163 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1164 1178
1165 spin_unlock(&adapter->mbx_lock); 1179 spin_unlock_bh(&adapter->mbx_lock);
1166 1180
1167 clear_bit(vid, adapter->active_vlans); 1181 clear_bit(vid, adapter->active_vlans);
1168 1182
@@ -1206,27 +1220,27 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1206} 1220}
1207 1221
1208/** 1222/**
1209 * ixgbevf_set_rx_mode - Multicast set 1223 * ixgbevf_set_rx_mode - Multicast and unicast set
1210 * @netdev: network interface device structure 1224 * @netdev: network interface device structure
1211 * 1225 *
1212 * The set_rx_method entry point is called whenever the multicast address 1226 * The set_rx_method entry point is called whenever the multicast address
1213 * list or the network interface flags are updated. This routine is 1227 * list, unicast address list or the network interface flags are updated.
1214 * responsible for configuring the hardware for proper multicast mode. 1228 * This routine is responsible for configuring the hardware for proper
1229 * multicast mode and configuring requested unicast filters.
1215 **/ 1230 **/
1216static void ixgbevf_set_rx_mode(struct net_device *netdev) 1231static void ixgbevf_set_rx_mode(struct net_device *netdev)
1217{ 1232{
1218 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1233 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1219 struct ixgbe_hw *hw = &adapter->hw; 1234 struct ixgbe_hw *hw = &adapter->hw;
1220 1235
1221 spin_lock(&adapter->mbx_lock); 1236 spin_lock_bh(&adapter->mbx_lock);
1222 1237
1223 /* reprogram multicast list */ 1238 /* reprogram multicast list */
1224 if (hw->mac.ops.update_mc_addr_list) 1239 hw->mac.ops.update_mc_addr_list(hw, netdev);
1225 hw->mac.ops.update_mc_addr_list(hw, netdev);
1226 1240
1227 ixgbevf_write_uc_addr_list(netdev); 1241 ixgbevf_write_uc_addr_list(netdev);
1228 1242
1229 spin_unlock(&adapter->mbx_lock); 1243 spin_unlock_bh(&adapter->mbx_lock);
1230} 1244}
1231 1245
1232static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1246static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1290 "not set within the polling period\n", rxr); 1304 "not set within the polling period\n", rxr);
1291 } 1305 }
1292 1306
1293 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 1307 ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
1294 (adapter->rx_ring[rxr].count - 1)); 1308 adapter->rx_ring[rxr].count - 1);
1295} 1309}
1296 1310
1297static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1311static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1349,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1335static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) 1349static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1336{ 1350{
1337 struct ixgbe_hw *hw = &adapter->hw; 1351 struct ixgbe_hw *hw = &adapter->hw;
1338 int api[] = { ixgbe_mbox_api_10, 1352 int api[] = { ixgbe_mbox_api_11,
1353 ixgbe_mbox_api_10,
1339 ixgbe_mbox_api_unknown }; 1354 ixgbe_mbox_api_unknown };
1340 int err = 0, idx = 0; 1355 int err = 0, idx = 0;
1341 1356
1342 spin_lock(&adapter->mbx_lock); 1357 spin_lock_bh(&adapter->mbx_lock);
1343 1358
1344 while (api[idx] != ixgbe_mbox_api_unknown) { 1359 while (api[idx] != ixgbe_mbox_api_unknown) {
1345 err = ixgbevf_negotiate_api_version(hw, api[idx]); 1360 err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1363,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
1348 idx++; 1363 idx++;
1349 } 1364 }
1350 1365
1351 spin_unlock(&adapter->mbx_lock); 1366 spin_unlock_bh(&adapter->mbx_lock);
1352} 1367}
1353 1368
1354static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1369static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1404,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1389 1404
1390 ixgbevf_configure_msix(adapter); 1405 ixgbevf_configure_msix(adapter);
1391 1406
1392 spin_lock(&adapter->mbx_lock); 1407 spin_lock_bh(&adapter->mbx_lock);
1393 1408
1394 if (hw->mac.ops.set_rar) { 1409 if (is_valid_ether_addr(hw->mac.addr))
1395 if (is_valid_ether_addr(hw->mac.addr)) 1410 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1396 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1411 else
1397 else 1412 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1398 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1399 }
1400 1413
1401 spin_unlock(&adapter->mbx_lock); 1414 spin_unlock_bh(&adapter->mbx_lock);
1402 1415
1403 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1416 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1404 ixgbevf_napi_enable_all(adapter); 1417 ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1426,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1413 mod_timer(&adapter->watchdog_timer, jiffies); 1426 mod_timer(&adapter->watchdog_timer, jiffies);
1414} 1427}
1415 1428
1429static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
1430{
1431 struct ixgbe_hw *hw = &adapter->hw;
1432 struct ixgbevf_ring *rx_ring;
1433 unsigned int def_q = 0;
1434 unsigned int num_tcs = 0;
1435 unsigned int num_rx_queues = 1;
1436 int err, i;
1437
1438 spin_lock_bh(&adapter->mbx_lock);
1439
1440 /* fetch queue configuration from the PF */
1441 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1442
1443 spin_unlock_bh(&adapter->mbx_lock);
1444
1445 if (err)
1446 return err;
1447
1448 if (num_tcs > 1) {
1449 /* update default Tx ring register index */
1450 adapter->tx_ring[0].reg_idx = def_q;
1451
1452 /* we need as many queues as traffic classes */
1453 num_rx_queues = num_tcs;
1454 }
1455
1456 /* nothing to do if we have the correct number of queues */
1457 if (adapter->num_rx_queues == num_rx_queues)
1458 return 0;
1459
1460 /* allocate new rings */
1461 rx_ring = kcalloc(num_rx_queues,
1462 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1463 if (!rx_ring)
1464 return -ENOMEM;
1465
1466 /* setup ring fields */
1467 for (i = 0; i < num_rx_queues; i++) {
1468 rx_ring[i].count = adapter->rx_ring_count;
1469 rx_ring[i].queue_index = i;
1470 rx_ring[i].reg_idx = i;
1471 rx_ring[i].dev = &adapter->pdev->dev;
1472 rx_ring[i].netdev = adapter->netdev;
1473
1474 /* allocate resources on the ring */
1475 err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
1476 if (err) {
1477 while (i) {
1478 i--;
1479 ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
1480 }
1481 kfree(rx_ring);
1482 return err;
1483 }
1484 }
1485
1486 /* free the existing rings and queues */
1487 ixgbevf_free_all_rx_resources(adapter);
1488 adapter->num_rx_queues = 0;
1489 kfree(adapter->rx_ring);
1490
1491 /* move new rings into position on the adapter struct */
1492 adapter->rx_ring = rx_ring;
1493 adapter->num_rx_queues = num_rx_queues;
1494
1495 /* reset ring to vector mapping */
1496 ixgbevf_reset_q_vectors(adapter);
1497 ixgbevf_map_rings_to_vectors(adapter);
1498
1499 return 0;
1500}
1501
1416void ixgbevf_up(struct ixgbevf_adapter *adapter) 1502void ixgbevf_up(struct ixgbevf_adapter *adapter)
1417{ 1503{
1418 struct ixgbe_hw *hw = &adapter->hw; 1504 struct ixgbe_hw *hw = &adapter->hw;
1419 1505
1420 ixgbevf_negotiate_api(adapter); 1506 ixgbevf_negotiate_api(adapter);
1421 1507
1508 ixgbevf_reset_queues(adapter);
1509
1422 ixgbevf_configure(adapter); 1510 ixgbevf_configure(adapter);
1423 1511
1424 ixgbevf_up_complete(adapter); 1512 ixgbevf_up_complete(adapter);
@@ -1497,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1497 return; 1585 return;
1498 1586
1499 /* Free all the Tx ring sk_buffs */ 1587 /* Free all the Tx ring sk_buffs */
1500
1501 for (i = 0; i < tx_ring->count; i++) { 1588 for (i = 0; i < tx_ring->count; i++) {
1502 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1589 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1503 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); 1590 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1593 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1680 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1594 msleep(1); 1681 msleep(1);
1595 1682
1596 /*
1597 * Check if PF is up before re-init. If not then skip until
1598 * later when the PF is up and ready to service requests from
1599 * the VF via mailbox. If the VF is up and running then the
1600 * watchdog task will continue to schedule reset tasks until
1601 * the PF is up and running.
1602 */
1603 ixgbevf_down(adapter); 1683 ixgbevf_down(adapter);
1604 ixgbevf_up(adapter); 1684 ixgbevf_up(adapter);
1605 1685
@@ -1611,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1611 struct ixgbe_hw *hw = &adapter->hw; 1691 struct ixgbe_hw *hw = &adapter->hw;
1612 struct net_device *netdev = adapter->netdev; 1692 struct net_device *netdev = adapter->netdev;
1613 1693
1614 spin_lock(&adapter->mbx_lock);
1615
1616 if (hw->mac.ops.reset_hw(hw)) 1694 if (hw->mac.ops.reset_hw(hw))
1617 hw_dbg(hw, "PF still resetting\n"); 1695 hw_dbg(hw, "PF still resetting\n");
1618 else 1696 else
1619 hw->mac.ops.init_hw(hw); 1697 hw->mac.ops.init_hw(hw);
1620 1698
1621 spin_unlock(&adapter->mbx_lock);
1622
1623 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1699 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1624 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1700 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1625 netdev->addr_len); 1701 netdev->addr_len);
@@ -1628,10 +1704,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1628 } 1704 }
1629} 1705}
1630 1706
1631static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1707static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1632 int vectors) 1708 int vectors)
1633{ 1709{
1634 int err, vector_threshold; 1710 int err = 0;
1711 int vector_threshold;
1635 1712
1636 /* We'll want at least 2 (vector_threshold): 1713 /* We'll want at least 2 (vector_threshold):
1637 * 1) TxQ[0] + RxQ[0] handler 1714 * 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1724,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1647 while (vectors >= vector_threshold) { 1724 while (vectors >= vector_threshold) {
1648 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1725 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1649 vectors); 1726 vectors);
1650 if (!err) /* Success in acquiring all requested vectors. */ 1727 if (!err || err < 0) /* Success or a nasty failure. */
1651 break; 1728 break;
1652 else if (err < 0)
1653 vectors = 0; /* Nasty failure, quit now */
1654 else /* err == number of vectors we should try again with */ 1729 else /* err == number of vectors we should try again with */
1655 vectors = err; 1730 vectors = err;
1656 } 1731 }
1657 1732
1658 if (vectors < vector_threshold) { 1733 if (vectors < vector_threshold)
1659 /* Can't allocate enough MSI-X interrupts? Oh well. 1734 err = -ENOMEM;
1660 * This just means we'll go with either a single MSI 1735
1661 * vector or fall back to legacy interrupts. 1736 if (err) {
1662 */ 1737 dev_err(&adapter->pdev->dev,
1663 hw_dbg(&adapter->hw, 1738 "Unable to allocate MSI-X interrupts\n");
1664 "Unable to allocate MSI-X interrupts\n");
1665 kfree(adapter->msix_entries); 1739 kfree(adapter->msix_entries);
1666 adapter->msix_entries = NULL; 1740 adapter->msix_entries = NULL;
1667 } else { 1741 } else {
@@ -1672,6 +1746,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1672 */ 1746 */
1673 adapter->num_msix_vectors = vectors; 1747 adapter->num_msix_vectors = vectors;
1674 } 1748 }
1749
1750 return err;
1675} 1751}
1676 1752
1677/** 1753/**
@@ -1717,6 +1793,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1717 for (i = 0; i < adapter->num_tx_queues; i++) { 1793 for (i = 0; i < adapter->num_tx_queues; i++) {
1718 adapter->tx_ring[i].count = adapter->tx_ring_count; 1794 adapter->tx_ring[i].count = adapter->tx_ring_count;
1719 adapter->tx_ring[i].queue_index = i; 1795 adapter->tx_ring[i].queue_index = i;
1796 /* reg_idx may be remapped later by DCB config */
1720 adapter->tx_ring[i].reg_idx = i; 1797 adapter->tx_ring[i].reg_idx = i;
1721 adapter->tx_ring[i].dev = &adapter->pdev->dev; 1798 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1722 adapter->tx_ring[i].netdev = adapter->netdev; 1799 adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1851,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1774 for (vector = 0; vector < v_budget; vector++) 1851 for (vector = 0; vector < v_budget; vector++)
1775 adapter->msix_entries[vector].entry = vector; 1852 adapter->msix_entries[vector].entry = vector;
1776 1853
1777 ixgbevf_acquire_msix_vectors(adapter, v_budget); 1854 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
1855 if (err)
1856 goto out;
1778 1857
1779 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); 1858 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1780 if (err) 1859 if (err)
@@ -1834,18 +1913,13 @@ err_out:
1834 **/ 1913 **/
1835static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 1914static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1836{ 1915{
1837 int q_idx, num_q_vectors; 1916 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1838 int napi_vectors;
1839
1840 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1841 napi_vectors = adapter->num_rx_queues;
1842 1917
1843 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 1918 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1844 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 1919 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1845 1920
1846 adapter->q_vector[q_idx] = NULL; 1921 adapter->q_vector[q_idx] = NULL;
1847 if (q_idx < napi_vectors) 1922 netif_napi_del(&q_vector->napi);
1848 netif_napi_del(&q_vector->napi);
1849 kfree(q_vector); 1923 kfree(q_vector);
1850 } 1924 }
1851} 1925}
@@ -1935,7 +2009,7 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1935 * Fields are initialized based on PCI device information and 2009 * Fields are initialized based on PCI device information and
1936 * OS network device settings (MTU size). 2010 * OS network device settings (MTU size).
1937 **/ 2011 **/
1938static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2012static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1939{ 2013{
1940 struct ixgbe_hw *hw = &adapter->hw; 2014 struct ixgbe_hw *hw = &adapter->hw;
1941 struct pci_dev *pdev = adapter->pdev; 2015 struct pci_dev *pdev = adapter->pdev;
@@ -1950,8 +2024,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1950 hw->subsystem_device_id = pdev->subsystem_device; 2024 hw->subsystem_device_id = pdev->subsystem_device;
1951 2025
1952 hw->mbx.ops.init_params(hw); 2026 hw->mbx.ops.init_params(hw);
1953 hw->mac.max_tx_queues = MAX_TX_QUEUES; 2027
1954 hw->mac.max_rx_queues = MAX_RX_QUEUES; 2028 /* assume legacy case in which PF would only give VF 2 queues */
2029 hw->mac.max_tx_queues = 2;
2030 hw->mac.max_rx_queues = 2;
2031
1955 err = hw->mac.ops.reset_hw(hw); 2032 err = hw->mac.ops.reset_hw(hw);
1956 if (err) { 2033 if (err) {
1957 dev_info(&pdev->dev, 2034 dev_info(&pdev->dev,
@@ -1966,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1966 goto out; 2043 goto out;
1967 } 2044 }
1968 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2045 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
1969 adapter->netdev->addr_len); 2046 adapter->netdev->addr_len);
1970 } 2047 }
1971 2048
1972 /* lock to protect mailbox accesses */ 2049 /* lock to protect mailbox accesses */
@@ -2016,6 +2093,7 @@ out:
2016void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2093void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2017{ 2094{
2018 struct ixgbe_hw *hw = &adapter->hw; 2095 struct ixgbe_hw *hw = &adapter->hw;
2096 int i;
2019 2097
2020 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2098 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2021 adapter->stats.vfgprc); 2099 adapter->stats.vfgprc);
@@ -2029,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2029 adapter->stats.vfgotc); 2107 adapter->stats.vfgotc);
2030 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2108 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2031 adapter->stats.vfmprc); 2109 adapter->stats.vfmprc);
2110
2111 for (i = 0; i < adapter->num_rx_queues; i++) {
2112 adapter->hw_csum_rx_error +=
2113 adapter->rx_ring[i].hw_csum_rx_error;
2114 adapter->hw_csum_rx_good +=
2115 adapter->rx_ring[i].hw_csum_rx_good;
2116 adapter->rx_ring[i].hw_csum_rx_error = 0;
2117 adapter->rx_ring[i].hw_csum_rx_good = 0;
2118 }
2032} 2119}
2033 2120
2034/** 2121/**
@@ -2103,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2103 struct ixgbe_hw *hw = &adapter->hw; 2190 struct ixgbe_hw *hw = &adapter->hw;
2104 u32 link_speed = adapter->link_speed; 2191 u32 link_speed = adapter->link_speed;
2105 bool link_up = adapter->link_up; 2192 bool link_up = adapter->link_up;
2193 s32 need_reset;
2106 2194
2107 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2195 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2108 2196
@@ -2110,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2110 * Always check the link on the watchdog because we have 2198 * Always check the link on the watchdog because we have
2111 * no LSC interrupt 2199 * no LSC interrupt
2112 */ 2200 */
2113 if (hw->mac.ops.check_link) { 2201 spin_lock_bh(&adapter->mbx_lock);
2114 s32 need_reset;
2115
2116 spin_lock(&adapter->mbx_lock);
2117 2202
2118 need_reset = hw->mac.ops.check_link(hw, &link_speed, 2203 need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2119 &link_up, false);
2120 2204
2121 spin_unlock(&adapter->mbx_lock); 2205 spin_unlock_bh(&adapter->mbx_lock);
2122 2206
2123 if (need_reset) { 2207 if (need_reset) {
2124 adapter->link_up = link_up; 2208 adapter->link_up = link_up;
2125 adapter->link_speed = link_speed; 2209 adapter->link_speed = link_speed;
2126 netif_carrier_off(netdev); 2210 netif_carrier_off(netdev);
2127 netif_tx_stop_all_queues(netdev); 2211 netif_tx_stop_all_queues(netdev);
2128 schedule_work(&adapter->reset_task); 2212 schedule_work(&adapter->reset_task);
2129 goto pf_has_reset; 2213 goto pf_has_reset;
2130 }
2131 } else {
2132 /* always assume link is up, if no check link
2133 * function */
2134 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2135 link_up = true;
2136 } 2214 }
2137 adapter->link_up = link_up; 2215 adapter->link_up = link_up;
2138 adapter->link_speed = link_speed; 2216 adapter->link_speed = link_speed;
@@ -2377,6 +2455,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2377 &adapter->rx_ring[i]); 2455 &adapter->rx_ring[i]);
2378} 2456}
2379 2457
2458static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
2459{
2460 struct ixgbe_hw *hw = &adapter->hw;
2461 struct ixgbevf_ring *rx_ring;
2462 unsigned int def_q = 0;
2463 unsigned int num_tcs = 0;
2464 unsigned int num_rx_queues = 1;
2465 int err, i;
2466
2467 spin_lock_bh(&adapter->mbx_lock);
2468
2469 /* fetch queue configuration from the PF */
2470 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2471
2472 spin_unlock_bh(&adapter->mbx_lock);
2473
2474 if (err)
2475 return err;
2476
2477 if (num_tcs > 1) {
2478 /* update default Tx ring register index */
2479 adapter->tx_ring[0].reg_idx = def_q;
2480
2481 /* we need as many queues as traffic classes */
2482 num_rx_queues = num_tcs;
2483 }
2484
2485 /* nothing to do if we have the correct number of queues */
2486 if (adapter->num_rx_queues == num_rx_queues)
2487 return 0;
2488
2489 /* allocate new rings */
2490 rx_ring = kcalloc(num_rx_queues,
2491 sizeof(struct ixgbevf_ring), GFP_KERNEL);
2492 if (!rx_ring)
2493 return -ENOMEM;
2494
2495 /* setup ring fields */
2496 for (i = 0; i < num_rx_queues; i++) {
2497 rx_ring[i].count = adapter->rx_ring_count;
2498 rx_ring[i].queue_index = i;
2499 rx_ring[i].reg_idx = i;
2500 rx_ring[i].dev = &adapter->pdev->dev;
2501 rx_ring[i].netdev = adapter->netdev;
2502 }
2503
2504 /* free the existing ring and queues */
2505 adapter->num_rx_queues = 0;
2506 kfree(adapter->rx_ring);
2507
2508 /* move new rings into position on the adapter struct */
2509 adapter->rx_ring = rx_ring;
2510 adapter->num_rx_queues = num_rx_queues;
2511
2512 return 0;
2513}
2514
2380/** 2515/**
2381 * ixgbevf_open - Called when a network interface is made active 2516 * ixgbevf_open - Called when a network interface is made active
2382 * @netdev: network interface device structure 2517 * @netdev: network interface device structure
@@ -2413,6 +2548,11 @@ static int ixgbevf_open(struct net_device *netdev)
2413 2548
2414 ixgbevf_negotiate_api(adapter); 2549 ixgbevf_negotiate_api(adapter);
2415 2550
2551 /* setup queue reg_idx and Rx queue count */
2552 err = ixgbevf_setup_queues(adapter);
2553 if (err)
2554 goto err_setup_queues;
2555
2416 /* allocate transmit descriptors */ 2556 /* allocate transmit descriptors */
2417 err = ixgbevf_setup_all_tx_resources(adapter); 2557 err = ixgbevf_setup_all_tx_resources(adapter);
2418 if (err) 2558 if (err)
@@ -2451,6 +2591,7 @@ err_setup_rx:
2451 ixgbevf_free_all_rx_resources(adapter); 2591 ixgbevf_free_all_rx_resources(adapter);
2452err_setup_tx: 2592err_setup_tx:
2453 ixgbevf_free_all_tx_resources(adapter); 2593 ixgbevf_free_all_tx_resources(adapter);
2594err_setup_queues:
2454 ixgbevf_reset(adapter); 2595 ixgbevf_reset(adapter);
2455 2596
2456err_setup_reset: 2597err_setup_reset:
@@ -2562,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2562static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, 2703static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2563 struct sk_buff *skb, u32 tx_flags) 2704 struct sk_buff *skb, u32 tx_flags)
2564{ 2705{
2565
2566
2567
2568 u32 vlan_macip_lens = 0; 2706 u32 vlan_macip_lens = 0;
2569 u32 mss_l4len_idx = 0; 2707 u32 mss_l4len_idx = 0;
2570 u32 type_tucmd = 0; 2708 u32 type_tucmd = 0;
@@ -2678,10 +2816,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2678 tx_buffer_info->dma = 2816 tx_buffer_info->dma =
2679 skb_frag_dma_map(tx_ring->dev, frag, 2817 skb_frag_dma_map(tx_ring->dev, frag,
2680 offset, size, DMA_TO_DEVICE); 2818 offset, size, DMA_TO_DEVICE);
2681 tx_buffer_info->mapped_as_page = true;
2682 if (dma_mapping_error(tx_ring->dev, 2819 if (dma_mapping_error(tx_ring->dev,
2683 tx_buffer_info->dma)) 2820 tx_buffer_info->dma))
2684 goto dma_error; 2821 goto dma_error;
2822 tx_buffer_info->mapped_as_page = true;
2685 tx_buffer_info->next_to_watch = i; 2823 tx_buffer_info->next_to_watch = i;
2686 2824
2687 len -= size; 2825 len -= size;
@@ -2754,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2754 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2892 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2755 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 2893 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2756 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; 2894 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2757
2758 } 2895 }
2759 2896
2760 /* 2897 /*
@@ -2823,6 +2960,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2823#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD 2960#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2824 unsigned short f; 2961 unsigned short f;
2825#endif 2962#endif
2963 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
2964 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
2965 dev_kfree_skb(skb);
2966 return NETDEV_TX_OK;
2967 }
2826 2968
2827 tx_ring = &adapter->tx_ring[r_idx]; 2969 tx_ring = &adapter->tx_ring[r_idx];
2828 2970
@@ -2902,12 +3044,11 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2902 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3044 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2903 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3045 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2904 3046
2905 spin_lock(&adapter->mbx_lock); 3047 spin_lock_bh(&adapter->mbx_lock);
2906 3048
2907 if (hw->mac.ops.set_rar) 3049 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2908 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2909 3050
2910 spin_unlock(&adapter->mbx_lock); 3051 spin_unlock_bh(&adapter->mbx_lock);
2911 3052
2912 return 0; 3053 return 0;
2913} 3054}
@@ -2925,8 +3066,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2925 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3066 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2926 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3067 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2927 3068
2928 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3069 switch (adapter->hw.api_version) {
3070 case ixgbe_mbox_api_11:
2929 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3071 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3072 break;
3073 default:
3074 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3075 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3076 break;
3077 }
2930 3078
2931 /* MTU < 68 is an error and causes problems on some kernels */ 3079 /* MTU < 68 is an error and causes problems on some kernels */
2932 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3080 if ((new_mtu < 68) || (max_frame > max_possible_frame))
@@ -3094,8 +3242,7 @@ static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3094 * The OS initialization, configuring of the adapter private structure, 3242 * The OS initialization, configuring of the adapter private structure,
3095 * and a hardware reset occur. 3243 * and a hardware reset occur.
3096 **/ 3244 **/
3097static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3245static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3098 const struct pci_device_id *ent)
3099{ 3246{
3100 struct net_device *netdev; 3247 struct net_device *netdev;
3101 struct ixgbevf_adapter *adapter = NULL; 3248 struct ixgbevf_adapter *adapter = NULL;
@@ -3223,10 +3370,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3223 if (err) 3370 if (err)
3224 goto err_sw_init; 3371 goto err_sw_init;
3225 3372
3226 /* pick up the PCI bus settings for reporting later */
3227 if (hw->mac.ops.get_bus_info)
3228 hw->mac.ops.get_bus_info(hw);
3229
3230 strcpy(netdev->name, "eth%d"); 3373 strcpy(netdev->name, "eth%d");
3231 3374
3232 err = register_netdev(netdev); 3375 err = register_netdev(netdev);
@@ -3270,7 +3413,7 @@ err_dma:
3270 * Hot-Plug event, or because the driver is going to be removed from 3413 * Hot-Plug event, or because the driver is going to be removed from
3271 * memory. 3414 * memory.
3272 **/ 3415 **/
3273static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3416static void ixgbevf_remove(struct pci_dev *pdev)
3274{ 3417{
3275 struct net_device *netdev = pci_get_drvdata(pdev); 3418 struct net_device *netdev = pci_get_drvdata(pdev);
3276 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3419 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3384,7 +3527,7 @@ static struct pci_driver ixgbevf_driver = {
3384 .name = ixgbevf_driver_name, 3527 .name = ixgbevf_driver_name,
3385 .id_table = ixgbevf_pci_tbl, 3528 .id_table = ixgbevf_pci_tbl,
3386 .probe = ixgbevf_probe, 3529 .probe = ixgbevf_probe,
3387 .remove = __devexit_p(ixgbevf_remove), 3530 .remove = ixgbevf_remove,
3388#ifdef CONFIG_PM 3531#ifdef CONFIG_PM
3389 /* Power Management Hooks */ 3532 /* Power Management Hooks */
3390 .suspend = ixgbevf_suspend, 3533 .suspend = ixgbevf_suspend,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86f337f..0bc30058ff82 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
85enum ixgbe_pfvf_api_rev { 85enum ixgbe_pfvf_api_rev {
86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ 86 ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ 87 ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
88 ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
88 /* This value should always be last */ 89 /* This value should always be last */
89 ixgbe_mbox_api_unknown, /* indicates that API version is not known */ 90 ixgbe_mbox_api_unknown, /* indicates that API version is not known */
90}; 91};
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
100#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ 101#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
101#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ 102#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
102 103
104/* mailbox API, version 1.1 VF requests */
105#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
106
107/* GET_QUEUES return data indices within the mailbox */
108#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
109#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
110#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
111#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
112
103/* length of permanent address message returned from PF */ 113/* length of permanent address message returned from PF */
104#define IXGBE_VF_PERMADDR_MSG_LEN 4 114#define IXGBE_VF_PERMADDR_MSG_LEN 4
105/* word in permanent address message with the current multicast type */ 115/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e6fcc8..0c94557b53df 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
331 netdev_for_each_mc_addr(ha, netdev) { 331 netdev_for_each_mc_addr(ha, netdev) {
332 if (i == cnt) 332 if (i == cnt)
333 break; 333 break;
334 if (is_link_local_ether_addr(ha->addr))
335 continue;
336
334 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); 337 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
335 } 338 }
336 339
@@ -513,6 +516,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
513 return err; 516 return err;
514} 517}
515 518
519int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
520 unsigned int *default_tc)
521{
522 int err;
523 u32 msg[5];
524
525 /* do nothing if API doesn't support ixgbevf_get_queues */
526 switch (hw->api_version) {
527 case ixgbe_mbox_api_11:
528 break;
529 default:
530 return 0;
531 }
532
533 /* Fetch queue configuration from the PF */
534 msg[0] = IXGBE_VF_GET_QUEUE;
535 msg[1] = msg[2] = msg[3] = msg[4] = 0;
536 err = hw->mbx.ops.write_posted(hw, msg, 5);
537
538 if (!err)
539 err = hw->mbx.ops.read_posted(hw, msg, 5);
540
541 if (!err) {
542 msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
543
544 /*
545 * if we we didn't get an ACK there must have been
546 * some sort of mailbox error so we should treat it
547 * as such
548 */
549 if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
550 return IXGBE_ERR_MBX;
551
552 /* record and validate values from message */
553 hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
554 if (hw->mac.max_tx_queues == 0 ||
555 hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
556 hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
557
558 hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
559 if (hw->mac.max_rx_queues == 0 ||
560 hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
561 hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
562
563 *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
564 /* in case of unknown state assume we cannot tag frames */
565 if (*num_tcs > hw->mac.max_rx_queues)
566 *num_tcs = 1;
567
568 *default_tc = msg[IXGBE_VF_DEF_QUEUE];
569 /* default to queue 0 on out-of-bounds queue number */
570 if (*default_tc >= hw->mac.max_tx_queues)
571 *default_tc = 0;
572 }
573
574 return err;
575}
576
516static const struct ixgbe_mac_operations ixgbevf_mac_ops = { 577static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
517 .init_hw = ixgbevf_init_hw_vf, 578 .init_hw = ixgbevf_init_hw_vf,
518 .reset_hw = ixgbevf_reset_hw_vf, 579 .reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a584d8c..7b1f502d1716 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@ struct ixgbevf_info {
174 174
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); 175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); 176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
177int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
178 unsigned int *default_tc);
177#endif /* __IXGBE_VF_H__ */ 179#endif /* __IXGBE_VF_H__ */
178 180
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 60ac46f4ac08..0519afa413d2 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2965,7 +2965,7 @@ static const struct net_device_ops jme_netdev_ops = {
2965#endif 2965#endif
2966}; 2966};
2967 2967
2968static int __devinit 2968static int
2969jme_init_one(struct pci_dev *pdev, 2969jme_init_one(struct pci_dev *pdev,
2970 const struct pci_device_id *ent) 2970 const struct pci_device_id *ent)
2971{ 2971{
@@ -3203,7 +3203,7 @@ err_out:
3203 return rc; 3203 return rc;
3204} 3204}
3205 3205
3206static void __devexit 3206static void
3207jme_remove_one(struct pci_dev *pdev) 3207jme_remove_one(struct pci_dev *pdev)
3208{ 3208{
3209 struct net_device *netdev = pci_get_drvdata(pdev); 3209 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3318,7 +3318,7 @@ static struct pci_driver jme_driver = {
3318 .name = DRV_NAME, 3318 .name = DRV_NAME,
3319 .id_table = jme_pci_tbl, 3319 .id_table = jme_pci_tbl,
3320 .probe = jme_init_one, 3320 .probe = jme_init_one,
3321 .remove = __devexit_p(jme_remove_one), 3321 .remove = jme_remove_one,
3322 .shutdown = jme_shutdown, 3322 .shutdown = jme_shutdown,
3323 .driver.pm = JME_PM_OPS, 3323 .driver.pm = JME_PM_OPS,
3324}; 3324};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 003c5bc7189f..c124e67a1a1c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -774,7 +774,7 @@ err_out:
774 return err; 774 return err;
775} 775}
776 776
777static int __devexit 777static int
778ltq_etop_remove(struct platform_device *pdev) 778ltq_etop_remove(struct platform_device *pdev)
779{ 779{
780 struct net_device *dev = platform_get_drvdata(pdev); 780 struct net_device *dev = platform_get_drvdata(pdev);
@@ -789,7 +789,7 @@ ltq_etop_remove(struct platform_device *pdev)
789} 789}
790 790
791static struct platform_driver ltq_mii_driver = { 791static struct platform_driver ltq_mii_driver = {
792 .remove = __devexit_p(ltq_etop_remove), 792 .remove = ltq_etop_remove,
793 .driver = { 793 .driver = {
794 .name = "ltq_etop", 794 .name = "ltq_etop",
795 .owner = THIS_MODULE, 795 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 59489722e898..10d678d3dd01 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1131,7 +1131,7 @@ static int pxa168_eth_open(struct net_device *dev)
1131 err = request_irq(dev->irq, pxa168_eth_int_handler, 1131 err = request_irq(dev->irq, pxa168_eth_int_handler,
1132 IRQF_DISABLED, dev->name, dev); 1132 IRQF_DISABLED, dev->name, dev);
1133 if (err) { 1133 if (err) {
1134 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 1134 dev_err(&dev->dev, "can't assign irq\n");
1135 return -EAGAIN; 1135 return -EAGAIN;
1136 } 1136 }
1137 pep->rx_resource_err = 0; 1137 pep->rx_resource_err = 0;
@@ -1201,9 +1201,8 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
1201 */ 1201 */
1202 pxa168_eth_stop(dev); 1202 pxa168_eth_stop(dev);
1203 if (pxa168_eth_open(dev)) { 1203 if (pxa168_eth_open(dev)) {
1204 dev_printk(KERN_ERR, &dev->dev, 1204 dev_err(&dev->dev,
1205 "fatal error on re-opening device after " 1205 "fatal error on re-opening device after MTU change\n");
1206 "MTU change\n");
1207 } 1206 }
1208 1207
1209 return 0; 1208 return 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index d19a143aa5a8..5544a1fe2f94 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3860,7 +3860,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3860 return dev; 3860 return dev;
3861} 3861}
3862 3862
3863static void __devinit skge_show_addr(struct net_device *dev) 3863static void skge_show_addr(struct net_device *dev)
3864{ 3864{
3865 const struct skge_port *skge = netdev_priv(dev); 3865 const struct skge_port *skge = netdev_priv(dev);
3866 3866
@@ -3869,8 +3869,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
3869 3869
3870static int only_32bit_dma; 3870static int only_32bit_dma;
3871 3871
3872static int __devinit skge_probe(struct pci_dev *pdev, 3872static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3873 const struct pci_device_id *ent)
3874{ 3873{
3875 struct net_device *dev, *dev1; 3874 struct net_device *dev, *dev1;
3876 struct skge_hw *hw; 3875 struct skge_hw *hw;
@@ -4012,7 +4011,7 @@ err_out:
4012 return err; 4011 return err;
4013} 4012}
4014 4013
4015static void __devexit skge_remove(struct pci_dev *pdev) 4014static void skge_remove(struct pci_dev *pdev)
4016{ 4015{
4017 struct skge_hw *hw = pci_get_drvdata(pdev); 4016 struct skge_hw *hw = pci_get_drvdata(pdev);
4018 struct net_device *dev0, *dev1; 4017 struct net_device *dev0, *dev1;
@@ -4142,7 +4141,7 @@ static struct pci_driver skge_driver = {
4142 .name = DRV_NAME, 4141 .name = DRV_NAME,
4143 .id_table = skge_id_table, 4142 .id_table = skge_id_table,
4144 .probe = skge_probe, 4143 .probe = skge_probe,
4145 .remove = __devexit_p(skge_remove), 4144 .remove = skge_remove,
4146 .shutdown = skge_shutdown, 4145 .shutdown = skge_shutdown,
4147 .driver.pm = SKGE_PM_OPS, 4146 .driver.pm = SKGE_PM_OPS,
4148}; 4147};
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 78946feab4a2..3269eb38cc57 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3140,7 +3140,7 @@ static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
3140} 3140}
3141 3141
3142 3142
3143static int __devinit sky2_init(struct sky2_hw *hw) 3143static int sky2_init(struct sky2_hw *hw)
3144{ 3144{
3145 u8 t8; 3145 u8 t8;
3146 3146
@@ -4741,9 +4741,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4741}; 4741};
4742 4742
4743/* Initialize network device */ 4743/* Initialize network device */
4744static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, 4744static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4745 unsigned port, 4745 int highmem, int wol)
4746 int highmem, int wol)
4747{ 4746{
4748 struct sky2_port *sky2; 4747 struct sky2_port *sky2;
4749 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 4748 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
@@ -4807,7 +4806,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4807 return dev; 4806 return dev;
4808} 4807}
4809 4808
4810static void __devinit sky2_show_addr(struct net_device *dev) 4809static void sky2_show_addr(struct net_device *dev)
4811{ 4810{
4812 const struct sky2_port *sky2 = netdev_priv(dev); 4811 const struct sky2_port *sky2 = netdev_priv(dev);
4813 4812
@@ -4815,7 +4814,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
4815} 4814}
4816 4815
4817/* Handle software interrupt used during MSI test */ 4816/* Handle software interrupt used during MSI test */
4818static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id) 4817static irqreturn_t sky2_test_intr(int irq, void *dev_id)
4819{ 4818{
4820 struct sky2_hw *hw = dev_id; 4819 struct sky2_hw *hw = dev_id;
4821 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); 4820 u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
@@ -4834,7 +4833,7 @@ static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
4834} 4833}
4835 4834
4836/* Test interrupt path by forcing a a software IRQ */ 4835/* Test interrupt path by forcing a a software IRQ */
4837static int __devinit sky2_test_msi(struct sky2_hw *hw) 4836static int sky2_test_msi(struct sky2_hw *hw)
4838{ 4837{
4839 struct pci_dev *pdev = hw->pdev; 4838 struct pci_dev *pdev = hw->pdev;
4840 int err; 4839 int err;
@@ -4896,8 +4895,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4896 return buf; 4895 return buf;
4897} 4896}
4898 4897
4899static int __devinit sky2_probe(struct pci_dev *pdev, 4898static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4900 const struct pci_device_id *ent)
4901{ 4899{
4902 struct net_device *dev, *dev1; 4900 struct net_device *dev, *dev1;
4903 struct sky2_hw *hw; 4901 struct sky2_hw *hw;
@@ -4919,13 +4917,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4919 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); 4917 err = pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
4920 if (err) { 4918 if (err) {
4921 dev_err(&pdev->dev, "PCI read config failed\n"); 4919 dev_err(&pdev->dev, "PCI read config failed\n");
4922 goto err_out; 4920 goto err_out_disable;
4923 } 4921 }
4924 4922
4925 if (~reg == 0) { 4923 if (~reg == 0) {
4926 dev_err(&pdev->dev, "PCI configuration read error\n"); 4924 dev_err(&pdev->dev, "PCI configuration read error\n");
4927 err = -EIO; 4925 err = -EIO;
4928 goto err_out; 4926 goto err_out_disable;
4929 } 4927 }
4930 4928
4931 err = pci_request_regions(pdev, DRV_NAME); 4929 err = pci_request_regions(pdev, DRV_NAME);
@@ -5012,10 +5010,11 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
5012 5010
5013 if (!disable_msi && pci_enable_msi(pdev) == 0) { 5011 if (!disable_msi && pci_enable_msi(pdev) == 0) {
5014 err = sky2_test_msi(hw); 5012 err = sky2_test_msi(hw);
5015 if (err == -EOPNOTSUPP) 5013 if (err) {
5016 pci_disable_msi(pdev); 5014 pci_disable_msi(pdev);
5017 else if (err) 5015 if (err != -EOPNOTSUPP)
5018 goto err_out_free_netdev; 5016 goto err_out_free_netdev;
5017 }
5019 } 5018 }
5020 5019
5021 err = register_netdev(dev); 5020 err = register_netdev(dev);
@@ -5063,10 +5062,10 @@ err_out_unregister_dev1:
5063err_out_free_dev1: 5062err_out_free_dev1:
5064 free_netdev(dev1); 5063 free_netdev(dev1);
5065err_out_unregister: 5064err_out_unregister:
5066 if (hw->flags & SKY2_HW_USE_MSI)
5067 pci_disable_msi(pdev);
5068 unregister_netdev(dev); 5065 unregister_netdev(dev);
5069err_out_free_netdev: 5066err_out_free_netdev:
5067 if (hw->flags & SKY2_HW_USE_MSI)
5068 pci_disable_msi(pdev);
5070 free_netdev(dev); 5069 free_netdev(dev);
5071err_out_free_pci: 5070err_out_free_pci:
5072 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), 5071 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
@@ -5086,7 +5085,7 @@ err_out:
5086 return err; 5085 return err;
5087} 5086}
5088 5087
5089static void __devexit sky2_remove(struct pci_dev *pdev) 5088static void sky2_remove(struct pci_dev *pdev)
5090{ 5089{
5091 struct sky2_hw *hw = pci_get_drvdata(pdev); 5090 struct sky2_hw *hw = pci_get_drvdata(pdev);
5092 int i; 5091 int i;
@@ -5207,7 +5206,7 @@ static struct pci_driver sky2_driver = {
5207 .name = DRV_NAME, 5206 .name = DRV_NAME,
5208 .id_table = sky2_id_table, 5207 .id_table = sky2_id_table,
5209 .probe = sky2_probe, 5208 .probe = sky2_probe,
5210 .remove = __devexit_p(sky2_remove), 5209 .remove = sky2_remove,
5211 .shutdown = sky2_shutdown, 5210 .shutdown = sky2_shutdown,
5212 .driver.pm = SKY2_PM_OPS, 5211 .driver.pm = SKY2_PM_OPS,
5213}; 5212};
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d8099a7903d3..bcdbc14aeff0 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_MELLANOX 5config NET_VENDOR_MELLANOX
6 bool "Mellanox devices" 6 bool "Mellanox devices"
7 default y 7 default y
8 depends on PCI && INET 8 depends on PCI
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5f027f95cc84..eb520ab64014 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -4,9 +4,8 @@
4 4
5config MLX4_EN 5config MLX4_EN
6 tristate "Mellanox Technologies 10Gbit Ethernet support" 6 tristate "Mellanox Technologies 10Gbit Ethernet support"
7 depends on PCI && INET 7 depends on PCI
8 select MLX4_CORE 8 select MLX4_CORE
9 select INET_LRO
10 ---help--- 9 ---help---
11 This driver supports Mellanox Technologies ConnectX Ethernet 10 This driver supports Mellanox Technologies ConnectX Ethernet
12 devices. 11 devices.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 9d0b88eea02b..03447dad07e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -43,6 +43,34 @@
43#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) 43#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
44#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) 44#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
45 45
46static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
47{
48 int i;
49 int err = 0;
50
51 for (i = 0; i < priv->tx_ring_num; i++) {
52 priv->tx_cq[i].moder_cnt = priv->tx_frames;
53 priv->tx_cq[i].moder_time = priv->tx_usecs;
54 err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
55 if (err)
56 return err;
57 }
58
59 if (priv->adaptive_rx_coal)
60 return 0;
61
62 for (i = 0; i < priv->rx_ring_num; i++) {
63 priv->rx_cq[i].moder_cnt = priv->rx_frames;
64 priv->rx_cq[i].moder_time = priv->rx_usecs;
65 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
66 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
67 if (err)
68 return err;
69 }
70
71 return err;
72}
73
46static void 74static void
47mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) 75mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
48{ 76{
@@ -381,7 +409,6 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
381 struct ethtool_coalesce *coal) 409 struct ethtool_coalesce *coal)
382{ 410{
383 struct mlx4_en_priv *priv = netdev_priv(dev); 411 struct mlx4_en_priv *priv = netdev_priv(dev);
384 int err, i;
385 412
386 priv->rx_frames = (coal->rx_max_coalesced_frames == 413 priv->rx_frames = (coal->rx_max_coalesced_frames ==
387 MLX4_EN_AUTO_CONF) ? 414 MLX4_EN_AUTO_CONF) ?
@@ -397,14 +424,6 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
397 coal->tx_max_coalesced_frames != priv->tx_frames) { 424 coal->tx_max_coalesced_frames != priv->tx_frames) {
398 priv->tx_usecs = coal->tx_coalesce_usecs; 425 priv->tx_usecs = coal->tx_coalesce_usecs;
399 priv->tx_frames = coal->tx_max_coalesced_frames; 426 priv->tx_frames = coal->tx_max_coalesced_frames;
400 for (i = 0; i < priv->tx_ring_num; i++) {
401 priv->tx_cq[i].moder_cnt = priv->tx_frames;
402 priv->tx_cq[i].moder_time = priv->tx_usecs;
403 if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
404 en_warn(priv, "Failed changing moderation "
405 "for TX cq %d\n", i);
406 }
407 }
408 } 427 }
409 428
410 /* Set adaptive coalescing params */ 429 /* Set adaptive coalescing params */
@@ -414,18 +433,8 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
414 priv->rx_usecs_high = coal->rx_coalesce_usecs_high; 433 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
415 priv->sample_interval = coal->rate_sample_interval; 434 priv->sample_interval = coal->rate_sample_interval;
416 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; 435 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
417 if (priv->adaptive_rx_coal)
418 return 0;
419 436
420 for (i = 0; i < priv->rx_ring_num; i++) { 437 return mlx4_en_moderation_update(priv);
421 priv->rx_cq[i].moder_cnt = priv->rx_frames;
422 priv->rx_cq[i].moder_time = priv->rx_usecs;
423 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
424 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
425 if (err)
426 return err;
427 }
428 return 0;
429} 438}
430 439
431static int mlx4_en_set_pauseparam(struct net_device *dev, 440static int mlx4_en_set_pauseparam(struct net_device *dev,
@@ -466,7 +475,6 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
466 u32 rx_size, tx_size; 475 u32 rx_size, tx_size;
467 int port_up = 0; 476 int port_up = 0;
468 int err = 0; 477 int err = 0;
469 int i;
470 478
471 if (param->rx_jumbo_pending || param->rx_mini_pending) 479 if (param->rx_jumbo_pending || param->rx_mini_pending)
472 return -EINVAL; 480 return -EINVAL;
@@ -505,14 +513,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
505 en_err(priv, "Failed starting port\n"); 513 en_err(priv, "Failed starting port\n");
506 } 514 }
507 515
508 for (i = 0; i < priv->rx_ring_num; i++) { 516 err = mlx4_en_moderation_update(priv);
509 priv->rx_cq[i].moder_cnt = priv->rx_frames;
510 priv->rx_cq[i].moder_time = priv->rx_usecs;
511 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
512 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
513 if (err)
514 goto out;
515 }
516 517
517out: 518out:
518 mutex_unlock(&mdev->state_lock); 519 mutex_unlock(&mdev->state_lock);
@@ -612,13 +613,17 @@ static int mlx4_en_validate_flow(struct net_device *dev,
612 struct ethtool_usrip4_spec *l3_mask; 613 struct ethtool_usrip4_spec *l3_mask;
613 struct ethtool_tcpip4_spec *l4_mask; 614 struct ethtool_tcpip4_spec *l4_mask;
614 struct ethhdr *eth_mask; 615 struct ethhdr *eth_mask;
615 u64 full_mac = ~0ull;
616 u64 zero_mac = 0;
617 616
618 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) 617 if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
619 return -EINVAL; 618 return -EINVAL;
620 619
621 switch (cmd->fs.flow_type & ~FLOW_EXT) { 620 if (cmd->fs.flow_type & FLOW_MAC_EXT) {
621 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
622 if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
623 return -EINVAL;
624 }
625
626 switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
622 case TCP_V4_FLOW: 627 case TCP_V4_FLOW:
623 case UDP_V4_FLOW: 628 case UDP_V4_FLOW:
624 if (cmd->fs.m_u.tcp_ip4_spec.tos) 629 if (cmd->fs.m_u.tcp_ip4_spec.tos)
@@ -643,11 +648,11 @@ static int mlx4_en_validate_flow(struct net_device *dev,
643 case ETHER_FLOW: 648 case ETHER_FLOW:
644 eth_mask = &cmd->fs.m_u.ether_spec; 649 eth_mask = &cmd->fs.m_u.ether_spec;
645 /* source mac mask must not be set */ 650 /* source mac mask must not be set */
646 if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN)) 651 if (!is_zero_ether_addr(eth_mask->h_source))
647 return -EINVAL; 652 return -EINVAL;
648 653
649 /* dest mac mask must be ff:ff:ff:ff:ff:ff */ 654 /* dest mac mask must be ff:ff:ff:ff:ff:ff */
650 if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN)) 655 if (!is_broadcast_ether_addr(eth_mask->h_dest))
651 return -EINVAL; 656 return -EINVAL;
652 657
653 if (!all_zeros_or_all_ones(eth_mask->h_proto)) 658 if (!all_zeros_or_all_ones(eth_mask->h_proto))
@@ -746,7 +751,6 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
746 struct list_head *rule_list_h) 751 struct list_head *rule_list_h)
747{ 752{
748 int err; 753 int err;
749 u64 mac;
750 __be64 be_mac; 754 __be64 be_mac;
751 struct ethhdr *eth_spec; 755 struct ethhdr *eth_spec;
752 struct mlx4_en_priv *priv = netdev_priv(dev); 756 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -761,12 +765,16 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
761 if (!spec_l2) 765 if (!spec_l2)
762 return -ENOMEM; 766 return -ENOMEM;
763 767
764 mac = priv->mac & MLX4_MAC_MASK; 768 if (cmd->fs.flow_type & FLOW_MAC_EXT) {
765 be_mac = cpu_to_be64(mac << 16); 769 memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
770 } else {
771 u64 mac = priv->mac & MLX4_MAC_MASK;
772 be_mac = cpu_to_be64(mac << 16);
773 }
766 774
767 spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; 775 spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
768 memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); 776 memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
769 if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW) 777 if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
770 memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN); 778 memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
771 779
772 if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) { 780 if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
@@ -776,7 +784,7 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
776 784
777 list_add_tail(&spec_l2->list, rule_list_h); 785 list_add_tail(&spec_l2->list, rule_list_h);
778 786
779 switch (cmd->fs.flow_type & ~FLOW_EXT) { 787 switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
780 case ETHER_FLOW: 788 case ETHER_FLOW:
781 eth_spec = &cmd->fs.h_u.ether_spec; 789 eth_spec = &cmd->fs.h_u.ether_spec;
782 memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN); 790 memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
@@ -998,6 +1006,73 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
998 return err; 1006 return err;
999} 1007}
1000 1008
1009static void mlx4_en_get_channels(struct net_device *dev,
1010 struct ethtool_channels *channel)
1011{
1012 struct mlx4_en_priv *priv = netdev_priv(dev);
1013
1014 memset(channel, 0, sizeof(*channel));
1015
1016 channel->max_rx = MAX_RX_RINGS;
1017 channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
1018
1019 channel->rx_count = priv->rx_ring_num;
1020 channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
1021}
1022
1023static int mlx4_en_set_channels(struct net_device *dev,
1024 struct ethtool_channels *channel)
1025{
1026 struct mlx4_en_priv *priv = netdev_priv(dev);
1027 struct mlx4_en_dev *mdev = priv->mdev;
1028 int port_up;
1029 int err = 0;
1030
1031 if (channel->other_count || channel->combined_count ||
1032 channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
1033 channel->rx_count > MAX_RX_RINGS ||
1034 !channel->tx_count || !channel->rx_count)
1035 return -EINVAL;
1036
1037 mutex_lock(&mdev->state_lock);
1038 if (priv->port_up) {
1039 port_up = 1;
1040 mlx4_en_stop_port(dev);
1041 }
1042
1043 mlx4_en_free_resources(priv);
1044
1045 priv->num_tx_rings_p_up = channel->tx_count;
1046 priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
1047 priv->rx_ring_num = channel->rx_count;
1048
1049 err = mlx4_en_alloc_resources(priv);
1050 if (err) {
1051 en_err(priv, "Failed reallocating port resources\n");
1052 goto out;
1053 }
1054
1055 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1056 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1057
1058 mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
1059
1060 en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
1061 en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
1062
1063 if (port_up) {
1064 err = mlx4_en_start_port(dev);
1065 if (err)
1066 en_err(priv, "Failed starting port\n");
1067 }
1068
1069 err = mlx4_en_moderation_update(priv);
1070
1071out:
1072 mutex_unlock(&mdev->state_lock);
1073 return err;
1074}
1075
1001const struct ethtool_ops mlx4_en_ethtool_ops = { 1076const struct ethtool_ops mlx4_en_ethtool_ops = {
1002 .get_drvinfo = mlx4_en_get_drvinfo, 1077 .get_drvinfo = mlx4_en_get_drvinfo,
1003 .get_settings = mlx4_en_get_settings, 1078 .get_settings = mlx4_en_get_settings,
@@ -1022,6 +1097,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
1022 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, 1097 .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
1023 .get_rxfh_indir = mlx4_en_get_rxfh_indir, 1098 .get_rxfh_indir = mlx4_en_get_rxfh_indir,
1024 .set_rxfh_indir = mlx4_en_set_rxfh_indir, 1099 .set_rxfh_indir = mlx4_en_set_rxfh_indir,
1100 .get_channels = mlx4_en_get_channels,
1101 .set_channels = mlx4_en_set_channels,
1025}; 1102};
1026 1103
1027 1104
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a52922ed85c1..3a2b8c65642d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -250,7 +250,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
250 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS, 250 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
251 min_t(int, 251 min_t(int,
252 dev->caps.num_comp_vectors, 252 dev->caps.num_comp_vectors,
253 MAX_RX_RINGS))); 253 DEF_RX_RINGS)));
254 } else { 254 } else {
255 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two( 255 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
256 min_t(int, dev->caps.comp_pool/ 256 min_t(int, dev->caps.comp_pool/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index edd9cb8d3e1d..7d1287f81a31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -47,11 +47,11 @@
47#include "mlx4_en.h" 47#include "mlx4_en.h"
48#include "en_port.h" 48#include "en_port.h"
49 49
50static int mlx4_en_setup_tc(struct net_device *dev, u8 up) 50int mlx4_en_setup_tc(struct net_device *dev, u8 up)
51{ 51{
52 struct mlx4_en_priv *priv = netdev_priv(dev); 52 struct mlx4_en_priv *priv = netdev_priv(dev);
53 int i; 53 int i;
54 unsigned int q, offset = 0; 54 unsigned int offset = 0;
55 55
56 if (up && up != MLX4_EN_NUM_UP) 56 if (up && up != MLX4_EN_NUM_UP)
57 return -EINVAL; 57 return -EINVAL;
@@ -59,10 +59,9 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
59 netdev_set_num_tc(dev, up); 59 netdev_set_num_tc(dev, up);
60 60
61 /* Partition Tx queues evenly amongst UP's */ 61 /* Partition Tx queues evenly amongst UP's */
62 q = priv->tx_ring_num / up;
63 for (i = 0; i < up; i++) { 62 for (i = 0; i < up; i++) {
64 netdev_set_tc_queue(dev, i, q, offset); 63 netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
65 offset += q; 64 offset += priv->num_tx_rings_p_up;
66 } 65 }
67 66
68 return 0; 67 return 0;
@@ -870,7 +869,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
870 /* If we haven't received a specific coalescing setting 869 /* If we haven't received a specific coalescing setting
871 * (module param), we set the moderation parameters as follows: 870 * (module param), we set the moderation parameters as follows:
872 * - moder_cnt is set to the number of mtu sized packets to 871 * - moder_cnt is set to the number of mtu sized packets to
873 * satisfy our coelsing target. 872 * satisfy our coalescing target.
874 * - moder_time is set to a fixed value. 873 * - moder_time is set to a fixed value.
875 */ 874 */
876 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 875 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
@@ -1114,7 +1113,7 @@ int mlx4_en_start_port(struct net_device *dev)
1114 /* Configure ring */ 1113 /* Configure ring */
1115 tx_ring = &priv->tx_ring[i]; 1114 tx_ring = &priv->tx_ring[i];
1116 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1115 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
1117 i / priv->mdev->profile.num_tx_rings_p_up); 1116 i / priv->num_tx_rings_p_up);
1118 if (err) { 1117 if (err) {
1119 en_err(priv, "Failed allocating Tx ring\n"); 1118 en_err(priv, "Failed allocating Tx ring\n");
1120 mlx4_en_deactivate_cq(priv, cq); 1119 mlx4_en_deactivate_cq(priv, cq);
@@ -1564,10 +1563,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1564 int err; 1563 int err;
1565 1564
1566 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), 1565 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
1567 prof->tx_ring_num, prof->rx_ring_num); 1566 MAX_TX_RINGS, MAX_RX_RINGS);
1568 if (dev == NULL) 1567 if (dev == NULL)
1569 return -ENOMEM; 1568 return -ENOMEM;
1570 1569
1570 netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
1571 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
1572
1571 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 1573 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
1572 dev->dev_id = port - 1; 1574 dev->dev_id = port - 1;
1573 1575
@@ -1586,15 +1588,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1586 priv->flags = prof->flags; 1588 priv->flags = prof->flags;
1587 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | 1589 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
1588 MLX4_WQE_CTRL_SOLICITED); 1590 MLX4_WQE_CTRL_SOLICITED);
1591 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
1589 priv->tx_ring_num = prof->tx_ring_num; 1592 priv->tx_ring_num = prof->tx_ring_num;
1590 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * 1593
1591 priv->tx_ring_num, GFP_KERNEL); 1594 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
1595 GFP_KERNEL);
1592 if (!priv->tx_ring) { 1596 if (!priv->tx_ring) {
1593 err = -ENOMEM; 1597 err = -ENOMEM;
1594 goto out; 1598 goto out;
1595 } 1599 }
1596 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * priv->tx_ring_num, 1600 priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_RX_RINGS,
1597 GFP_KERNEL); 1601 GFP_KERNEL);
1598 if (!priv->tx_cq) { 1602 if (!priv->tx_cq) {
1599 err = -ENOMEM; 1603 err = -ENOMEM;
1600 goto out; 1604 goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 5aba5ecdf1e2..f76c9671f362 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -630,7 +630,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
630 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && 630 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
631 (cqe->checksum == cpu_to_be16(0xffff))) { 631 (cqe->checksum == cpu_to_be16(0xffff))) {
632 ring->csum_ok++; 632 ring->csum_ok++;
633 /* This packet is eligible for LRO if it is: 633 /* This packet is eligible for GRO if it is:
634 * - DIX Ethernet (type interpretation) 634 * - DIX Ethernet (type interpretation)
635 * - TCP/IP (v4) 635 * - TCP/IP (v4)
636 * - without IP options 636 * - without IP options
@@ -667,7 +667,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
667 goto next; 667 goto next;
668 } 668 }
669 669
670 /* LRO not possible, complete processing here */ 670 /* GRO not possible, complete processing here */
671 ip_summed = CHECKSUM_UNNECESSARY; 671 ip_summed = CHECKSUM_UNNECESSARY;
672 } else { 672 } else {
673 ip_summed = CHECKSUM_NONE; 673 ip_summed = CHECKSUM_NONE;
@@ -710,11 +710,8 @@ next:
710 ++cq->mcq.cons_index; 710 ++cq->mcq.cons_index;
711 index = (cq->mcq.cons_index) & ring->size_mask; 711 index = (cq->mcq.cons_index) & ring->size_mask;
712 cqe = &cq->buf[index]; 712 cqe = &cq->buf[index];
713 if (++polled == budget) { 713 if (++polled == budget)
714 /* We are here because we reached the NAPI budget -
715 * flush only pending LRO sessions */
716 goto out; 714 goto out;
717 }
718 } 715 }
719 716
720out: 717out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b35094c590ba..1f571d009155 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -523,7 +523,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
523u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 523u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
524{ 524{
525 struct mlx4_en_priv *priv = netdev_priv(dev); 525 struct mlx4_en_priv *priv = netdev_priv(dev);
526 u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up; 526 u16 rings_p_up = priv->num_tx_rings_p_up;
527 u8 up = 0; 527 u8 up = 0;
528 528
529 if (dev->num_tc) 529 if (dev->num_tc)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2aa80afd98d2..200cc0ec8052 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
98#define HCA_GLOBAL_CAP_MASK 0 98#define HCA_GLOBAL_CAP_MASK 0
99#define PF_CONTEXT_BEHAVIOUR_MASK 0 99#define PF_CONTEXT_BEHAVIOUR_MASK 0
100 100
101static char mlx4_version[] __devinitdata = 101static char mlx4_version[] =
102 DRV_NAME ": Mellanox ConnectX core driver v" 102 DRV_NAME ": Mellanox ConnectX core driver v"
103 DRV_VERSION " (" DRV_RELDATE ")\n"; 103 DRV_VERSION " (" DRV_RELDATE ")\n";
104 104
@@ -2224,8 +2224,7 @@ err_disable_pdev:
2224 return err; 2224 return err;
2225} 2225}
2226 2226
2227static int __devinit mlx4_init_one(struct pci_dev *pdev, 2227static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
2228 const struct pci_device_id *id)
2229{ 2228{
2230 printk_once(KERN_INFO "%s", mlx4_version); 2229 printk_once(KERN_INFO "%s", mlx4_version);
2231 2230
@@ -2391,7 +2390,7 @@ static struct pci_driver mlx4_driver = {
2391 .name = DRV_NAME, 2390 .name = DRV_NAME,
2392 .id_table = mlx4_pci_table, 2391 .id_table = mlx4_pci_table,
2393 .probe = mlx4_init_one, 2392 .probe = mlx4_init_one,
2394 .remove = __devexit_p(mlx4_remove_one), 2393 .remove = mlx4_remove_one,
2395 .err_handler = &mlx4_err_handler, 2394 .err_handler = &mlx4_err_handler,
2396}; 2395};
2397 2396
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9d27e42264e2..334ec483480b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -67,7 +67,8 @@
67 67
68#define MLX4_EN_PAGE_SHIFT 12 68#define MLX4_EN_PAGE_SHIFT 12
69#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 69#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
70#define MAX_RX_RINGS 16 70#define DEF_RX_RINGS 16
71#define MAX_RX_RINGS 128
71#define MIN_RX_RINGS 4 72#define MIN_RX_RINGS 4
72#define TXBB_SIZE 64 73#define TXBB_SIZE 64
73#define HEADROOM (2048 / TXBB_SIZE + 1) 74#define HEADROOM (2048 / TXBB_SIZE + 1)
@@ -95,8 +96,6 @@
95#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384) 96#define MLX4_EN_ALLOC_SIZE PAGE_ALIGN(16384)
96#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE) 97#define MLX4_EN_ALLOC_ORDER get_order(MLX4_EN_ALLOC_SIZE)
97 98
98#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
99
100/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU 99/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
101 * and 4K allocations) */ 100 * and 4K allocations) */
102enum { 101enum {
@@ -120,13 +119,15 @@ enum {
120#define MLX4_EN_NUM_UP 8 119#define MLX4_EN_NUM_UP 8
121#define MLX4_EN_DEF_TX_RING_SIZE 512 120#define MLX4_EN_DEF_TX_RING_SIZE 512
122#define MLX4_EN_DEF_RX_RING_SIZE 1024 121#define MLX4_EN_DEF_RX_RING_SIZE 1024
122#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
123 MLX4_EN_NUM_UP)
123 124
124/* Target number of packets to coalesce with interrupt moderation */ 125/* Target number of packets to coalesce with interrupt moderation */
125#define MLX4_EN_RX_COAL_TARGET 44 126#define MLX4_EN_RX_COAL_TARGET 44
126#define MLX4_EN_RX_COAL_TIME 0x10 127#define MLX4_EN_RX_COAL_TIME 0x10
127 128
128#define MLX4_EN_TX_COAL_PKTS 16 129#define MLX4_EN_TX_COAL_PKTS 16
129#define MLX4_EN_TX_COAL_TIME 0x80 130#define MLX4_EN_TX_COAL_TIME 0x10
130 131
131#define MLX4_EN_RX_RATE_LOW 400000 132#define MLX4_EN_RX_RATE_LOW 400000
132#define MLX4_EN_RX_COAL_TIME_LOW 0 133#define MLX4_EN_RX_COAL_TIME_LOW 0
@@ -290,21 +291,6 @@ struct mlx4_en_rx_ring {
290 unsigned long csum_none; 291 unsigned long csum_none;
291}; 292};
292 293
293
294static inline int mlx4_en_can_lro(__be16 status)
295{
296 return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
297 MLX4_CQE_STATUS_IPV4F |
298 MLX4_CQE_STATUS_IPV6 |
299 MLX4_CQE_STATUS_IPV4OPT |
300 MLX4_CQE_STATUS_TCP |
301 MLX4_CQE_STATUS_UDP |
302 MLX4_CQE_STATUS_IPOK)) ==
303 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
304 MLX4_CQE_STATUS_IPOK |
305 MLX4_CQE_STATUS_TCP);
306}
307
308struct mlx4_en_cq { 294struct mlx4_en_cq {
309 struct mlx4_cq mcq; 295 struct mlx4_cq mcq;
310 struct mlx4_hwq_resources wqres; 296 struct mlx4_hwq_resources wqres;
@@ -493,6 +479,7 @@ struct mlx4_en_priv {
493 u32 flags; 479 u32 flags;
494#define MLX4_EN_FLAG_PROMISC 0x1 480#define MLX4_EN_FLAG_PROMISC 0x1
495#define MLX4_EN_FLAG_MC_PROMISC 0x2 481#define MLX4_EN_FLAG_MC_PROMISC 0x2
482 u8 num_tx_rings_p_up;
496 u32 tx_ring_num; 483 u32 tx_ring_num;
497 u32 rx_ring_num; 484 u32 rx_ring_num;
498 u32 rx_skb_size; 485 u32 rx_skb_size;
@@ -613,6 +600,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
613extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops; 600extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
614#endif 601#endif
615 602
603int mlx4_en_setup_tc(struct net_device *dev, u8 up);
604
616#ifdef CONFIG_RFS_ACCEL 605#ifdef CONFIG_RFS_ACCEL
617void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, 606void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
618 struct mlx4_en_rx_ring *rx_ring); 607 struct mlx4_en_rx_ring *rx_ring);
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index dccae1d1743a..07a6ebc47c92 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1249,9 +1249,6 @@ ks8695_open(struct net_device *ndev)
1249 struct ks8695_priv *ksp = netdev_priv(ndev); 1249 struct ks8695_priv *ksp = netdev_priv(ndev);
1250 int ret; 1250 int ret;
1251 1251
1252 if (!is_valid_ether_addr(ndev->dev_addr))
1253 return -EADDRNOTAVAIL;
1254
1255 ks8695_reset(ksp); 1252 ks8695_reset(ksp);
1256 1253
1257 ks8695_update_mac(ksp); 1254 ks8695_update_mac(ksp);
@@ -1277,7 +1274,7 @@ ks8695_open(struct net_device *ndev)
1277 * This initialises the LAN switch in the KS8695 to a known-good 1274 * This initialises the LAN switch in the KS8695 to a known-good
1278 * set of defaults. 1275 * set of defaults.
1279 */ 1276 */
1280static void __devinit 1277static void
1281ks8695_init_switch(struct ks8695_priv *ksp) 1278ks8695_init_switch(struct ks8695_priv *ksp)
1282{ 1279{
1283 u32 ctrl; 1280 u32 ctrl;
@@ -1305,7 +1302,7 @@ ks8695_init_switch(struct ks8695_priv *ksp)
1305 * This initialises a KS8695's WAN phy to sensible values for 1302 * This initialises a KS8695's WAN phy to sensible values for
1306 * autonegotiation etc. 1303 * autonegotiation etc.
1307 */ 1304 */
1308static void __devinit 1305static void
1309ks8695_init_wan_phy(struct ks8695_priv *ksp) 1306ks8695_init_wan_phy(struct ks8695_priv *ksp)
1310{ 1307{
1311 u32 ctrl; 1308 u32 ctrl;
@@ -1349,7 +1346,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
1349 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan 1346 * wan ports, and an IORESOURCE_IRQ for the link IRQ for the wan
1350 * port. 1347 * port.
1351 */ 1348 */
1352static int __devinit 1349static int
1353ks8695_probe(struct platform_device *pdev) 1350ks8695_probe(struct platform_device *pdev)
1354{ 1351{
1355 struct ks8695_priv *ksp; 1352 struct ks8695_priv *ksp;
@@ -1597,7 +1594,7 @@ ks8695_drv_resume(struct platform_device *pdev)
1597 * 1594 *
1598 * This unregisters and releases a KS8695 ethernet device. 1595 * This unregisters and releases a KS8695 ethernet device.
1599 */ 1596 */
1600static int __devexit 1597static int
1601ks8695_drv_remove(struct platform_device *pdev) 1598ks8695_drv_remove(struct platform_device *pdev)
1602{ 1599{
1603 struct net_device *ndev = platform_get_drvdata(pdev); 1600 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -1620,7 +1617,7 @@ static struct platform_driver ks8695_driver = {
1620 .owner = THIS_MODULE, 1617 .owner = THIS_MODULE,
1621 }, 1618 },
1622 .probe = ks8695_probe, 1619 .probe = ks8695_probe,
1623 .remove = __devexit_p(ks8695_drv_remove), 1620 .remove = ks8695_drv_remove,
1624 .suspend = ks8695_drv_suspend, 1621 .suspend = ks8695_drv_suspend,
1625 .resume = ks8695_drv_resume, 1622 .resume = ks8695_drv_resume,
1626}; 1623};
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 24fb049ac2f2..b71eb39ab448 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1141,7 +1141,7 @@ static const struct ethtool_ops ks8842_ethtool_ops = {
1141 .get_link = ethtool_op_get_link, 1141 .get_link = ethtool_op_get_link,
1142}; 1142};
1143 1143
1144static int __devinit ks8842_probe(struct platform_device *pdev) 1144static int ks8842_probe(struct platform_device *pdev)
1145{ 1145{
1146 int err = -ENOMEM; 1146 int err = -ENOMEM;
1147 struct resource *iomem; 1147 struct resource *iomem;
@@ -1240,7 +1240,7 @@ err_mem_region:
1240 return err; 1240 return err;
1241} 1241}
1242 1242
1243static int __devexit ks8842_remove(struct platform_device *pdev) 1243static int ks8842_remove(struct platform_device *pdev)
1244{ 1244{
1245 struct net_device *netdev = platform_get_drvdata(pdev); 1245 struct net_device *netdev = platform_get_drvdata(pdev);
1246 struct ks8842_adapter *adapter = netdev_priv(netdev); 1246 struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = {
1262 .owner = THIS_MODULE, 1262 .owner = THIS_MODULE,
1263 }, 1263 },
1264 .probe = ks8842_probe, 1264 .probe = ks8842_probe,
1265 .remove = __devexit_p(ks8842_remove), 1265 .remove = ks8842_remove,
1266}; 1266};
1267 1267
1268module_platform_driver(ks8842_platform_driver); 1268module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 1540ebeb8669..286816a4e783 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1415,7 +1415,7 @@ static int ks8851_resume(struct spi_device *spi)
1415#define ks8851_resume NULL 1415#define ks8851_resume NULL
1416#endif 1416#endif
1417 1417
1418static int __devinit ks8851_probe(struct spi_device *spi) 1418static int ks8851_probe(struct spi_device *spi)
1419{ 1419{
1420 struct net_device *ndev; 1420 struct net_device *ndev;
1421 struct ks8851_net *ks; 1421 struct ks8851_net *ks;
@@ -1534,7 +1534,7 @@ err_irq:
1534 return ret; 1534 return ret;
1535} 1535}
1536 1536
1537static int __devexit ks8851_remove(struct spi_device *spi) 1537static int ks8851_remove(struct spi_device *spi)
1538{ 1538{
1539 struct ks8851_net *priv = dev_get_drvdata(&spi->dev); 1539 struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
1540 1540
@@ -1554,7 +1554,7 @@ static struct spi_driver ks8851_driver = {
1554 .owner = THIS_MODULE, 1554 .owner = THIS_MODULE,
1555 }, 1555 },
1556 .probe = ks8851_probe, 1556 .probe = ks8851_probe,
1557 .remove = __devexit_p(ks8851_remove), 1557 .remove = ks8851_remove,
1558 .suspend = ks8851_suspend, 1558 .suspend = ks8851_suspend,
1559 .resume = ks8851_resume, 1559 .resume = ks8851_resume,
1560}; 1560};
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 38529edfe350..ef8f9f92e547 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1506,7 +1506,7 @@ static int ks_hw_init(struct ks_net *ks)
1506} 1506}
1507 1507
1508 1508
1509static int __devinit ks8851_probe(struct platform_device *pdev) 1509static int ks8851_probe(struct platform_device *pdev)
1510{ 1510{
1511 int err = -ENOMEM; 1511 int err = -ENOMEM;
1512 struct resource *io_d, *io_c; 1512 struct resource *io_d, *io_c;
@@ -1641,7 +1641,7 @@ err_mem_region:
1641 return err; 1641 return err;
1642} 1642}
1643 1643
1644static int __devexit ks8851_remove(struct platform_device *pdev) 1644static int ks8851_remove(struct platform_device *pdev)
1645{ 1645{
1646 struct net_device *netdev = platform_get_drvdata(pdev); 1646 struct net_device *netdev = platform_get_drvdata(pdev);
1647 struct ks_net *ks = netdev_priv(netdev); 1647 struct ks_net *ks = netdev_priv(netdev);
@@ -1663,7 +1663,7 @@ static struct platform_driver ks8851_platform_driver = {
1663 .owner = THIS_MODULE, 1663 .owner = THIS_MODULE,
1664 }, 1664 },
1665 .probe = ks8851_probe, 1665 .probe = ks8851_probe,
1666 .remove = __devexit_p(ks8851_remove), 1666 .remove = ks8851_remove,
1667}; 1667};
1668 1668
1669module_platform_driver(ks8851_platform_driver); 1669module_platform_driver(ks8851_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 69e01977a1dd..093d594435e1 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1487,7 +1487,7 @@ struct dev_priv {
1487#define DRV_VERSION "1.0.0" 1487#define DRV_VERSION "1.0.0"
1488#define DRV_RELDATE "Feb 8, 2010" 1488#define DRV_RELDATE "Feb 8, 2010"
1489 1489
1490static char version[] __devinitdata = 1490static char version[] =
1491 "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")"; 1491 "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
1492 1492
1493static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 }; 1493static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
@@ -6919,8 +6919,7 @@ static void read_other_addr(struct ksz_hw *hw)
6919#define PCI_VENDOR_ID_MICREL_KS 0x16c6 6919#define PCI_VENDOR_ID_MICREL_KS 0x16c6
6920#endif 6920#endif
6921 6921
6922static int __devinit pcidev_init(struct pci_dev *pdev, 6922static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
6923 const struct pci_device_id *id)
6924{ 6923{
6925 struct net_device *dev; 6924 struct net_device *dev;
6926 struct dev_priv *priv; 6925 struct dev_priv *priv;
@@ -7243,18 +7242,7 @@ static struct pci_driver pci_device_driver = {
7243 .remove = pcidev_exit 7242 .remove = pcidev_exit
7244}; 7243};
7245 7244
7246static int __init ksz884x_init_module(void) 7245module_pci_driver(pci_device_driver);
7247{
7248 return pci_register_driver(&pci_device_driver);
7249}
7250
7251static void __exit ksz884x_cleanup_module(void)
7252{
7253 pci_unregister_driver(&pci_device_driver);
7254}
7255
7256module_init(ksz884x_init_module);
7257module_exit(ksz884x_cleanup_module);
7258 7246
7259MODULE_DESCRIPTION("KSZ8841/2 PCI network driver"); 7247MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
7260MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>"); 7248MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 6118bdad244f..a99456c3dd87 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1541,7 +1541,7 @@ static const struct net_device_ops enc28j60_netdev_ops = {
1541 .ndo_validate_addr = eth_validate_addr, 1541 .ndo_validate_addr = eth_validate_addr,
1542}; 1542};
1543 1543
1544static int __devinit enc28j60_probe(struct spi_device *spi) 1544static int enc28j60_probe(struct spi_device *spi)
1545{ 1545{
1546 struct net_device *dev; 1546 struct net_device *dev;
1547 struct enc28j60_net *priv; 1547 struct enc28j60_net *priv;
@@ -1617,7 +1617,7 @@ error_alloc:
1617 return ret; 1617 return ret;
1618} 1618}
1619 1619
1620static int __devexit enc28j60_remove(struct spi_device *spi) 1620static int enc28j60_remove(struct spi_device *spi)
1621{ 1621{
1622 struct enc28j60_net *priv = dev_get_drvdata(&spi->dev); 1622 struct enc28j60_net *priv = dev_get_drvdata(&spi->dev);
1623 1623
@@ -1637,7 +1637,7 @@ static struct spi_driver enc28j60_driver = {
1637 .owner = THIS_MODULE, 1637 .owner = THIS_MODULE,
1638 }, 1638 },
1639 .probe = enc28j60_probe, 1639 .probe = enc28j60_probe,
1640 .remove = __devexit_p(enc28j60_remove), 1640 .remove = enc28j60_remove,
1641}; 1641};
1642 1642
1643static int __init enc28j60_init(void) 1643static int __init enc28j60_init(void)
diff --git a/drivers/net/ethernet/myricom/Kconfig b/drivers/net/ethernet/myricom/Kconfig
index 540f0c6fc160..3932d081fa21 100644
--- a/drivers/net/ethernet/myricom/Kconfig
+++ b/drivers/net/ethernet/myricom/Kconfig
@@ -23,7 +23,6 @@ config MYRI10GE
23 depends on PCI && INET 23 depends on PCI && INET
24 select FW_LOADER 24 select FW_LOADER
25 select CRC32 25 select CRC32
26 select INET_LRO
27 ---help--- 26 ---help---
28 This driver supports Myricom Myri-10G Dual Protocol interface in 27 This driver supports Myricom Myri-10G Dual Protocol interface in
29 Ethernet mode. If the eeprom on your board is not recent enough, 28 Ethernet mode. If the eeprom on your board is not recent enough,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 83516e3369c9..f8408d6e961c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -50,7 +50,6 @@
50#include <linux/etherdevice.h> 50#include <linux/etherdevice.h>
51#include <linux/if_ether.h> 51#include <linux/if_ether.h>
52#include <linux/if_vlan.h> 52#include <linux/if_vlan.h>
53#include <linux/inet_lro.h>
54#include <linux/dca.h> 53#include <linux/dca.h>
55#include <linux/ip.h> 54#include <linux/ip.h>
56#include <linux/inet.h> 55#include <linux/inet.h>
@@ -96,8 +95,6 @@ MODULE_LICENSE("Dual BSD/GPL");
96 95
97#define MYRI10GE_EEPROM_STRINGS_SIZE 256 96#define MYRI10GE_EEPROM_STRINGS_SIZE 256
98#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2) 97#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
99#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
100#define MYRI10GE_LRO_MAX_PKTS 64
101 98
102#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff) 99#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
103#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff 100#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
@@ -165,8 +162,6 @@ struct myri10ge_rx_done {
165 dma_addr_t bus; 162 dma_addr_t bus;
166 int cnt; 163 int cnt;
167 int idx; 164 int idx;
168 struct net_lro_mgr lro_mgr;
169 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
170}; 165};
171 166
172struct myri10ge_slice_netstats { 167struct myri10ge_slice_netstats {
@@ -338,11 +333,6 @@ static int myri10ge_debug = -1; /* defaults above */
338module_param(myri10ge_debug, int, 0); 333module_param(myri10ge_debug, int, 0);
339MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)"); 334MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
340 335
341static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
342module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
343MODULE_PARM_DESC(myri10ge_lro_max_pkts,
344 "Number of LRO packets to be aggregated");
345
346static int myri10ge_fill_thresh = 256; 336static int myri10ge_fill_thresh = 256;
347module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR); 337module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
348MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed"); 338MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
@@ -1197,36 +1187,6 @@ static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
1197 } 1187 }
1198} 1188}
1199 1189
1200static inline void
1201myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
1202 struct skb_frag_struct *rx_frags, int len, int hlen)
1203{
1204 struct skb_frag_struct *skb_frags;
1205
1206 skb->len = skb->data_len = len;
1207 /* attach the page(s) */
1208
1209 skb_frags = skb_shinfo(skb)->frags;
1210 while (len > 0) {
1211 memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
1212 len -= skb_frag_size(rx_frags);
1213 skb_frags++;
1214 rx_frags++;
1215 skb_shinfo(skb)->nr_frags++;
1216 }
1217
1218 /* pskb_may_pull is not available in irq context, but
1219 * skb_pull() (for ether_pad and eth_type_trans()) requires
1220 * the beginning of the packet in skb_headlen(), move it
1221 * manually */
1222 skb_copy_to_linear_data(skb, va, hlen);
1223 skb_shinfo(skb)->frags[0].page_offset += hlen;
1224 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen);
1225 skb->data_len -= hlen;
1226 skb->tail += hlen;
1227 skb_pull(skb, MXGEFW_PAD);
1228}
1229
1230static void 1190static void
1231myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, 1191myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1232 int bytes, int watchdog) 1192 int bytes, int watchdog)
@@ -1304,18 +1264,50 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
1304 } 1264 }
1305} 1265}
1306 1266
1307#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a 1267/*
1308 * page into an skb */ 1268 * GRO does not support acceleration of tagged vlan frames, and
1269 * this NIC does not support vlan tag offload, so we must pop
1270 * the tag ourselves to be able to achieve GRO performance that
1271 * is comparable to LRO.
1272 */
1273
1274static inline void
1275myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
1276{
1277 u8 *va;
1278 struct vlan_ethhdr *veh;
1279 struct skb_frag_struct *frag;
1280 __wsum vsum;
1281
1282 va = addr;
1283 va += MXGEFW_PAD;
1284 veh = (struct vlan_ethhdr *)va;
1285 if ((dev->features & NETIF_F_HW_VLAN_RX) == NETIF_F_HW_VLAN_RX &&
1286 veh->h_vlan_proto == htons(ETH_P_8021Q)) {
1287 /* fixup csum if needed */
1288 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1289 vsum = csum_partial(va + ETH_HLEN, VLAN_HLEN, 0);
1290 skb->csum = csum_sub(skb->csum, vsum);
1291 }
1292 /* pop tag */
1293 __vlan_hwaccel_put_tag(skb, ntohs(veh->h_vlan_TCI));
1294 memmove(va + VLAN_HLEN, va, 2 * ETH_ALEN);
1295 skb->len -= VLAN_HLEN;
1296 skb->data_len -= VLAN_HLEN;
1297 frag = skb_shinfo(skb)->frags;
1298 frag->page_offset += VLAN_HLEN;
1299 skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
1300 }
1301}
1309 1302
1310static inline int 1303static inline int
1311myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum, 1304myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
1312 bool lro_enabled)
1313{ 1305{
1314 struct myri10ge_priv *mgp = ss->mgp; 1306 struct myri10ge_priv *mgp = ss->mgp;
1315 struct sk_buff *skb; 1307 struct sk_buff *skb;
1316 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME]; 1308 struct skb_frag_struct *rx_frags;
1317 struct myri10ge_rx_buf *rx; 1309 struct myri10ge_rx_buf *rx;
1318 int i, idx, hlen, remainder, bytes; 1310 int i, idx, remainder, bytes;
1319 struct pci_dev *pdev = mgp->pdev; 1311 struct pci_dev *pdev = mgp->pdev;
1320 struct net_device *dev = mgp->dev; 1312 struct net_device *dev = mgp->dev;
1321 u8 *va; 1313 u8 *va;
@@ -1332,67 +1324,48 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
1332 idx = rx->cnt & rx->mask; 1324 idx = rx->cnt & rx->mask;
1333 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset; 1325 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
1334 prefetch(va); 1326 prefetch(va);
1327
1328 skb = napi_get_frags(&ss->napi);
1329 if (unlikely(skb == NULL)) {
1330 ss->stats.rx_dropped++;
1331 for (i = 0, remainder = len; remainder > 0; i++) {
1332 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
1333 put_page(rx->info[idx].page);
1334 rx->cnt++;
1335 idx = rx->cnt & rx->mask;
1336 remainder -= MYRI10GE_ALLOC_SIZE;
1337 }
1338 return 0;
1339 }
1340 rx_frags = skb_shinfo(skb)->frags;
1335 /* Fill skb_frag_struct(s) with data from our receive */ 1341 /* Fill skb_frag_struct(s) with data from our receive */
1336 for (i = 0, remainder = len; remainder > 0; i++) { 1342 for (i = 0, remainder = len; remainder > 0; i++) {
1337 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes); 1343 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
1338 __skb_frag_set_page(&rx_frags[i], rx->info[idx].page); 1344 skb_fill_page_desc(skb, i, rx->info[idx].page,
1339 rx_frags[i].page_offset = rx->info[idx].page_offset; 1345 rx->info[idx].page_offset,
1340 if (remainder < MYRI10GE_ALLOC_SIZE) 1346 remainder < MYRI10GE_ALLOC_SIZE ?
1341 skb_frag_size_set(&rx_frags[i], remainder); 1347 remainder : MYRI10GE_ALLOC_SIZE);
1342 else
1343 skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE);
1344 rx->cnt++; 1348 rx->cnt++;
1345 idx = rx->cnt & rx->mask; 1349 idx = rx->cnt & rx->mask;
1346 remainder -= MYRI10GE_ALLOC_SIZE; 1350 remainder -= MYRI10GE_ALLOC_SIZE;
1347 } 1351 }
1348 1352
1349 if (lro_enabled) { 1353 /* remove padding */
1350 rx_frags[0].page_offset += MXGEFW_PAD; 1354 rx_frags[0].page_offset += MXGEFW_PAD;
1351 skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD); 1355 rx_frags[0].size -= MXGEFW_PAD;
1352 len -= MXGEFW_PAD; 1356 len -= MXGEFW_PAD;
1353 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
1354 /* opaque, will come back in get_frag_header */
1355 len, len,
1356 (void *)(__force unsigned long)csum, csum);
1357
1358 return 1;
1359 }
1360
1361 hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
1362
1363 /* allocate an skb to attach the page(s) to. This is done
1364 * after trying LRO, so as to avoid skb allocation overheads */
1365
1366 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1367 if (unlikely(skb == NULL)) {
1368 ss->stats.rx_dropped++;
1369 do {
1370 i--;
1371 __skb_frag_unref(&rx_frags[i]);
1372 } while (i != 0);
1373 return 0;
1374 }
1375 1357
1376 /* Attach the pages to the skb, and trim off any padding */ 1358 skb->len = len;
1377 myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen); 1359 skb->data_len = len;
1378 if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) { 1360 skb->truesize += len;
1379 skb_frag_unref(skb, 0); 1361 if (dev->features & NETIF_F_RXCSUM) {
1380 skb_shinfo(skb)->nr_frags = 0; 1362 skb->ip_summed = CHECKSUM_COMPLETE;
1381 } else { 1363 skb->csum = csum;
1382 skb->truesize += bytes * skb_shinfo(skb)->nr_frags;
1383 } 1364 }
1384 skb->protocol = eth_type_trans(skb, dev); 1365 myri10ge_vlan_rx(mgp->dev, va, skb);
1385 skb_record_rx_queue(skb, ss - &mgp->ss[0]); 1366 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1386 1367
1387 if (dev->features & NETIF_F_RXCSUM) { 1368 napi_gro_frags(&ss->napi);
1388 if ((skb->protocol == htons(ETH_P_IP)) ||
1389 (skb->protocol == htons(ETH_P_IPV6))) {
1390 skb->csum = csum;
1391 skb->ip_summed = CHECKSUM_COMPLETE;
1392 } else
1393 myri10ge_vlan_ip_csum(skb, csum);
1394 }
1395 netif_receive_skb(skb);
1396 return 1; 1369 return 1;
1397} 1370}
1398 1371
@@ -1480,18 +1453,11 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1480 u16 length; 1453 u16 length;
1481 __wsum checksum; 1454 __wsum checksum;
1482 1455
1483 /*
1484 * Prevent compiler from generating more than one ->features memory
1485 * access to avoid theoretical race condition with functions that
1486 * change NETIF_F_LRO flag at runtime.
1487 */
1488 bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
1489
1490 while (rx_done->entry[idx].length != 0 && work_done < budget) { 1456 while (rx_done->entry[idx].length != 0 && work_done < budget) {
1491 length = ntohs(rx_done->entry[idx].length); 1457 length = ntohs(rx_done->entry[idx].length);
1492 rx_done->entry[idx].length = 0; 1458 rx_done->entry[idx].length = 0;
1493 checksum = csum_unfold(rx_done->entry[idx].checksum); 1459 checksum = csum_unfold(rx_done->entry[idx].checksum);
1494 rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled); 1460 rx_ok = myri10ge_rx_done(ss, length, checksum);
1495 rx_packets += rx_ok; 1461 rx_packets += rx_ok;
1496 rx_bytes += rx_ok * (unsigned long)length; 1462 rx_bytes += rx_ok * (unsigned long)length;
1497 cnt++; 1463 cnt++;
@@ -1503,9 +1469,6 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1503 ss->stats.rx_packets += rx_packets; 1469 ss->stats.rx_packets += rx_packets;
1504 ss->stats.rx_bytes += rx_bytes; 1470 ss->stats.rx_bytes += rx_bytes;
1505 1471
1506 if (lro_enabled)
1507 lro_flush_all(&rx_done->lro_mgr);
1508
1509 /* restock receive rings if needed */ 1472 /* restock receive rings if needed */
1510 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh) 1473 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1511 myri10ge_alloc_rx_pages(mgp, &ss->rx_small, 1474 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
@@ -1779,7 +1742,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1779 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done", 1742 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1780 "rx_small_cnt", "rx_big_cnt", 1743 "rx_small_cnt", "rx_big_cnt",
1781 "wake_queue", "stop_queue", "tx_linearized", 1744 "wake_queue", "stop_queue", "tx_linearized",
1782 "LRO aggregated", "LRO flushed", "LRO avg aggr", "LRO no_desc",
1783}; 1745};
1784 1746
1785#define MYRI10GE_NET_STATS_LEN 21 1747#define MYRI10GE_NET_STATS_LEN 21
@@ -1880,14 +1842,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1880 data[i++] = (unsigned int)ss->tx.wake_queue; 1842 data[i++] = (unsigned int)ss->tx.wake_queue;
1881 data[i++] = (unsigned int)ss->tx.stop_queue; 1843 data[i++] = (unsigned int)ss->tx.stop_queue;
1882 data[i++] = (unsigned int)ss->tx.linearized; 1844 data[i++] = (unsigned int)ss->tx.linearized;
1883 data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
1884 data[i++] = ss->rx_done.lro_mgr.stats.flushed;
1885 if (ss->rx_done.lro_mgr.stats.flushed)
1886 data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
1887 ss->rx_done.lro_mgr.stats.flushed;
1888 else
1889 data[i++] = 0;
1890 data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
1891 } 1845 }
1892} 1846}
1893 1847
@@ -1931,7 +1885,7 @@ static int myri10ge_led(struct myri10ge_priv *mgp, int on)
1931 } 1885 }
1932 if (!on) 1886 if (!on)
1933 pattern = swab32(readl(mgp->sram + pattern_off + 4)); 1887 pattern = swab32(readl(mgp->sram + pattern_off + 4));
1934 writel(htonl(pattern), mgp->sram + pattern_off); 1888 writel(swab32(pattern), mgp->sram + pattern_off);
1935 return 0; 1889 return 0;
1936} 1890}
1937 1891
@@ -2271,67 +2225,6 @@ static void myri10ge_free_irq(struct myri10ge_priv *mgp)
2271 pci_disable_msix(pdev); 2225 pci_disable_msix(pdev);
2272} 2226}
2273 2227
2274static int
2275myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
2276 void **ip_hdr, void **tcpudp_hdr,
2277 u64 * hdr_flags, void *priv)
2278{
2279 struct ethhdr *eh;
2280 struct vlan_ethhdr *veh;
2281 struct iphdr *iph;
2282 u8 *va = skb_frag_address(frag);
2283 unsigned long ll_hlen;
2284 /* passed opaque through lro_receive_frags() */
2285 __wsum csum = (__force __wsum) (unsigned long)priv;
2286
2287 /* find the mac header, aborting if not IPv4 */
2288
2289 eh = (struct ethhdr *)va;
2290 *mac_hdr = eh;
2291 ll_hlen = ETH_HLEN;
2292 if (eh->h_proto != htons(ETH_P_IP)) {
2293 if (eh->h_proto == htons(ETH_P_8021Q)) {
2294 veh = (struct vlan_ethhdr *)va;
2295 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
2296 return -1;
2297
2298 ll_hlen += VLAN_HLEN;
2299
2300 /*
2301 * HW checksum starts ETH_HLEN bytes into
2302 * frame, so we must subtract off the VLAN
2303 * header's checksum before csum can be used
2304 */
2305 csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
2306 VLAN_HLEN, 0));
2307 } else {
2308 return -1;
2309 }
2310 }
2311 *hdr_flags = LRO_IPV4;
2312
2313 iph = (struct iphdr *)(va + ll_hlen);
2314 *ip_hdr = iph;
2315 if (iph->protocol != IPPROTO_TCP)
2316 return -1;
2317 if (ip_is_fragment(iph))
2318 return -1;
2319 *hdr_flags |= LRO_TCP;
2320 *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
2321
2322 /* verify the IP checksum */
2323 if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
2324 return -1;
2325
2326 /* verify the checksum */
2327 if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
2328 ntohs(iph->tot_len) - (iph->ihl << 2),
2329 IPPROTO_TCP, csum)))
2330 return -1;
2331
2332 return 0;
2333}
2334
2335static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice) 2228static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2336{ 2229{
2337 struct myri10ge_cmd cmd; 2230 struct myri10ge_cmd cmd;
@@ -2401,8 +2294,7 @@ static int myri10ge_open(struct net_device *dev)
2401 struct myri10ge_priv *mgp = netdev_priv(dev); 2294 struct myri10ge_priv *mgp = netdev_priv(dev);
2402 struct myri10ge_cmd cmd; 2295 struct myri10ge_cmd cmd;
2403 int i, status, big_pow2, slice; 2296 int i, status, big_pow2, slice;
2404 u8 *itable; 2297 u8 __iomem *itable;
2405 struct net_lro_mgr *lro_mgr;
2406 2298
2407 if (mgp->running != MYRI10GE_ETH_STOPPED) 2299 if (mgp->running != MYRI10GE_ETH_STOPPED)
2408 return -EBUSY; 2300 return -EBUSY;
@@ -2513,19 +2405,6 @@ static int myri10ge_open(struct net_device *dev)
2513 goto abort_with_rings; 2405 goto abort_with_rings;
2514 } 2406 }
2515 2407
2516 lro_mgr = &ss->rx_done.lro_mgr;
2517 lro_mgr->dev = dev;
2518 lro_mgr->features = LRO_F_NAPI;
2519 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2520 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2521 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2522 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2523 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2524 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2525 lro_mgr->frag_align_pad = 2;
2526 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2527 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2528
2529 /* must happen prior to any irq */ 2408 /* must happen prior to any irq */
2530 napi_enable(&(ss)->napi); 2409 napi_enable(&(ss)->napi);
2531 } 2410 }
@@ -2878,7 +2757,7 @@ again:
2878 flags_next |= next_is_first * 2757 flags_next |= next_is_first *
2879 MXGEFW_FLAGS_FIRST; 2758 MXGEFW_FLAGS_FIRST;
2880 rdma_count |= -(chop | next_is_first); 2759 rdma_count |= -(chop | next_is_first);
2881 rdma_count += chop & !next_is_first; 2760 rdma_count += chop & ~next_is_first;
2882 } else if (likely(cum_len_next >= 0)) { /* header ends */ 2761 } else if (likely(cum_len_next >= 0)) { /* header ends */
2883 int small; 2762 int small;
2884 2763
@@ -3143,15 +3022,6 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3143 return 0; 3022 return 0;
3144} 3023}
3145 3024
3146static netdev_features_t myri10ge_fix_features(struct net_device *dev,
3147 netdev_features_t features)
3148{
3149 if (!(features & NETIF_F_RXCSUM))
3150 features &= ~NETIF_F_LRO;
3151
3152 return features;
3153}
3154
3155static int myri10ge_change_mtu(struct net_device *dev, int new_mtu) 3025static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3156{ 3026{
3157 struct myri10ge_priv *mgp = netdev_priv(dev); 3027 struct myri10ge_priv *mgp = netdev_priv(dev);
@@ -3878,7 +3748,6 @@ static const struct net_device_ops myri10ge_netdev_ops = {
3878 .ndo_get_stats64 = myri10ge_get_stats, 3748 .ndo_get_stats64 = myri10ge_get_stats,
3879 .ndo_validate_addr = eth_validate_addr, 3749 .ndo_validate_addr = eth_validate_addr,
3880 .ndo_change_mtu = myri10ge_change_mtu, 3750 .ndo_change_mtu = myri10ge_change_mtu,
3881 .ndo_fix_features = myri10ge_fix_features,
3882 .ndo_set_rx_mode = myri10ge_set_multicast_list, 3751 .ndo_set_rx_mode = myri10ge_set_multicast_list,
3883 .ndo_set_mac_address = myri10ge_set_mac_address, 3752 .ndo_set_mac_address = myri10ge_set_mac_address,
3884}; 3753};
@@ -3967,9 +3836,9 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3967 goto abort_with_mtrr; 3836 goto abort_with_mtrr;
3968 } 3837 }
3969 hdr_offset = 3838 hdr_offset =
3970 ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc; 3839 swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
3971 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs); 3840 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
3972 mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset)); 3841 mgp->sram_size = swab32(readl(mgp->sram + ss_offset));
3973 if (mgp->sram_size > mgp->board_span || 3842 if (mgp->sram_size > mgp->board_span ||
3974 mgp->sram_size <= MYRI10GE_FW_OFFSET) { 3843 mgp->sram_size <= MYRI10GE_FW_OFFSET) {
3975 dev_err(&pdev->dev, 3844 dev_err(&pdev->dev,
@@ -4018,7 +3887,11 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4018 3887
4019 netdev->netdev_ops = &myri10ge_netdev_ops; 3888 netdev->netdev_ops = &myri10ge_netdev_ops;
4020 netdev->mtu = myri10ge_initial_mtu; 3889 netdev->mtu = myri10ge_initial_mtu;
4021 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; 3890 netdev->hw_features = mgp->features | NETIF_F_RXCSUM;
3891
3892 /* fake NETIF_F_HW_VLAN_RX for good GRO performance */
3893 netdev->hw_features |= NETIF_F_HW_VLAN_RX;
3894
4022 netdev->features = netdev->hw_features; 3895 netdev->features = netdev->hw_features;
4023 3896
4024 if (dac_enabled) 3897 if (dac_enabled)
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
index 3f94ddbf4dc0..923e640d604c 100644
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ b/drivers/net/ethernet/natsemi/ibmlana.c
@@ -900,7 +900,7 @@ static short ibmlana_adapter_ids[] __initdata = {
900 0x0000 900 0x0000
901}; 901};
902 902
903static char *ibmlana_adapter_names[] __devinitdata = { 903static char *ibmlana_adapter_names[] = {
904 "IBM LAN Adapter/A", 904 "IBM LAN Adapter/A",
905 NULL 905 NULL
906}; 906};
@@ -916,7 +916,7 @@ static const struct net_device_ops ibmlana_netdev_ops = {
916 .ndo_validate_addr = eth_validate_addr, 916 .ndo_validate_addr = eth_validate_addr,
917}; 917};
918 918
919static int __devinit ibmlana_init_one(struct device *kdev) 919static int ibmlana_init_one(struct device *kdev)
920{ 920{
921 struct mca_device *mdev = to_mca_device(kdev); 921 struct mca_device *mdev = to_mca_device(kdev);
922 struct net_device *dev; 922 struct net_device *dev;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 95dd39ffb230..b0b361546365 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -117,7 +117,7 @@ static const struct net_device_ops sonic_netdev_ops = {
117 .ndo_set_mac_address = eth_mac_addr, 117 .ndo_set_mac_address = eth_mac_addr,
118}; 118};
119 119
120static int __devinit sonic_probe1(struct net_device *dev) 120static int sonic_probe1(struct net_device *dev)
121{ 121{
122 static unsigned version_printed; 122 static unsigned version_printed;
123 unsigned int silicon_revision; 123 unsigned int silicon_revision;
@@ -220,7 +220,7 @@ out:
220 * Probe for a SONIC ethernet controller on a Mips Jazz board. 220 * Probe for a SONIC ethernet controller on a Mips Jazz board.
221 * Actually probing is superfluous but we're paranoid. 221 * Actually probing is superfluous but we're paranoid.
222 */ 222 */
223static int __devinit jazz_sonic_probe(struct platform_device *pdev) 223static int jazz_sonic_probe(struct platform_device *pdev)
224{ 224{
225 struct net_device *dev; 225 struct net_device *dev;
226 struct sonic_local *lp; 226 struct sonic_local *lp;
@@ -270,7 +270,7 @@ MODULE_ALIAS("platform:jazzsonic");
270 270
271#include "sonic.c" 271#include "sonic.c"
272 272
273static int __devexit jazz_sonic_device_remove (struct platform_device *pdev) 273static int jazz_sonic_device_remove(struct platform_device *pdev)
274{ 274{
275 struct net_device *dev = platform_get_drvdata(pdev); 275 struct net_device *dev = platform_get_drvdata(pdev);
276 struct sonic_local* lp = netdev_priv(dev); 276 struct sonic_local* lp = netdev_priv(dev);
@@ -286,7 +286,7 @@ static int __devexit jazz_sonic_device_remove (struct platform_device *pdev)
286 286
287static struct platform_driver jazz_sonic_driver = { 287static struct platform_driver jazz_sonic_driver = {
288 .probe = jazz_sonic_probe, 288 .probe = jazz_sonic_probe,
289 .remove = __devexit_p(jazz_sonic_device_remove), 289 .remove = jazz_sonic_device_remove,
290 .driver = { 290 .driver = {
291 .name = jazz_sonic_string, 291 .name = jazz_sonic_string,
292 .owner = THIS_MODULE, 292 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index b9680ba5a325..0ffde69c8d01 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -196,7 +196,7 @@ static const struct net_device_ops macsonic_netdev_ops = {
196 .ndo_set_mac_address = eth_mac_addr, 196 .ndo_set_mac_address = eth_mac_addr,
197}; 197};
198 198
199static int __devinit macsonic_init(struct net_device *dev) 199static int macsonic_init(struct net_device *dev)
200{ 200{
201 struct sonic_local* lp = netdev_priv(dev); 201 struct sonic_local* lp = netdev_priv(dev);
202 202
@@ -245,7 +245,7 @@ static int __devinit macsonic_init(struct net_device *dev)
245 memcmp(mac, "\x00\x80\x19", 3) && \ 245 memcmp(mac, "\x00\x80\x19", 3) && \
246 memcmp(mac, "\x00\x05\x02", 3)) 246 memcmp(mac, "\x00\x05\x02", 3))
247 247
248static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev) 248static void mac_onboard_sonic_ethernet_addr(struct net_device *dev)
249{ 249{
250 struct sonic_local *lp = netdev_priv(dev); 250 struct sonic_local *lp = netdev_priv(dev);
251 const int prom_addr = ONBOARD_SONIC_PROM_BASE; 251 const int prom_addr = ONBOARD_SONIC_PROM_BASE;
@@ -309,7 +309,7 @@ static void __devinit mac_onboard_sonic_ethernet_addr(struct net_device *dev)
309 eth_hw_addr_random(dev); 309 eth_hw_addr_random(dev);
310} 310}
311 311
312static int __devinit mac_onboard_sonic_probe(struct net_device *dev) 312static int mac_onboard_sonic_probe(struct net_device *dev)
313{ 313{
314 struct sonic_local* lp = netdev_priv(dev); 314 struct sonic_local* lp = netdev_priv(dev);
315 int sr; 315 int sr;
@@ -420,9 +420,8 @@ static int __devinit mac_onboard_sonic_probe(struct net_device *dev)
420 return macsonic_init(dev); 420 return macsonic_init(dev);
421} 421}
422 422
423static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev, 423static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
424 unsigned long prom_addr, 424 unsigned long prom_addr, int id)
425 int id)
426{ 425{
427 int i; 426 int i;
428 for(i = 0; i < 6; i++) 427 for(i = 0; i < 6; i++)
@@ -435,7 +434,7 @@ static int __devinit mac_nubus_sonic_ethernet_addr(struct net_device *dev,
435 return 0; 434 return 0;
436} 435}
437 436
438static int __devinit macsonic_ident(struct nubus_dev *ndev) 437static int macsonic_ident(struct nubus_dev *ndev)
439{ 438{
440 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC && 439 if (ndev->dr_hw == NUBUS_DRHW_ASANTE_LC &&
441 ndev->dr_sw == NUBUS_DRSW_SONIC_LC) 440 ndev->dr_sw == NUBUS_DRSW_SONIC_LC)
@@ -460,7 +459,7 @@ static int __devinit macsonic_ident(struct nubus_dev *ndev)
460 return -1; 459 return -1;
461} 460}
462 461
463static int __devinit mac_nubus_sonic_probe(struct net_device *dev) 462static int mac_nubus_sonic_probe(struct net_device *dev)
464{ 463{
465 static int slots; 464 static int slots;
466 struct nubus_dev* ndev = NULL; 465 struct nubus_dev* ndev = NULL;
@@ -573,7 +572,7 @@ static int __devinit mac_nubus_sonic_probe(struct net_device *dev)
573 return macsonic_init(dev); 572 return macsonic_init(dev);
574} 573}
575 574
576static int __devinit mac_sonic_probe(struct platform_device *pdev) 575static int mac_sonic_probe(struct platform_device *pdev)
577{ 576{
578 struct net_device *dev; 577 struct net_device *dev;
579 struct sonic_local *lp; 578 struct sonic_local *lp;
@@ -619,7 +618,7 @@ MODULE_ALIAS("platform:macsonic");
619 618
620#include "sonic.c" 619#include "sonic.c"
621 620
622static int __devexit mac_sonic_device_remove (struct platform_device *pdev) 621static int mac_sonic_device_remove(struct platform_device *pdev)
623{ 622{
624 struct net_device *dev = platform_get_drvdata(pdev); 623 struct net_device *dev = platform_get_drvdata(pdev);
625 struct sonic_local* lp = netdev_priv(dev); 624 struct sonic_local* lp = netdev_priv(dev);
@@ -634,7 +633,7 @@ static int __devexit mac_sonic_device_remove (struct platform_device *pdev)
634 633
635static struct platform_driver mac_sonic_driver = { 634static struct platform_driver mac_sonic_driver = {
636 .probe = mac_sonic_probe, 635 .probe = mac_sonic_probe,
637 .remove = __devexit_p(mac_sonic_device_remove), 636 .remove = mac_sonic_device_remove,
638 .driver = { 637 .driver = {
639 .name = mac_sonic_string, 638 .name = mac_sonic_string,
640 .owner = THIS_MODULE, 639 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index dbaaa99a0d43..f4ad60c97eae 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -127,7 +127,7 @@ static int full_duplex[MAX_UNITS];
127#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */ 127#define NATSEMI_RX_LIMIT 2046 /* maximum supported by hardware */
128 128
129/* These identify the driver base version and may not be removed. */ 129/* These identify the driver base version and may not be removed. */
130static const char version[] __devinitconst = 130static const char version[] =
131 KERN_INFO DRV_NAME " dp8381x driver, version " 131 KERN_INFO DRV_NAME " dp8381x driver, version "
132 DRV_VERSION ", " DRV_RELDATE "\n" 132 DRV_VERSION ", " DRV_RELDATE "\n"
133 " originally by Donald Becker <becker@scyld.com>\n" 133 " originally by Donald Becker <becker@scyld.com>\n"
@@ -242,7 +242,7 @@ static struct {
242 const char *name; 242 const char *name;
243 unsigned long flags; 243 unsigned long flags;
244 unsigned int eeprom_size; 244 unsigned int eeprom_size;
245} natsemi_pci_info[] __devinitdata = { 245} natsemi_pci_info[] = {
246 { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 }, 246 { "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
247 { "NatSemi DP8381[56]", 0, 24 }, 247 { "NatSemi DP8381[56]", 0, 24 },
248}; 248};
@@ -742,7 +742,7 @@ static void move_int_phy(struct net_device *dev, int addr)
742 udelay(1); 742 udelay(1);
743} 743}
744 744
745static void __devinit natsemi_init_media (struct net_device *dev) 745static void natsemi_init_media(struct net_device *dev)
746{ 746{
747 struct netdev_private *np = netdev_priv(dev); 747 struct netdev_private *np = netdev_priv(dev);
748 u32 tmp; 748 u32 tmp;
@@ -797,8 +797,7 @@ static const struct net_device_ops natsemi_netdev_ops = {
797#endif 797#endif
798}; 798};
799 799
800static int __devinit natsemi_probe1 (struct pci_dev *pdev, 800static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
801 const struct pci_device_id *ent)
802{ 801{
803 struct net_device *dev; 802 struct net_device *dev;
804 struct netdev_private *np; 803 struct netdev_private *np;
@@ -3214,7 +3213,7 @@ static int netdev_close(struct net_device *dev)
3214} 3213}
3215 3214
3216 3215
3217static void __devexit natsemi_remove1 (struct pci_dev *pdev) 3216static void natsemi_remove1(struct pci_dev *pdev)
3218{ 3217{
3219 struct net_device *dev = pci_get_drvdata(pdev); 3218 struct net_device *dev = pci_get_drvdata(pdev);
3220 void __iomem * ioaddr = ns_ioaddr(dev); 3219 void __iomem * ioaddr = ns_ioaddr(dev);
@@ -3353,7 +3352,7 @@ static struct pci_driver natsemi_driver = {
3353 .name = DRV_NAME, 3352 .name = DRV_NAME,
3354 .id_table = natsemi_pci_tbl, 3353 .id_table = natsemi_pci_tbl,
3355 .probe = natsemi_probe1, 3354 .probe = natsemi_probe1,
3356 .remove = __devexit_p(natsemi_remove1), 3355 .remove = natsemi_remove1,
3357#ifdef CONFIG_PM 3356#ifdef CONFIG_PM
3358 .suspend = natsemi_suspend, 3357 .suspend = natsemi_suspend,
3359 .resume = natsemi_resume, 3358 .resume = natsemi_resume,
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index d52728b3c436..77c070de621e 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1941,8 +1941,8 @@ static const struct net_device_ops netdev_ops = {
1941 .ndo_tx_timeout = ns83820_tx_timeout, 1941 .ndo_tx_timeout = ns83820_tx_timeout,
1942}; 1942};
1943 1943
1944static int __devinit ns83820_init_one(struct pci_dev *pci_dev, 1944static int ns83820_init_one(struct pci_dev *pci_dev,
1945 const struct pci_device_id *id) 1945 const struct pci_device_id *id)
1946{ 1946{
1947 struct net_device *ndev; 1947 struct net_device *ndev;
1948 struct ns83820 *dev; 1948 struct ns83820 *dev;
@@ -2241,7 +2241,7 @@ out:
2241 return err; 2241 return err;
2242} 2242}
2243 2243
2244static void __devexit ns83820_remove_one(struct pci_dev *pci_dev) 2244static void ns83820_remove_one(struct pci_dev *pci_dev)
2245{ 2245{
2246 struct net_device *ndev = pci_get_drvdata(pci_dev); 2246 struct net_device *ndev = pci_get_drvdata(pci_dev);
2247 struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */ 2247 struct ns83820 *dev = PRIV(ndev); /* ok even if NULL */
@@ -2272,7 +2272,7 @@ static struct pci_driver driver = {
2272 .name = "ns83820", 2272 .name = "ns83820",
2273 .id_table = ns83820_pci_tbl, 2273 .id_table = ns83820_pci_tbl,
2274 .probe = ns83820_init_one, 2274 .probe = ns83820_init_one,
2275 .remove = __devexit_p(ns83820_remove_one), 2275 .remove = ns83820_remove_one,
2276#if 0 /* FIXME: implement */ 2276#if 0 /* FIXME: implement */
2277 .suspend = , 2277 .suspend = ,
2278 .resume = , 2278 .resume = ,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 7dfe88398d7d..5e4748e855f6 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -249,7 +249,7 @@ out:
249 * Actually probing is superfluous but we're paranoid. 249 * Actually probing is superfluous but we're paranoid.
250 */ 250 */
251 251
252int __devinit xtsonic_probe(struct platform_device *pdev) 252int xtsonic_probe(struct platform_device *pdev)
253{ 253{
254 struct net_device *dev; 254 struct net_device *dev;
255 struct sonic_local *lp; 255 struct sonic_local *lp;
@@ -297,7 +297,7 @@ MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
297 297
298#include "sonic.c" 298#include "sonic.c"
299 299
300static int __devexit xtsonic_device_remove (struct platform_device *pdev) 300static int xtsonic_device_remove(struct platform_device *pdev)
301{ 301{
302 struct net_device *dev = platform_get_drvdata(pdev); 302 struct net_device *dev = platform_get_drvdata(pdev);
303 struct sonic_local *lp = netdev_priv(dev); 303 struct sonic_local *lp = netdev_priv(dev);
@@ -314,7 +314,7 @@ static int __devexit xtsonic_device_remove (struct platform_device *pdev)
314 314
315static struct platform_driver xtsonic_driver = { 315static struct platform_driver xtsonic_driver = {
316 .probe = xtsonic_probe, 316 .probe = xtsonic_probe,
317 .remove = __devexit_p(xtsonic_device_remove), 317 .remove = xtsonic_device_remove,
318 .driver = { 318 .driver = {
319 .name = xtsonic_string, 319 .name = xtsonic_string,
320 }, 320 },
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index ff26b54bd3fb..87abb4f10c43 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -32,7 +32,7 @@ config S2IO
32 32
33config VXGE 33config VXGE
34 tristate "Exar X3100 Series 10GbE PCIe Server Adapter" 34 tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
35 depends on PCI && INET 35 depends on PCI
36 ---help--- 36 ---help---
37 This driver supports Exar Corp's X3100 Series 10 GbE PCIe 37 This driver supports Exar Corp's X3100 Series 10 GbE PCIe
38 I/O Virtualized Server Adapter. 38 I/O Virtualized Server Adapter.
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index de50547c187d..7c94c089212f 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -494,7 +494,7 @@ static struct pci_driver s2io_driver = {
494 .name = "S2IO", 494 .name = "S2IO",
495 .id_table = s2io_tbl, 495 .id_table = s2io_tbl,
496 .probe = s2io_init_nic, 496 .probe = s2io_init_nic,
497 .remove = __devexit_p(s2io_rem_nic), 497 .remove = s2io_rem_nic,
498 .err_handler = &s2io_err_handler, 498 .err_handler = &s2io_err_handler,
499}; 499};
500 500
@@ -1040,7 +1040,7 @@ static int s2io_verify_pci_mode(struct s2io_nic *nic)
1040static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev) 1040static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1041{ 1041{
1042 struct pci_dev *tdev = NULL; 1042 struct pci_dev *tdev = NULL;
1043 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1043 for_each_pci_dev(tdev) {
1044 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1044 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1045 if (tdev->bus == s2io_pdev->bus->parent) { 1045 if (tdev->bus == s2io_pdev->bus->parent) {
1046 pci_dev_put(tdev); 1046 pci_dev_put(tdev);
@@ -7702,7 +7702,7 @@ static const struct net_device_ops s2io_netdev_ops = {
7702 * returns 0 on success and negative on failure. 7702 * returns 0 on success and negative on failure.
7703 */ 7703 */
7704 7704
7705static int __devinit 7705static int
7706s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) 7706s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7707{ 7707{
7708 struct s2io_nic *sp; 7708 struct s2io_nic *sp;
@@ -8200,7 +8200,7 @@ mem_alloc_failed:
8200 * from memory. 8200 * from memory.
8201 */ 8201 */
8202 8202
8203static void __devexit s2io_rem_nic(struct pci_dev *pdev) 8203static void s2io_rem_nic(struct pci_dev *pdev)
8204{ 8204{
8205 struct net_device *dev = pci_get_drvdata(pdev); 8205 struct net_device *dev = pci_get_drvdata(pdev);
8206 struct s2io_nic *sp; 8206 struct s2io_nic *sp;
@@ -8239,7 +8239,8 @@ static int __init s2io_starter(void)
8239 8239
8240/** 8240/**
8241 * s2io_closer - Cleanup routine for the driver 8241 * s2io_closer - Cleanup routine for the driver
8242 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver. 8242 * Description: This function is the cleanup routine for the driver. It
8243 * unregisters the driver.
8243 */ 8244 */
8244 8245
8245static __exit void s2io_closer(void) 8246static __exit void s2io_closer(void)
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d5596926a1ef..d89b6ed82c51 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1075,9 +1075,8 @@ static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
1075/* 1075/*
1076 * Prototype declaration. 1076 * Prototype declaration.
1077 */ 1077 */
1078static int __devinit s2io_init_nic(struct pci_dev *pdev, 1078static int s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre);
1079 const struct pci_device_id *pre); 1079static void s2io_rem_nic(struct pci_dev *pdev);
1080static void __devexit s2io_rem_nic(struct pci_dev *pdev);
1081static int init_shared_mem(struct s2io_nic *sp); 1080static int init_shared_mem(struct s2io_nic *sp);
1082static void free_shared_mem(struct s2io_nic *sp); 1081static void free_shared_mem(struct s2io_nic *sp);
1083static int init_nic(struct s2io_nic *nic); 1082static int init_nic(struct s2io_nic *nic);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index c2e420a84d22..fbe5363cb89c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -993,7 +993,7 @@ exit:
993 * for the driver, FW version information, and the first mac address for 993 * for the driver, FW version information, and the first mac address for
994 * each vpath 994 * each vpath
995 */ 995 */
996enum vxge_hw_status __devinit 996enum vxge_hw_status
997vxge_hw_device_hw_info_get(void __iomem *bar0, 997vxge_hw_device_hw_info_get(void __iomem *bar0,
998 struct vxge_hw_device_hw_info *hw_info) 998 struct vxge_hw_device_hw_info *hw_info)
999{ 999{
@@ -1310,7 +1310,7 @@ __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1310 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW 1310 * When done, the driver allocates sizeof(struct __vxge_hw_device) bytes for HW
1311 * to enable the latter to perform Titan hardware initialization. 1311 * to enable the latter to perform Titan hardware initialization.
1312 */ 1312 */
1313enum vxge_hw_status __devinit 1313enum vxge_hw_status
1314vxge_hw_device_initialize( 1314vxge_hw_device_initialize(
1315 struct __vxge_hw_device **devh, 1315 struct __vxge_hw_device **devh,
1316 struct vxge_hw_device_attr *attr, 1316 struct vxge_hw_device_attr *attr,
@@ -2917,7 +2917,7 @@ exit:
2917 * vxge_hw_device_config_default_get - Initialize device config with defaults. 2917 * vxge_hw_device_config_default_get - Initialize device config with defaults.
2918 * Initialize Titan device config with default values. 2918 * Initialize Titan device config with default values.
2919 */ 2919 */
2920enum vxge_hw_status __devinit 2920enum vxge_hw_status
2921vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config) 2921vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2922{ 2922{
2923 u32 i; 2923 u32 i;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 9e0c1eed5dc5..6ce4412fcc1a 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1846,11 +1846,11 @@ struct vxge_hw_vpath_attr {
1846 struct vxge_hw_fifo_attr fifo_attr; 1846 struct vxge_hw_fifo_attr fifo_attr;
1847}; 1847};
1848 1848
1849enum vxge_hw_status __devinit vxge_hw_device_hw_info_get( 1849enum vxge_hw_status vxge_hw_device_hw_info_get(
1850 void __iomem *bar0, 1850 void __iomem *bar0,
1851 struct vxge_hw_device_hw_info *hw_info); 1851 struct vxge_hw_device_hw_info *hw_info);
1852 1852
1853enum vxge_hw_status __devinit vxge_hw_device_config_default_get( 1853enum vxge_hw_status vxge_hw_device_config_default_get(
1854 struct vxge_hw_device_config *device_config); 1854 struct vxge_hw_device_config *device_config);
1855 1855
1856/** 1856/**
@@ -1877,7 +1877,7 @@ u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *devh);
1877const u8 * 1877const u8 *
1878vxge_hw_device_product_name_get(struct __vxge_hw_device *devh); 1878vxge_hw_device_product_name_get(struct __vxge_hw_device *devh);
1879 1879
1880enum vxge_hw_status __devinit vxge_hw_device_initialize( 1880enum vxge_hw_status vxge_hw_device_initialize(
1881 struct __vxge_hw_device **devh, 1881 struct __vxge_hw_device **devh,
1882 struct vxge_hw_device_attr *attr, 1882 struct vxge_hw_device_attr *attr,
1883 struct vxge_hw_device_config *device_config); 1883 struct vxge_hw_device_config *device_config);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 3e5b7509502c..7c87105ca049 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3371,10 +3371,9 @@ static const struct net_device_ops vxge_netdev_ops = {
3371#endif 3371#endif
3372}; 3372};
3373 3373
3374static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3374static int vxge_device_register(struct __vxge_hw_device *hldev,
3375 struct vxge_config *config, 3375 struct vxge_config *config, int high_dma,
3376 int high_dma, int no_of_vpath, 3376 int no_of_vpath, struct vxgedev **vdev_out)
3377 struct vxgedev **vdev_out)
3378{ 3377{
3379 struct net_device *ndev; 3378 struct net_device *ndev;
3380 enum vxge_hw_status status = VXGE_HW_OK; 3379 enum vxge_hw_status status = VXGE_HW_OK;
@@ -3672,9 +3671,8 @@ static void verify_bandwidth(void)
3672/* 3671/*
3673 * Vpath configuration 3672 * Vpath configuration
3674 */ 3673 */
3675static int __devinit vxge_config_vpaths( 3674static int vxge_config_vpaths(struct vxge_hw_device_config *device_config,
3676 struct vxge_hw_device_config *device_config, 3675 u64 vpath_mask, struct vxge_config *config_param)
3677 u64 vpath_mask, struct vxge_config *config_param)
3678{ 3676{
3679 int i, no_of_vpaths = 0, default_no_vpath = 0, temp; 3677 int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
3680 u32 txdl_size, txdl_per_memblock; 3678 u32 txdl_size, txdl_per_memblock;
@@ -3859,9 +3857,8 @@ static int __devinit vxge_config_vpaths(
3859} 3857}
3860 3858
3861/* initialize device configuratrions */ 3859/* initialize device configuratrions */
3862static void __devinit vxge_device_config_init( 3860static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
3863 struct vxge_hw_device_config *device_config, 3861 int *intr_type)
3864 int *intr_type)
3865{ 3862{
3866 /* Used for CQRQ/SRQ. */ 3863 /* Used for CQRQ/SRQ. */
3867 device_config->dma_blockpool_initial = 3864 device_config->dma_blockpool_initial =
@@ -3912,7 +3909,7 @@ static void __devinit vxge_device_config_init(
3912 device_config->rth_it_type); 3909 device_config->rth_it_type);
3913} 3910}
3914 3911
3915static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) 3912static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3916{ 3913{
3917 int i; 3914 int i;
3918 3915
@@ -4269,7 +4266,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
4269 return ret; 4266 return ret;
4270} 4267}
4271 4268
4272static int __devinit is_sriov_initialized(struct pci_dev *pdev) 4269static int is_sriov_initialized(struct pci_dev *pdev)
4273{ 4270{
4274 int pos; 4271 int pos;
4275 u16 ctrl; 4272 u16 ctrl;
@@ -4300,7 +4297,7 @@ static const struct vxge_hw_uld_cbs vxge_callbacks = {
4300 * returns 0 on success and negative on failure. 4297 * returns 0 on success and negative on failure.
4301 * 4298 *
4302 */ 4299 */
4303static int __devinit 4300static int
4304vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4301vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4305{ 4302{
4306 struct __vxge_hw_device *hldev; 4303 struct __vxge_hw_device *hldev;
@@ -4764,7 +4761,7 @@ _exit0:
4764 * Description: This function is called by the Pci subsystem to release a 4761 * Description: This function is called by the Pci subsystem to release a
4765 * PCI device and free up all resource held up by the device. 4762 * PCI device and free up all resource held up by the device.
4766 */ 4763 */
4767static void __devexit vxge_remove(struct pci_dev *pdev) 4764static void vxge_remove(struct pci_dev *pdev)
4768{ 4765{
4769 struct __vxge_hw_device *hldev; 4766 struct __vxge_hw_device *hldev;
4770 struct vxgedev *vdev; 4767 struct vxgedev *vdev;
@@ -4809,7 +4806,7 @@ static struct pci_driver vxge_driver = {
4809 .name = VXGE_DRIVER_NAME, 4806 .name = VXGE_DRIVER_NAME,
4810 .id_table = vxge_id_table, 4807 .id_table = vxge_id_table,
4811 .probe = vxge_probe, 4808 .probe = vxge_probe,
4812 .remove = __devexit_p(vxge_remove), 4809 .remove = vxge_remove,
4813#ifdef CONFIG_PM 4810#ifdef CONFIG_PM
4814 .suspend = vxge_pm_suspend, 4811 .suspend = vxge_pm_suspend,
4815 .resume = vxge_pm_resume, 4812 .resume = vxge_pm_resume,
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 6893a65ae55f..cbd6a529d0c0 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -978,7 +978,7 @@ static int w90p910_ether_setup(struct net_device *dev)
978 return 0; 978 return 0;
979} 979}
980 980
981static int __devinit w90p910_ether_probe(struct platform_device *pdev) 981static int w90p910_ether_probe(struct platform_device *pdev)
982{ 982{
983 struct w90p910_ether *ether; 983 struct w90p910_ether *ether;
984 struct net_device *dev; 984 struct net_device *dev;
@@ -1071,7 +1071,7 @@ failed_free:
1071 return error; 1071 return error;
1072} 1072}
1073 1073
1074static int __devexit w90p910_ether_remove(struct platform_device *pdev) 1074static int w90p910_ether_remove(struct platform_device *pdev)
1075{ 1075{
1076 struct net_device *dev = platform_get_drvdata(pdev); 1076 struct net_device *dev = platform_get_drvdata(pdev);
1077 struct w90p910_ether *ether = netdev_priv(dev); 1077 struct w90p910_ether *ether = netdev_priv(dev);
@@ -1096,7 +1096,7 @@ static int __devexit w90p910_ether_remove(struct platform_device *pdev)
1096 1096
1097static struct platform_driver w90p910_ether_driver = { 1097static struct platform_driver w90p910_ether_driver = {
1098 .probe = w90p910_ether_probe, 1098 .probe = w90p910_ether_probe,
1099 .remove = __devexit_p(w90p910_ether_remove), 1099 .remove = w90p910_ether_remove,
1100 .driver = { 1100 .driver = {
1101 .name = "nuc900-emc", 1101 .name = "nuc900-emc",
1102 .owner = THIS_MODULE, 1102 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 876beceaf2d7..653487dc7b52 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5520,7 +5520,7 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
5520#endif 5520#endif
5521}; 5521};
5522 5522
5523static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) 5523static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5524{ 5524{
5525 struct net_device *dev; 5525 struct net_device *dev;
5526 struct fe_priv *np; 5526 struct fe_priv *np;
@@ -5995,7 +5995,7 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5995 base + NvRegTransmitPoll); 5995 base + NvRegTransmitPoll);
5996} 5996}
5997 5997
5998static void __devexit nv_remove(struct pci_dev *pci_dev) 5998static void nv_remove(struct pci_dev *pci_dev)
5999{ 5999{
6000 struct net_device *dev = pci_get_drvdata(pci_dev); 6000 struct net_device *dev = pci_get_drvdata(pci_dev);
6001 6001
@@ -6271,7 +6271,7 @@ static struct pci_driver driver = {
6271 .name = DRV_NAME, 6271 .name = DRV_NAME,
6272 .id_table = pci_tbl, 6272 .id_table = pci_tbl,
6273 .probe = nv_probe, 6273 .probe = nv_probe,
6274 .remove = __devexit_p(nv_remove), 6274 .remove = nv_remove,
6275 .shutdown = nv_shutdown, 6275 .shutdown = nv_shutdown,
6276 .driver.pm = NV_PM_OPS, 6276 .driver.pm = NV_PM_OPS,
6277}; 6277};
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index af8b4142088c..3466ca1e8f6c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1219,9 +1219,6 @@ static int lpc_eth_open(struct net_device *ndev)
1219 if (netif_msg_ifup(pldat)) 1219 if (netif_msg_ifup(pldat))
1220 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); 1220 dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
1221 1221
1222 if (!is_valid_ether_addr(ndev->dev_addr))
1223 return -EADDRNOTAVAIL;
1224
1225 __lpc_eth_clock_enable(pldat, true); 1222 __lpc_eth_clock_enable(pldat, true);
1226 1223
1227 /* Reset and initialize */ 1224 /* Reset and initialize */
@@ -1301,6 +1298,7 @@ static const struct net_device_ops lpc_netdev_ops = {
1301 .ndo_set_rx_mode = lpc_eth_set_multicast_list, 1298 .ndo_set_rx_mode = lpc_eth_set_multicast_list,
1302 .ndo_do_ioctl = lpc_eth_ioctl, 1299 .ndo_do_ioctl = lpc_eth_ioctl,
1303 .ndo_set_mac_address = lpc_set_mac_address, 1300 .ndo_set_mac_address = lpc_set_mac_address,
1301 .ndo_validate_addr = eth_validate_addr,
1304 .ndo_change_mtu = eth_change_mtu, 1302 .ndo_change_mtu = eth_change_mtu,
1305}; 1303};
1306 1304
@@ -1597,7 +1595,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
1597 1595
1598static struct platform_driver lpc_eth_driver = { 1596static struct platform_driver lpc_eth_driver = {
1599 .probe = lpc_eth_drv_probe, 1597 .probe = lpc_eth_drv_probe,
1600 .remove = __devexit_p(lpc_eth_drv_remove), 1598 .remove = lpc_eth_drv_remove,
1601#ifdef CONFIG_PM 1599#ifdef CONFIG_PM
1602 .suspend = lpc_eth_drv_suspend, 1600 .suspend = lpc_eth_drv_suspend,
1603 .resume = lpc_eth_drv_resume, 1601 .resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index f97719c48516..b5499198e029 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1419,7 +1419,7 @@ static const struct net_device_ops octeon_mgmt_ops = {
1419#endif 1419#endif
1420}; 1420};
1421 1421
1422static int __devinit octeon_mgmt_probe(struct platform_device *pdev) 1422static int octeon_mgmt_probe(struct platform_device *pdev)
1423{ 1423{
1424 struct net_device *netdev; 1424 struct net_device *netdev;
1425 struct octeon_mgmt *p; 1425 struct octeon_mgmt *p;
@@ -1559,7 +1559,7 @@ err:
1559 return result; 1559 return result;
1560} 1560}
1561 1561
1562static int __devexit octeon_mgmt_remove(struct platform_device *pdev) 1562static int octeon_mgmt_remove(struct platform_device *pdev)
1563{ 1563{
1564 struct net_device *netdev = dev_get_drvdata(&pdev->dev); 1564 struct net_device *netdev = dev_get_drvdata(&pdev->dev);
1565 1565
@@ -1583,7 +1583,7 @@ static struct platform_driver octeon_mgmt_driver = {
1583 .of_match_table = octeon_mgmt_match, 1583 .of_match_table = octeon_mgmt_match,
1584 }, 1584 },
1585 .probe = octeon_mgmt_probe, 1585 .probe = octeon_mgmt_probe,
1586 .remove = __devexit_p(octeon_mgmt_remove), 1586 .remove = octeon_mgmt_remove,
1587}; 1587};
1588 1588
1589extern void octeon_mdiobus_force_mod_depencency(void); 1589extern void octeon_mdiobus_force_mod_depencency(void);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 5296cc8d3cba..34d05bf72b2e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
7 depends on PCI 7 depends on PCI
8 select NET_CORE 8 select NET_CORE
9 select MII 9 select MII
10 select PTP_1588_CLOCK_PCH
10 ---help--- 11 ---help---
11 This is a gigabit ethernet driver for EG20T PCH. 12 This is a gigabit ethernet driver for EG20T PCH.
12 EG20T PCH is the platform controller hub that is used in Intel's 13 EG20T PCH is the platform controller hub that is used in Intel's
@@ -20,19 +21,3 @@ config PCH_GBE
20 purpose use. 21 purpose use.
21 ML7223/ML7831 is companion chip for Intel Atom E6xx series. 22 ML7223/ML7831 is companion chip for Intel Atom E6xx series.
22 ML7223/ML7831 is completely compatible for Intel EG20T PCH. 23 ML7223/ML7831 is completely compatible for Intel EG20T PCH.
23
24if PCH_GBE
25
26config PCH_PTP
27 bool "PCH PTP clock support"
28 default n
29 depends on EXPERIMENTAL
30 select PPS
31 select PTP_1588_CLOCK
32 select PTP_1588_CLOCK_PCH
33 ---help---
34 Say Y here if you want to use Precision Time Protocol (PTP) in the
35 driver. PTP is a method to precisely synchronize distributed clocks
36 over Ethernet networks.
37
38endif # PCH_GBE
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index b07311eaa693..7fb7e178c74e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -649,7 +649,6 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
649extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, 649extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
650 struct pch_gbe_rx_ring *rx_ring); 650 struct pch_gbe_rx_ring *rx_ring);
651extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter); 651extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
652#ifdef CONFIG_PCH_PTP
653extern u32 pch_ch_control_read(struct pci_dev *pdev); 652extern u32 pch_ch_control_read(struct pci_dev *pdev);
654extern void pch_ch_control_write(struct pci_dev *pdev, u32 val); 653extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
655extern u32 pch_ch_event_read(struct pci_dev *pdev); 654extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@ extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
659extern u64 pch_rx_snap_read(struct pci_dev *pdev); 658extern u64 pch_rx_snap_read(struct pci_dev *pdev);
660extern u64 pch_tx_snap_read(struct pci_dev *pdev); 659extern u64 pch_tx_snap_read(struct pci_dev *pdev);
661extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev); 660extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
662#endif
663 661
664/* pch_gbe_param.c */ 662/* pch_gbe_param.c */
665extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter); 663extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4c4fe5b1a29a..39ab4d09faaa 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -21,10 +21,8 @@
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/module.h> 23#include <linux/module.h>
24#ifdef CONFIG_PCH_PTP
25#include <linux/net_tstamp.h> 24#include <linux/net_tstamp.h>
26#include <linux/ptp_classify.h> 25#include <linux/ptp_classify.h>
27#endif
28 26
29#define DRV_VERSION "1.01" 27#define DRV_VERSION "1.01"
30const char pch_driver_version[] = DRV_VERSION; 28const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@ const char pch_driver_version[] = DRV_VERSION;
98 96
99#define PCH_GBE_INT_DISABLE_ALL 0 97#define PCH_GBE_INT_DISABLE_ALL 0
100 98
101#ifdef CONFIG_PCH_PTP
102/* Macros for ieee1588 */ 99/* Macros for ieee1588 */
103/* 0x40 Time Synchronization Channel Control Register Bits */ 100/* 0x40 Time Synchronization Channel Control Register Bits */
104#define MASTER_MODE (1<<0) 101#define MASTER_MODE (1<<0)
@@ -113,7 +110,6 @@ const char pch_driver_version[] = DRV_VERSION;
113 110
114#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81" 111#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00" 112#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116#endif
117 113
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 114static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
119 115
@@ -122,7 +118,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122 int data); 118 int data);
123static void pch_gbe_set_multi(struct net_device *netdev); 119static void pch_gbe_set_multi(struct net_device *netdev);
124 120
125#ifdef CONFIG_PCH_PTP
126static struct sock_filter ptp_filter[] = { 121static struct sock_filter ptp_filter[] = {
127 PTP_FILTER 122 PTP_FILTER
128}; 123};
@@ -291,7 +286,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
291 286
292 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; 287 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
293} 288}
294#endif
295 289
296inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) 290inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
297{ 291{
@@ -1244,9 +1238,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1244 (int)sizeof(struct pch_gbe_tx_desc) * ring_num, 1238 (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
1245 &hw->reg->TX_DSC_SW_P); 1239 &hw->reg->TX_DSC_SW_P);
1246 1240
1247#ifdef CONFIG_PCH_PTP
1248 pch_tx_timestamp(adapter, skb); 1241 pch_tx_timestamp(adapter, skb);
1249#endif
1250 1242
1251 dev_kfree_skb_any(skb); 1243 dev_kfree_skb_any(skb);
1252} 1244}
@@ -1730,9 +1722,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1730 /* Write meta date of skb */ 1722 /* Write meta date of skb */
1731 skb_put(skb, length); 1723 skb_put(skb, length);
1732 1724
1733#ifdef CONFIG_PCH_PTP
1734 pch_rx_timestamp(adapter, skb); 1725 pch_rx_timestamp(adapter, skb);
1735#endif
1736 1726
1737 skb->protocol = eth_type_trans(skb, netdev); 1727 skb->protocol = eth_type_trans(skb, netdev);
1738 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) 1728 if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2334,10 +2324,8 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2334 2324
2335 pr_debug("cmd : 0x%04x\n", cmd); 2325 pr_debug("cmd : 0x%04x\n", cmd);
2336 2326
2337#ifdef CONFIG_PCH_PTP
2338 if (cmd == SIOCSHWTSTAMP) 2327 if (cmd == SIOCSHWTSTAMP)
2339 return hwtstamp_ioctl(netdev, ifr, cmd); 2328 return hwtstamp_ioctl(netdev, ifr, cmd);
2340#endif
2341 2329
2342 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); 2330 return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
2343} 2331}
@@ -2623,14 +2611,12 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2623 goto err_free_netdev; 2611 goto err_free_netdev;
2624 } 2612 }
2625 2613
2626#ifdef CONFIG_PCH_PTP
2627 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number, 2614 adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
2628 PCI_DEVFN(12, 4)); 2615 PCI_DEVFN(12, 4));
2629 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { 2616 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
2630 pr_err("Bad ptp filter\n"); 2617 pr_err("Bad ptp filter\n");
2631 return -EINVAL; 2618 return -EINVAL;
2632 } 2619 }
2633#endif
2634 2620
2635 netdev->netdev_ops = &pch_gbe_netdev_ops; 2621 netdev->netdev_ops = &pch_gbe_netdev_ops;
2636 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2622 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index c2367158350e..bf829ee30077 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -166,7 +166,7 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
166#include <asm/unaligned.h> 166#include <asm/unaligned.h>
167#include <asm/cache.h> 167#include <asm/cache.h>
168 168
169static const char version[] __devinitconst = 169static const char version[] =
170KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 170KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
171" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n" 171" Some modifications by Eric kasten <kasten@nscl.msu.edu>\n"
172" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n"; 172" Further modifications by Keith Underwood <keithu@parl.clemson.edu>\n";
@@ -576,8 +576,8 @@ static const struct net_device_ops hamachi_netdev_ops = {
576}; 576};
577 577
578 578
579static int __devinit hamachi_init_one (struct pci_dev *pdev, 579static int hamachi_init_one(struct pci_dev *pdev,
580 const struct pci_device_id *ent) 580 const struct pci_device_id *ent)
581{ 581{
582 struct hamachi_private *hmp; 582 struct hamachi_private *hmp;
583 int option, i, rx_int_var, tx_int_var, boguscnt; 583 int option, i, rx_int_var, tx_int_var, boguscnt;
@@ -791,7 +791,7 @@ err_out:
791 return ret; 791 return ret;
792} 792}
793 793
794static int __devinit read_eeprom(void __iomem *ioaddr, int location) 794static int read_eeprom(void __iomem *ioaddr, int location)
795{ 795{
796 int bogus_cnt = 1000; 796 int bogus_cnt = 1000;
797 797
@@ -1894,7 +1894,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1894} 1894}
1895 1895
1896 1896
1897static void __devexit hamachi_remove_one (struct pci_dev *pdev) 1897static void hamachi_remove_one(struct pci_dev *pdev)
1898{ 1898{
1899 struct net_device *dev = pci_get_drvdata(pdev); 1899 struct net_device *dev = pci_get_drvdata(pdev);
1900 1900
@@ -1923,7 +1923,7 @@ static struct pci_driver hamachi_driver = {
1923 .name = DRV_NAME, 1923 .name = DRV_NAME,
1924 .id_table = hamachi_pci_tbl, 1924 .id_table = hamachi_pci_tbl,
1925 .probe = hamachi_init_one, 1925 .probe = hamachi_init_one,
1926 .remove = __devexit_p(hamachi_remove_one), 1926 .remove = hamachi_remove_one,
1927}; 1927};
1928 1928
1929static int __init hamachi_init (void) 1929static int __init hamachi_init (void)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 04e622fd468d..fbaed4fa72fa 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -106,7 +106,7 @@ static int gx_fix;
106#include <asm/io.h> 106#include <asm/io.h>
107 107
108/* These identify the driver base version and may not be removed. */ 108/* These identify the driver base version and may not be removed. */
109static const char version[] __devinitconst = 109static const char version[] =
110 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" 110 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
111 " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; 111 " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
112 112
@@ -367,8 +367,8 @@ static const struct net_device_ops netdev_ops = {
367 .ndo_tx_timeout = yellowfin_tx_timeout, 367 .ndo_tx_timeout = yellowfin_tx_timeout,
368}; 368};
369 369
370static int __devinit yellowfin_init_one(struct pci_dev *pdev, 370static int yellowfin_init_one(struct pci_dev *pdev,
371 const struct pci_device_id *ent) 371 const struct pci_device_id *ent)
372{ 372{
373 struct net_device *dev; 373 struct net_device *dev;
374 struct yellowfin_private *np; 374 struct yellowfin_private *np;
@@ -522,7 +522,7 @@ err_out_free_netdev:
522 return -ENODEV; 522 return -ENODEV;
523} 523}
524 524
525static int __devinit read_eeprom(void __iomem *ioaddr, int location) 525static int read_eeprom(void __iomem *ioaddr, int location)
526{ 526{
527 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */ 527 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
528 528
@@ -1372,7 +1372,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1372} 1372}
1373 1373
1374 1374
1375static void __devexit yellowfin_remove_one (struct pci_dev *pdev) 1375static void yellowfin_remove_one(struct pci_dev *pdev)
1376{ 1376{
1377 struct net_device *dev = pci_get_drvdata(pdev); 1377 struct net_device *dev = pci_get_drvdata(pdev);
1378 struct yellowfin_private *np; 1378 struct yellowfin_private *np;
@@ -1399,7 +1399,7 @@ static struct pci_driver yellowfin_driver = {
1399 .name = DRV_NAME, 1399 .name = DRV_NAME,
1400 .id_table = yellowfin_pci_tbl, 1400 .id_table = yellowfin_pci_tbl,
1401 .probe = yellowfin_init_one, 1401 .probe = yellowfin_init_one,
1402 .remove = __devexit_p(yellowfin_remove_one), 1402 .remove = yellowfin_remove_one,
1403}; 1403};
1404 1404
1405 1405
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 6fa74d530e44..0be5844d6372 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1727,7 +1727,7 @@ static const struct net_device_ops pasemi_netdev_ops = {
1727#endif 1727#endif
1728}; 1728};
1729 1729
1730static int __devinit 1730static int
1731pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1731pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1732{ 1732{
1733 struct net_device *dev; 1733 struct net_device *dev;
@@ -1849,7 +1849,7 @@ out_disable_device:
1849 1849
1850} 1850}
1851 1851
1852static void __devexit pasemi_mac_remove(struct pci_dev *pdev) 1852static void pasemi_mac_remove(struct pci_dev *pdev)
1853{ 1853{
1854 struct net_device *netdev = pci_get_drvdata(pdev); 1854 struct net_device *netdev = pci_get_drvdata(pdev);
1855 struct pasemi_mac *mac; 1855 struct pasemi_mac *mac;
@@ -1884,7 +1884,7 @@ static struct pci_driver pasemi_mac_driver = {
1884 .name = "pasemi_mac", 1884 .name = "pasemi_mac",
1885 .id_table = pasemi_mac_pci_tbl, 1885 .id_table = pasemi_mac_pci_tbl,
1886 .probe = pasemi_mac_probe, 1886 .probe = pasemi_mac_probe,
1887 .remove = __devexit_p(pasemi_mac_remove), 1887 .remove = pasemi_mac_remove,
1888}; 1888};
1889 1889
1890static void __exit pasemi_mac_cleanup_module(void) 1890static void __exit pasemi_mac_cleanup_module(void)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 10468e7932dd..4ca2c196c98a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -218,7 +218,7 @@ skip:
218 check_sfp_module = netif_running(dev) && 218 check_sfp_module = netif_running(dev) &&
219 adapter->has_link_events; 219 adapter->has_link_events;
220 } else { 220 } else {
221 ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); 221 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
222 ecmd->advertising |= 222 ecmd->advertising |=
223 (ADVERTISED_TP | ADVERTISED_Autoneg); 223 (ADVERTISED_TP | ADVERTISED_Autoneg);
224 ecmd->port = PORT_TP; 224 ecmd->port = PORT_TP;
@@ -381,7 +381,7 @@ static u32 netxen_nic_test_link(struct net_device *dev)
381 381
382static int 382static int
383netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 383netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
384 u8 * bytes) 384 u8 *bytes)
385{ 385{
386 struct netxen_adapter *adapter = netdev_priv(dev); 386 struct netxen_adapter *adapter = netdev_priv(dev);
387 int offset; 387 int offset;
@@ -488,6 +488,8 @@ netxen_nic_get_pauseparam(struct net_device *dev,
488 __u32 val; 488 __u32 val;
489 int port = adapter->physical_port; 489 int port = adapter->physical_port;
490 490
491 pause->autoneg = 0;
492
491 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 493 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
492 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) 494 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
493 return; 495 return;
@@ -496,19 +498,19 @@ netxen_nic_get_pauseparam(struct net_device *dev,
496 pause->rx_pause = netxen_gb_get_rx_flowctl(val); 498 pause->rx_pause = netxen_gb_get_rx_flowctl(val);
497 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); 499 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
498 switch (port) { 500 switch (port) {
499 case 0: 501 case 0:
500 pause->tx_pause = !(netxen_gb_get_gb0_mask(val)); 502 pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
501 break; 503 break;
502 case 1: 504 case 1:
503 pause->tx_pause = !(netxen_gb_get_gb1_mask(val)); 505 pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
504 break; 506 break;
505 case 2: 507 case 2:
506 pause->tx_pause = !(netxen_gb_get_gb2_mask(val)); 508 pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
507 break; 509 break;
508 case 3: 510 case 3:
509 default: 511 default:
510 pause->tx_pause = !(netxen_gb_get_gb3_mask(val)); 512 pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
511 break; 513 break;
512 } 514 }
513 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 515 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
514 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS)) 516 if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
@@ -532,6 +534,11 @@ netxen_nic_set_pauseparam(struct net_device *dev,
532 struct netxen_adapter *adapter = netdev_priv(dev); 534 struct netxen_adapter *adapter = netdev_priv(dev);
533 __u32 val; 535 __u32 val;
534 int port = adapter->physical_port; 536 int port = adapter->physical_port;
537
538 /* not supported */
539 if (pause->autoneg)
540 return -EINVAL;
541
535 /* read mode */ 542 /* read mode */
536 if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 543 if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
537 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS)) 544 if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
@@ -549,31 +556,31 @@ netxen_nic_set_pauseparam(struct net_device *dev,
549 /* set autoneg */ 556 /* set autoneg */
550 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL); 557 val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
551 switch (port) { 558 switch (port) {
552 case 0: 559 case 0:
553 if (pause->tx_pause) 560 if (pause->tx_pause)
554 netxen_gb_unset_gb0_mask(val); 561 netxen_gb_unset_gb0_mask(val);
555 else 562 else
556 netxen_gb_set_gb0_mask(val); 563 netxen_gb_set_gb0_mask(val);
557 break; 564 break;
558 case 1: 565 case 1:
559 if (pause->tx_pause) 566 if (pause->tx_pause)
560 netxen_gb_unset_gb1_mask(val); 567 netxen_gb_unset_gb1_mask(val);
561 else 568 else
562 netxen_gb_set_gb1_mask(val); 569 netxen_gb_set_gb1_mask(val);
563 break; 570 break;
564 case 2: 571 case 2:
565 if (pause->tx_pause) 572 if (pause->tx_pause)
566 netxen_gb_unset_gb2_mask(val); 573 netxen_gb_unset_gb2_mask(val);
567 else 574 else
568 netxen_gb_set_gb2_mask(val); 575 netxen_gb_set_gb2_mask(val);
569 break; 576 break;
570 case 3: 577 case 3:
571 default: 578 default:
572 if (pause->tx_pause) 579 if (pause->tx_pause)
573 netxen_gb_unset_gb3_mask(val); 580 netxen_gb_unset_gb3_mask(val);
574 else 581 else
575 netxen_gb_set_gb3_mask(val); 582 netxen_gb_set_gb3_mask(val);
576 break; 583 break;
577 } 584 }
578 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val); 585 NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
579 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 586 } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
@@ -636,7 +643,7 @@ static int netxen_get_sset_count(struct net_device *dev, int sset)
636 643
637static void 644static void
638netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test, 645netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
639 u64 * data) 646 u64 *data)
640{ 647{
641 memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN); 648 memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
642 if ((data[0] = netxen_nic_reg_test(dev))) 649 if ((data[0] = netxen_nic_reg_test(dev)))
@@ -647,7 +654,7 @@ netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
647} 654}
648 655
649static void 656static void
650netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data) 657netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
651{ 658{
652 int index; 659 int index;
653 660
@@ -668,7 +675,7 @@ netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
668 675
669static void 676static void
670netxen_nic_get_ethtool_stats(struct net_device *dev, 677netxen_nic_get_ethtool_stats(struct net_device *dev,
671 struct ethtool_stats *stats, u64 * data) 678 struct ethtool_stats *stats, u64 *data)
672{ 679{
673 struct netxen_adapter *adapter = netdev_priv(dev); 680 struct netxen_adapter *adapter = netdev_priv(dev);
674 int index; 681 int index;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index df450616ab37..6098fd4adfeb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -60,9 +60,9 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644); 60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled"); 61MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled");
62 62
63static int __devinit netxen_nic_probe(struct pci_dev *pdev, 63static int netxen_nic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent); 64 const struct pci_device_id *ent);
65static void __devexit netxen_nic_remove(struct pci_dev *pdev); 65static void netxen_nic_remove(struct pci_dev *pdev);
66static int netxen_nic_open(struct net_device *netdev); 66static int netxen_nic_open(struct net_device *netdev);
67static int netxen_nic_close(struct net_device *netdev); 67static int netxen_nic_close(struct net_device *netdev);
68static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *, 68static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
@@ -1397,7 +1397,7 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
1397} 1397}
1398#endif 1398#endif
1399 1399
1400static int __devinit 1400static int
1401netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1401netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1402{ 1402{
1403 struct net_device *netdev = NULL; 1403 struct net_device *netdev = NULL;
@@ -1569,7 +1569,7 @@ void netxen_cleanup_minidump(struct netxen_adapter *adapter)
1569 } 1569 }
1570} 1570}
1571 1571
1572static void __devexit netxen_nic_remove(struct pci_dev *pdev) 1572static void netxen_nic_remove(struct pci_dev *pdev)
1573{ 1573{
1574 struct netxen_adapter *adapter; 1574 struct netxen_adapter *adapter;
1575 struct net_device *netdev; 1575 struct net_device *netdev;
@@ -3350,7 +3350,7 @@ static struct pci_driver netxen_driver = {
3350 .name = netxen_nic_driver_name, 3350 .name = netxen_nic_driver_name,
3351 .id_table = netxen_pci_tbl, 3351 .id_table = netxen_pci_tbl,
3352 .probe = netxen_nic_probe, 3352 .probe = netxen_nic_probe,
3353 .remove = __devexit_p(netxen_nic_remove), 3353 .remove = netxen_nic_remove,
3354#ifdef CONFIG_PM 3354#ifdef CONFIG_PM
3355 .suspend = netxen_nic_suspend, 3355 .suspend = netxen_nic_suspend,
3356 .resume = netxen_nic_resume, 3356 .resume = netxen_nic_resume,
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 6407d0d77e81..67a679aaf29a 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1920,7 +1920,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1920{ 1920{
1921 struct ql_tx_buf_cb *tx_cb; 1921 struct ql_tx_buf_cb *tx_cb;
1922 int i; 1922 int i;
1923 int retval = 0;
1924 1923
1925 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1924 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
1926 netdev_warn(qdev->ndev, 1925 netdev_warn(qdev->ndev,
@@ -1935,7 +1934,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1935 "Frame too short to be legal, frame not sent\n"); 1934 "Frame too short to be legal, frame not sent\n");
1936 1935
1937 qdev->ndev->stats.tx_errors++; 1936 qdev->ndev->stats.tx_errors++;
1938 retval = -EIO;
1939 goto frame_not_sent; 1937 goto frame_not_sent;
1940 } 1938 }
1941 1939
@@ -1944,7 +1942,6 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1944 mac_rsp->transaction_id); 1942 mac_rsp->transaction_id);
1945 1943
1946 qdev->ndev->stats.tx_errors++; 1944 qdev->ndev->stats.tx_errors++;
1947 retval = -EIO;
1948 goto invalid_seg_count; 1945 goto invalid_seg_count;
1949 } 1946 }
1950 1947
@@ -3772,8 +3769,8 @@ static const struct net_device_ops ql3xxx_netdev_ops = {
3772 .ndo_tx_timeout = ql3xxx_tx_timeout, 3769 .ndo_tx_timeout = ql3xxx_tx_timeout,
3773}; 3770};
3774 3771
3775static int __devinit ql3xxx_probe(struct pci_dev *pdev, 3772static int ql3xxx_probe(struct pci_dev *pdev,
3776 const struct pci_device_id *pci_entry) 3773 const struct pci_device_id *pci_entry)
3777{ 3774{
3778 struct net_device *ndev = NULL; 3775 struct net_device *ndev = NULL;
3779 struct ql3_adapter *qdev = NULL; 3776 struct ql3_adapter *qdev = NULL;
@@ -3928,7 +3925,7 @@ err_out:
3928 return err; 3925 return err;
3929} 3926}
3930 3927
3931static void __devexit ql3xxx_remove(struct pci_dev *pdev) 3928static void ql3xxx_remove(struct pci_dev *pdev)
3932{ 3929{
3933 struct net_device *ndev = pci_get_drvdata(pdev); 3930 struct net_device *ndev = pci_get_drvdata(pdev);
3934 struct ql3_adapter *qdev = netdev_priv(ndev); 3931 struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -3955,18 +3952,7 @@ static struct pci_driver ql3xxx_driver = {
3955 .name = DRV_NAME, 3952 .name = DRV_NAME,
3956 .id_table = ql3xxx_pci_tbl, 3953 .id_table = ql3xxx_pci_tbl,
3957 .probe = ql3xxx_probe, 3954 .probe = ql3xxx_probe,
3958 .remove = __devexit_p(ql3xxx_remove), 3955 .remove = ql3xxx_remove,
3959}; 3956};
3960 3957
3961static int __init ql3xxx_init_module(void) 3958module_pci_driver(ql3xxx_driver);
3962{
3963 return pci_register_driver(&ql3xxx_driver);
3964}
3965
3966static void __exit ql3xxx_exit(void)
3967{
3968 pci_unregister_driver(&ql3xxx_driver);
3969}
3970
3971module_init(ql3xxx_init_module);
3972module_exit(ql3xxx_exit);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index ddba83ef3f44..c4b8ced83829 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -5,4 +5,5 @@
5obj-$(CONFIG_QLCNIC) := qlcnic.o 5obj-$(CONFIG_QLCNIC) := qlcnic.o
6 6
7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \ 7qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o 8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
9 qlcnic_sysfs.o qlcnic_minidump.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index eaa1db9fec32..537902479689 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -89,16 +89,6 @@
89#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 89#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
90#define QLCNIC_LRO_BUFFER_EXTRA 2048 90#define QLCNIC_LRO_BUFFER_EXTRA 2048
91 91
92/* Opcodes to be used with the commands */
93#define TX_ETHER_PKT 0x01
94#define TX_TCP_PKT 0x02
95#define TX_UDP_PKT 0x03
96#define TX_IP_PKT 0x04
97#define TX_TCP_LSO 0x05
98#define TX_TCP_LSO6 0x06
99#define TX_TCPV6_PKT 0x0b
100#define TX_UDPV6_PKT 0x0c
101
102/* Tx defines */ 92/* Tx defines */
103#define QLCNIC_MAX_FRAGS_PER_TX 14 93#define QLCNIC_MAX_FRAGS_PER_TX 14
104#define MAX_TSO_HEADER_DESC 2 94#define MAX_TSO_HEADER_DESC 2
@@ -147,28 +137,6 @@
147 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when 137 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
148 * we are doing LSO (above the 1500 size packet) only. 138 * we are doing LSO (above the 1500 size packet) only.
149 */ 139 */
150
151#define FLAGS_VLAN_TAGGED 0x10
152#define FLAGS_VLAN_OOB 0x40
153
154#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
155 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
156#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
157 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
158#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
159 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
160
161#define qlcnic_set_tx_port(_desc, _port) \
162 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
163
164#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
165 ((_desc)->flags_opcode |= \
166 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
167
168#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
169 ((_desc)->nfrags__length = \
170 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
171
172struct cmd_desc_type0 { 140struct cmd_desc_type0 {
173 u8 tcp_hdr_offset; /* For LSO only */ 141 u8 tcp_hdr_offset; /* For LSO only */
174 u8 ip_hdr_offset; /* For LSO only */ 142 u8 ip_hdr_offset; /* For LSO only */
@@ -203,65 +171,6 @@ struct rcv_desc {
203 __le64 addr_buffer; 171 __le64 addr_buffer;
204} __packed; 172} __packed;
205 173
206/* opcode field in status_desc */
207#define QLCNIC_SYN_OFFLOAD 0x03
208#define QLCNIC_RXPKT_DESC 0x04
209#define QLCNIC_OLD_RXPKT_DESC 0x3f
210#define QLCNIC_RESPONSE_DESC 0x05
211#define QLCNIC_LRO_DESC 0x12
212
213/* for status field in status_desc */
214#define STATUS_CKSUM_LOOP 0
215#define STATUS_CKSUM_OK 2
216
217/* owner bits of status_desc */
218#define STATUS_OWNER_HOST (0x1ULL << 56)
219#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
220
221/* Status descriptor:
222 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
223 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
224 53-55 desc_cnt, 56-57 owner, 58-63 opcode
225 */
226#define qlcnic_get_sts_port(sts_data) \
227 ((sts_data) & 0x0F)
228#define qlcnic_get_sts_status(sts_data) \
229 (((sts_data) >> 4) & 0x0F)
230#define qlcnic_get_sts_type(sts_data) \
231 (((sts_data) >> 8) & 0x0F)
232#define qlcnic_get_sts_totallength(sts_data) \
233 (((sts_data) >> 12) & 0xFFFF)
234#define qlcnic_get_sts_refhandle(sts_data) \
235 (((sts_data) >> 28) & 0xFFFF)
236#define qlcnic_get_sts_prot(sts_data) \
237 (((sts_data) >> 44) & 0x0F)
238#define qlcnic_get_sts_pkt_offset(sts_data) \
239 (((sts_data) >> 48) & 0x1F)
240#define qlcnic_get_sts_desc_cnt(sts_data) \
241 (((sts_data) >> 53) & 0x7)
242#define qlcnic_get_sts_opcode(sts_data) \
243 (((sts_data) >> 58) & 0x03F)
244
245#define qlcnic_get_lro_sts_refhandle(sts_data) \
246 ((sts_data) & 0x0FFFF)
247#define qlcnic_get_lro_sts_length(sts_data) \
248 (((sts_data) >> 16) & 0x0FFFF)
249#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
250 (((sts_data) >> 32) & 0x0FF)
251#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
252 (((sts_data) >> 40) & 0x0FF)
253#define qlcnic_get_lro_sts_timestamp(sts_data) \
254 (((sts_data) >> 48) & 0x1)
255#define qlcnic_get_lro_sts_type(sts_data) \
256 (((sts_data) >> 49) & 0x7)
257#define qlcnic_get_lro_sts_push_flag(sts_data) \
258 (((sts_data) >> 52) & 0x1)
259#define qlcnic_get_lro_sts_seq_number(sts_data) \
260 ((sts_data) & 0x0FFFFFFFF)
261#define qlcnic_get_lro_sts_mss(sts_data1) \
262 ((sts_data1 >> 32) & 0x0FFFF)
263
264
265struct status_desc { 174struct status_desc {
266 __le64 status_desc_data[2]; 175 __le64 status_desc_data[2];
267} __attribute__ ((aligned(16))); 176} __attribute__ ((aligned(16)));
@@ -280,16 +189,16 @@ struct status_desc {
280#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29 189#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
281 190
282struct uni_table_desc{ 191struct uni_table_desc{
283 u32 findex; 192 __le32 findex;
284 u32 num_entries; 193 __le32 num_entries;
285 u32 entry_size; 194 __le32 entry_size;
286 u32 reserved[5]; 195 __le32 reserved[5];
287}; 196};
288 197
289struct uni_data_desc{ 198struct uni_data_desc{
290 u32 findex; 199 __le32 findex;
291 u32 size; 200 __le32 size;
292 u32 reserved[5]; 201 __le32 reserved[5];
293}; 202};
294 203
295/* Flash Defines and Structures */ 204/* Flash Defines and Structures */
@@ -416,19 +325,19 @@ struct qlcnic_nic_intr_coalesce {
416}; 325};
417 326
418struct qlcnic_dump_template_hdr { 327struct qlcnic_dump_template_hdr {
419 __le32 type; 328 u32 type;
420 __le32 offset; 329 u32 offset;
421 __le32 size; 330 u32 size;
422 __le32 cap_mask; 331 u32 cap_mask;
423 __le32 num_entries; 332 u32 num_entries;
424 __le32 version; 333 u32 version;
425 __le32 timestamp; 334 u32 timestamp;
426 __le32 checksum; 335 u32 checksum;
427 __le32 drv_cap_mask; 336 u32 drv_cap_mask;
428 __le32 sys_info[3]; 337 u32 sys_info[3];
429 __le32 saved_state[16]; 338 u32 saved_state[16];
430 __le32 cap_sizes[8]; 339 u32 cap_sizes[8];
431 __le32 rsvd[0]; 340 u32 rsvd[0];
432}; 341};
433 342
434struct qlcnic_fw_dump { 343struct qlcnic_fw_dump {
@@ -456,11 +365,41 @@ struct qlcnic_hardware_context {
456 u8 pci_func; 365 u8 pci_func;
457 u8 linkup; 366 u8 linkup;
458 u8 loopback_state; 367 u8 loopback_state;
368 u8 beacon_state;
369 u8 has_link_events;
370 u8 fw_type;
371 u8 physical_port;
372 u8 reset_context;
373 u8 msix_supported;
374 u8 max_mac_filters;
375 u8 mc_enabled;
376 u8 max_mc_count;
377 u8 diag_test;
378 u8 num_msix;
379 u8 nic_mode;
380 char diag_cnt;
381
459 u16 port_type; 382 u16 port_type;
460 u16 board_type; 383 u16 board_type;
461 384
462 u8 beacon_state; 385 u16 link_speed;
386 u16 link_duplex;
387 u16 link_autoneg;
388 u16 module_type;
463 389
390 u16 op_mode;
391 u16 switch_mode;
392 u16 max_tx_ques;
393 u16 max_rx_ques;
394 u16 max_mtu;
395 u32 msg_enable;
396 u16 act_pci_func;
397
398 u32 capabilities;
399 u32 temp;
400 u32 int_vec_bit;
401 u32 fw_hal_version;
402 struct qlcnic_hardware_ops *hw_ops;
464 struct qlcnic_nic_intr_coalesce coal; 403 struct qlcnic_nic_intr_coalesce coal;
465 struct qlcnic_fw_dump fw_dump; 404 struct qlcnic_fw_dump fw_dump;
466}; 405};
@@ -521,6 +460,7 @@ struct qlcnic_host_sds_ring {
521} ____cacheline_internodealigned_in_smp; 460} ____cacheline_internodealigned_in_smp;
522 461
523struct qlcnic_host_tx_ring { 462struct qlcnic_host_tx_ring {
463 u16 ctx_id;
524 u32 producer; 464 u32 producer;
525 u32 sw_consumer; 465 u32 sw_consumer;
526 u32 num_desc; 466 u32 num_desc;
@@ -985,6 +925,7 @@ struct qlcnic_adapter {
985 unsigned long state; 925 unsigned long state;
986 u32 flags; 926 u32 flags;
987 927
928 int max_drv_tx_rings;
988 u16 num_txd; 929 u16 num_txd;
989 u16 num_rxd; 930 u16 num_rxd;
990 u16 num_jumbo_rxd; 931 u16 num_jumbo_rxd;
@@ -993,57 +934,28 @@ struct qlcnic_adapter {
993 934
994 u8 max_rds_rings; 935 u8 max_rds_rings;
995 u8 max_sds_rings; 936 u8 max_sds_rings;
996 u8 msix_supported;
997 u8 portnum; 937 u8 portnum;
998 u8 physical_port;
999 u8 reset_context;
1000 938
1001 u8 mc_enabled;
1002 u8 max_mc_count;
1003 u8 fw_wait_cnt; 939 u8 fw_wait_cnt;
1004 u8 fw_fail_cnt; 940 u8 fw_fail_cnt;
1005 u8 tx_timeo_cnt; 941 u8 tx_timeo_cnt;
1006 u8 need_fw_reset; 942 u8 need_fw_reset;
1007 943
1008 u8 has_link_events;
1009 u8 fw_type;
1010 u16 tx_context_id;
1011 u16 is_up; 944 u16 is_up;
1012
1013 u16 link_speed;
1014 u16 link_duplex;
1015 u16 link_autoneg;
1016 u16 module_type;
1017
1018 u16 op_mode;
1019 u16 switch_mode;
1020 u16 max_tx_ques;
1021 u16 max_rx_ques;
1022 u16 max_mtu;
1023 u16 pvid; 945 u16 pvid;
1024 946
1025 u32 fw_hal_version;
1026 u32 capabilities;
1027 u32 irq; 947 u32 irq;
1028 u32 temp;
1029
1030 u32 int_vec_bit;
1031 u32 heartbeat; 948 u32 heartbeat;
1032 949
1033 u8 max_mac_filters;
1034 u8 dev_state; 950 u8 dev_state;
1035 u8 diag_test;
1036 char diag_cnt;
1037 u8 reset_ack_timeo; 951 u8 reset_ack_timeo;
1038 u8 dev_init_timeo; 952 u8 dev_init_timeo;
1039 u16 msg_enable;
1040 953
1041 u8 mac_addr[ETH_ALEN]; 954 u8 mac_addr[ETH_ALEN];
1042 955
1043 u64 dev_rst_time; 956 u64 dev_rst_time;
1044 u8 mac_learn; 957 u8 mac_learn;
1045 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)]; 958 unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
1046
1047 struct qlcnic_npar_info *npars; 959 struct qlcnic_npar_info *npars;
1048 struct qlcnic_eswitch *eswitch; 960 struct qlcnic_eswitch *eswitch;
1049 struct qlcnic_nic_template *nic_ops; 961 struct qlcnic_nic_template *nic_ops;
@@ -1057,24 +969,22 @@ struct qlcnic_adapter {
1057 void __iomem *isr_int_vec; 969 void __iomem *isr_int_vec;
1058 970
1059 struct msix_entry *msix_entries; 971 struct msix_entry *msix_entries;
1060
1061 struct delayed_work fw_work; 972 struct delayed_work fw_work;
1062 973
1063
1064 struct qlcnic_filter_hash fhash; 974 struct qlcnic_filter_hash fhash;
1065 975
1066 spinlock_t tx_clean_lock; 976 spinlock_t tx_clean_lock;
1067 spinlock_t mac_learn_lock; 977 spinlock_t mac_learn_lock;
1068 __le32 file_prd_off; /*File fw product offset*/ 978 u32 file_prd_off; /*File fw product offset*/
1069 u32 fw_version; 979 u32 fw_version;
1070 const struct firmware *fw; 980 const struct firmware *fw;
1071}; 981};
1072 982
1073struct qlcnic_info { 983struct qlcnic_info_le {
1074 __le16 pci_func; 984 __le16 pci_func;
1075 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */ 985 __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
1076 __le16 phys_port; 986 __le16 phys_port;
1077 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */ 987 __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
1078 988
1079 __le32 capabilities; 989 __le32 capabilities;
1080 u8 max_mac_filters; 990 u8 max_mac_filters;
@@ -1088,13 +998,28 @@ struct qlcnic_info {
1088 u8 reserved2[104]; 998 u8 reserved2[104];
1089} __packed; 999} __packed;
1090 1000
1091struct qlcnic_pci_info { 1001struct qlcnic_info {
1092 __le16 id; /* pci function id */ 1002 u16 pci_func;
1093 __le16 active; /* 1 = Enabled */ 1003 u16 op_mode;
1094 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */ 1004 u16 phys_port;
1095 __le16 default_port; /* default port number */ 1005 u16 switch_mode;
1006 u32 capabilities;
1007 u8 max_mac_filters;
1008 u8 reserved1;
1009 u16 max_mtu;
1010 u16 max_tx_ques;
1011 u16 max_rx_ques;
1012 u16 min_tx_bw;
1013 u16 max_tx_bw;
1014};
1015
1016struct qlcnic_pci_info_le {
1017 __le16 id; /* pci function id */
1018 __le16 active; /* 1 = Enabled */
1019 __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
1020 __le16 default_port; /* default port number */
1096 1021
1097 __le16 tx_min_bw; /* Multiple of 100mbpc */ 1022 __le16 tx_min_bw; /* Multiple of 100mbpc */
1098 __le16 tx_max_bw; 1023 __le16 tx_max_bw;
1099 __le16 reserved1[2]; 1024 __le16 reserved1[2];
1100 1025
@@ -1102,6 +1027,16 @@ struct qlcnic_pci_info {
1102 u8 reserved2[106]; 1027 u8 reserved2[106];
1103} __packed; 1028} __packed;
1104 1029
1030struct qlcnic_pci_info {
1031 u16 id;
1032 u16 active;
1033 u16 type;
1034 u16 default_port;
1035 u16 tx_min_bw;
1036 u16 tx_max_bw;
1037 u8 mac[ETH_ALEN];
1038};
1039
1105struct qlcnic_npar_info { 1040struct qlcnic_npar_info {
1106 u16 pvid; 1041 u16 pvid;
1107 u16 min_bw; 1042 u16 min_bw;
@@ -1116,6 +1051,7 @@ struct qlcnic_npar_info {
1116 u8 mac_anti_spoof; 1051 u8 mac_anti_spoof;
1117 u8 promisc_mode; 1052 u8 promisc_mode;
1118 u8 offload_flags; 1053 u8 offload_flags;
1054 u8 pci_func;
1119}; 1055};
1120 1056
1121struct qlcnic_eswitch { 1057struct qlcnic_eswitch {
@@ -1208,7 +1144,7 @@ do { \
1208 (VAL1) += (VAL2); \ 1144 (VAL1) += (VAL2); \
1209} while (0) 1145} while (0)
1210 1146
1211struct qlcnic_mac_statistics{ 1147struct qlcnic_mac_statistics_le {
1212 __le64 mac_tx_frames; 1148 __le64 mac_tx_frames;
1213 __le64 mac_tx_bytes; 1149 __le64 mac_tx_bytes;
1214 __le64 mac_tx_mcast_pkts; 1150 __le64 mac_tx_mcast_pkts;
@@ -1248,7 +1184,45 @@ struct qlcnic_mac_statistics{
1248 __le64 mac_align_error; 1184 __le64 mac_align_error;
1249} __packed; 1185} __packed;
1250 1186
1251struct __qlcnic_esw_statistics { 1187struct qlcnic_mac_statistics {
1188 u64 mac_tx_frames;
1189 u64 mac_tx_bytes;
1190 u64 mac_tx_mcast_pkts;
1191 u64 mac_tx_bcast_pkts;
1192 u64 mac_tx_pause_cnt;
1193 u64 mac_tx_ctrl_pkt;
1194 u64 mac_tx_lt_64b_pkts;
1195 u64 mac_tx_lt_127b_pkts;
1196 u64 mac_tx_lt_255b_pkts;
1197 u64 mac_tx_lt_511b_pkts;
1198 u64 mac_tx_lt_1023b_pkts;
1199 u64 mac_tx_lt_1518b_pkts;
1200 u64 mac_tx_gt_1518b_pkts;
1201 u64 rsvd1[3];
1202 u64 mac_rx_frames;
1203 u64 mac_rx_bytes;
1204 u64 mac_rx_mcast_pkts;
1205 u64 mac_rx_bcast_pkts;
1206 u64 mac_rx_pause_cnt;
1207 u64 mac_rx_ctrl_pkt;
1208 u64 mac_rx_lt_64b_pkts;
1209 u64 mac_rx_lt_127b_pkts;
1210 u64 mac_rx_lt_255b_pkts;
1211 u64 mac_rx_lt_511b_pkts;
1212 u64 mac_rx_lt_1023b_pkts;
1213 u64 mac_rx_lt_1518b_pkts;
1214 u64 mac_rx_gt_1518b_pkts;
1215 u64 rsvd2[3];
1216 u64 mac_rx_length_error;
1217 u64 mac_rx_length_small;
1218 u64 mac_rx_length_large;
1219 u64 mac_rx_jabber;
1220 u64 mac_rx_dropped;
1221 u64 mac_rx_crc_error;
1222 u64 mac_align_error;
1223};
1224
1225struct qlcnic_esw_stats_le {
1252 __le16 context_id; 1226 __le16 context_id;
1253 __le16 version; 1227 __le16 version;
1254 __le16 size; 1228 __le16 size;
@@ -1263,147 +1237,27 @@ struct __qlcnic_esw_statistics {
1263 __le64 rsvd[3]; 1237 __le64 rsvd[3];
1264} __packed; 1238} __packed;
1265 1239
1240struct __qlcnic_esw_statistics {
1241 u16 context_id;
1242 u16 version;
1243 u16 size;
1244 u16 unused;
1245 u64 unicast_frames;
1246 u64 multicast_frames;
1247 u64 broadcast_frames;
1248 u64 dropped_frames;
1249 u64 errors;
1250 u64 local_frames;
1251 u64 numbytes;
1252 u64 rsvd[3];
1253};
1254
1266struct qlcnic_esw_statistics { 1255struct qlcnic_esw_statistics {
1267 struct __qlcnic_esw_statistics rx; 1256 struct __qlcnic_esw_statistics rx;
1268 struct __qlcnic_esw_statistics tx; 1257 struct __qlcnic_esw_statistics tx;
1269}; 1258};
1270 1259
1271struct qlcnic_common_entry_hdr {
1272 __le32 type;
1273 __le32 offset;
1274 __le32 cap_size;
1275 u8 mask;
1276 u8 rsvd[2];
1277 u8 flags;
1278} __packed;
1279
1280struct __crb {
1281 __le32 addr;
1282 u8 stride;
1283 u8 rsvd1[3];
1284 __le32 data_size;
1285 __le32 no_ops;
1286 __le32 rsvd2[4];
1287} __packed;
1288
1289struct __ctrl {
1290 __le32 addr;
1291 u8 stride;
1292 u8 index_a;
1293 __le16 timeout;
1294 __le32 data_size;
1295 __le32 no_ops;
1296 u8 opcode;
1297 u8 index_v;
1298 u8 shl_val;
1299 u8 shr_val;
1300 __le32 val1;
1301 __le32 val2;
1302 __le32 val3;
1303} __packed;
1304
1305struct __cache {
1306 __le32 addr;
1307 __le16 stride;
1308 __le16 init_tag_val;
1309 __le32 size;
1310 __le32 no_ops;
1311 __le32 ctrl_addr;
1312 __le32 ctrl_val;
1313 __le32 read_addr;
1314 u8 read_addr_stride;
1315 u8 read_addr_num;
1316 u8 rsvd1[2];
1317} __packed;
1318
1319struct __ocm {
1320 u8 rsvd[8];
1321 __le32 size;
1322 __le32 no_ops;
1323 u8 rsvd1[8];
1324 __le32 read_addr;
1325 __le32 read_addr_stride;
1326} __packed;
1327
1328struct __mem {
1329 u8 rsvd[24];
1330 __le32 addr;
1331 __le32 size;
1332} __packed;
1333
1334struct __mux {
1335 __le32 addr;
1336 u8 rsvd[4];
1337 __le32 size;
1338 __le32 no_ops;
1339 __le32 val;
1340 __le32 val_stride;
1341 __le32 read_addr;
1342 u8 rsvd2[4];
1343} __packed;
1344
1345struct __queue {
1346 __le32 sel_addr;
1347 __le16 stride;
1348 u8 rsvd[2];
1349 __le32 size;
1350 __le32 no_ops;
1351 u8 rsvd2[8];
1352 __le32 read_addr;
1353 u8 read_addr_stride;
1354 u8 read_addr_cnt;
1355 u8 rsvd3[2];
1356} __packed;
1357
1358struct qlcnic_dump_entry {
1359 struct qlcnic_common_entry_hdr hdr;
1360 union {
1361 struct __crb crb;
1362 struct __cache cache;
1363 struct __ocm ocm;
1364 struct __mem mem;
1365 struct __mux mux;
1366 struct __queue que;
1367 struct __ctrl ctrl;
1368 } region;
1369} __packed;
1370
1371enum op_codes {
1372 QLCNIC_DUMP_NOP = 0,
1373 QLCNIC_DUMP_READ_CRB = 1,
1374 QLCNIC_DUMP_READ_MUX = 2,
1375 QLCNIC_DUMP_QUEUE = 3,
1376 QLCNIC_DUMP_BRD_CONFIG = 4,
1377 QLCNIC_DUMP_READ_OCM = 6,
1378 QLCNIC_DUMP_PEG_REG = 7,
1379 QLCNIC_DUMP_L1_DTAG = 8,
1380 QLCNIC_DUMP_L1_ITAG = 9,
1381 QLCNIC_DUMP_L1_DATA = 11,
1382 QLCNIC_DUMP_L1_INST = 12,
1383 QLCNIC_DUMP_L2_DTAG = 21,
1384 QLCNIC_DUMP_L2_ITAG = 22,
1385 QLCNIC_DUMP_L2_DATA = 23,
1386 QLCNIC_DUMP_L2_INST = 24,
1387 QLCNIC_DUMP_READ_ROM = 71,
1388 QLCNIC_DUMP_READ_MEM = 72,
1389 QLCNIC_DUMP_READ_CTRL = 98,
1390 QLCNIC_DUMP_TLHDR = 99,
1391 QLCNIC_DUMP_RDEND = 255
1392};
1393
1394#define QLCNIC_DUMP_WCRB BIT_0
1395#define QLCNIC_DUMP_RWCRB BIT_1
1396#define QLCNIC_DUMP_ANDCRB BIT_2
1397#define QLCNIC_DUMP_ORCRB BIT_3
1398#define QLCNIC_DUMP_POLLCRB BIT_4
1399#define QLCNIC_DUMP_RD_SAVE BIT_5
1400#define QLCNIC_DUMP_WRT_SAVED BIT_6
1401#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
1402#define QLCNIC_DUMP_SKIP BIT_7
1403
1404#define QLCNIC_DUMP_MASK_MIN 3
1405#define QLCNIC_DUMP_MASK_DEF 0x1f 1260#define QLCNIC_DUMP_MASK_DEF 0x1f
1406#define QLCNIC_DUMP_MASK_MAX 0xff
1407#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed 1261#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
1408#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed 1262#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1409#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed 1263#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1411,12 +1265,6 @@ enum op_codes {
1411#define QLCNIC_SET_QUIESCENT 0xadd00010 1265#define QLCNIC_SET_QUIESCENT 0xadd00010
1412#define QLCNIC_RESET_QUIESCENT 0xadd00020 1266#define QLCNIC_RESET_QUIESCENT 0xadd00020
1413 1267
1414struct qlcnic_dump_operations {
1415 enum op_codes opcode;
1416 u32 (*handler)(struct qlcnic_adapter *,
1417 struct qlcnic_dump_entry *, u32 *);
1418};
1419
1420struct _cdrp_cmd { 1268struct _cdrp_cmd {
1421 u32 cmd; 1269 u32 cmd;
1422 u32 arg1; 1270 u32 arg1;
@@ -1432,7 +1280,7 @@ struct qlcnic_cmd_args {
1432int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); 1280int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
1433int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config); 1281int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
1434 1282
1435u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off); 1283int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
1436int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); 1284int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
1437int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); 1285int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
1438int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); 1286int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
@@ -1474,6 +1322,8 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
1474#define __QLCNIC_MAX_LED_RATE 0xf 1322#define __QLCNIC_MAX_LED_RATE 0xf
1475#define __QLCNIC_MAX_LED_STATE 0x2 1323#define __QLCNIC_MAX_LED_STATE 0x2
1476 1324
1325#define MAX_CTL_CHECK 1000
1326
1477int qlcnic_get_board_info(struct qlcnic_adapter *adapter); 1327int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
1478int qlcnic_wol_supported(struct qlcnic_adapter *adapter); 1328int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
1479int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate); 1329int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
@@ -1496,7 +1346,7 @@ int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
1496int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter); 1346int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
1497void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter); 1347void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
1498 1348
1499void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32); 1349void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
1500 1350
1501int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter); 1351int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
1502void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter); 1352void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
@@ -1530,9 +1380,8 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
1530int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable); 1380int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
1531int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable); 1381int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1532int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1382int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1533void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1383void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
1534 struct qlcnic_host_tx_ring *tx_ring); 1384void qlcnic_fetch_mac(u32, u32, u8, u8 *);
1535void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1536void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring); 1385void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1537void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter); 1386void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
1538int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode); 1387int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
@@ -1571,12 +1420,32 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1571int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *); 1420int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
1572extern int qlcnic_config_tso; 1421extern int qlcnic_config_tso;
1573 1422
1423int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
1424void qlcnic_napi_del(struct qlcnic_adapter *adapter);
1425void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
1426void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
1427int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
1428void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
1429void qlcnic_free_tx_rings(struct qlcnic_adapter *);
1430int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
1431
1432void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
1433void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
1434void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
1435void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
1436int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
1437int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
1438void qlcnic_set_vlan_config(struct qlcnic_adapter *,
1439 struct qlcnic_esw_func_cfg *);
1440void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
1441 struct qlcnic_esw_func_cfg *);
1442
1574/* 1443/*
1575 * QLOGIC Board information 1444 * QLOGIC Board information
1576 */ 1445 */
1577 1446
1578#define QLCNIC_MAX_BOARD_NAME_LEN 100 1447#define QLCNIC_MAX_BOARD_NAME_LEN 100
1579struct qlcnic_brdinfo { 1448struct qlcnic_board_info {
1580 unsigned short vendor; 1449 unsigned short vendor;
1581 unsigned short device; 1450 unsigned short device;
1582 unsigned short sub_vendor; 1451 unsigned short sub_vendor;
@@ -1584,30 +1453,6 @@ struct qlcnic_brdinfo {
1584 char short_name[QLCNIC_MAX_BOARD_NAME_LEN]; 1453 char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
1585}; 1454};
1586 1455
1587static const struct qlcnic_brdinfo qlcnic_boards[] = {
1588 {0x1077, 0x8020, 0x1077, 0x203,
1589 "8200 Series Single Port 10GbE Converged Network Adapter "
1590 "(TCP/IP Networking)"},
1591 {0x1077, 0x8020, 0x1077, 0x207,
1592 "8200 Series Dual Port 10GbE Converged Network Adapter "
1593 "(TCP/IP Networking)"},
1594 {0x1077, 0x8020, 0x1077, 0x20b,
1595 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
1596 {0x1077, 0x8020, 0x1077, 0x20c,
1597 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
1598 {0x1077, 0x8020, 0x1077, 0x20f,
1599 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1600 {0x1077, 0x8020, 0x103c, 0x3733,
1601 "NC523SFP 10Gb 2-port Server Adapter"},
1602 {0x1077, 0x8020, 0x103c, 0x3346,
1603 "CN1000Q Dual Port Converged Network Adapter"},
1604 {0x1077, 0x8020, 0x1077, 0x210,
1605 "QME8242-k 10GbE Dual Port Mezzanine Card"},
1606 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1607};
1608
1609#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
1610
1611static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) 1456static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1612{ 1457{
1613 if (likely(tx_ring->producer < tx_ring->sw_consumer)) 1458 if (likely(tx_ring->producer < tx_ring->sw_consumer))
@@ -1617,6 +1462,21 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1617 tx_ring->producer; 1462 tx_ring->producer;
1618} 1463}
1619 1464
1465static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
1466{
1467 writel(0, sds_ring->crb_intr_mask);
1468}
1469
1470static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
1471{
1472 struct qlcnic_adapter *adapter = sds_ring->adapter;
1473
1474 writel(0x1, sds_ring->crb_intr_mask);
1475
1476 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1477 writel(0xfbff, adapter->tgt_mask_reg);
1478}
1479
1620extern const struct ethtool_ops qlcnic_ethtool_ops; 1480extern const struct ethtool_ops qlcnic_ethtool_ops;
1621extern const struct ethtool_ops qlcnic_ethtool_failed_ops; 1481extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1622 1482
@@ -1627,10 +1487,17 @@ struct qlcnic_nic_template {
1627}; 1487};
1628 1488
1629#define QLCDB(adapter, lvl, _fmt, _args...) do { \ 1489#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1630 if (NETIF_MSG_##lvl & adapter->msg_enable) \ 1490 if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
1631 printk(KERN_INFO "%s: %s: " _fmt, \ 1491 printk(KERN_INFO "%s: %s: " _fmt, \
1632 dev_name(&adapter->pdev->dev), \ 1492 dev_name(&adapter->pdev->dev), \
1633 __func__, ##_args); \ 1493 __func__, ##_args); \
1634 } while (0) 1494 } while (0)
1635 1495
1496#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
1497static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1498{
1499 unsigned short device = adapter->pdev->device;
1500 return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
1501}
1502
1636#endif /* __QLCNIC_H_ */ 1503#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 2a179d087207..58f094ca052e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -7,6 +7,18 @@
7 7
8#include "qlcnic.h" 8#include "qlcnic.h"
9 9
10static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
11{
12 int i;
13
14 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
15 if (adapter->npars[i].pci_func == pci_func)
16 return i;
17 }
18
19 return -1;
20}
21
10static u32 22static u32
11qlcnic_poll_rsp(struct qlcnic_adapter *adapter) 23qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
12{ 24{
@@ -35,7 +47,7 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
35 struct qlcnic_hardware_context *ahw = adapter->ahw; 47 struct qlcnic_hardware_context *ahw = adapter->ahw;
36 48
37 signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func, 49 signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
38 adapter->fw_hal_version); 50 adapter->ahw->fw_hal_version);
39 51
40 /* Acquire semaphore before accessing CRB */ 52 /* Acquire semaphore before accessing CRB */
41 if (qlcnic_api_lock(adapter)) { 53 if (qlcnic_api_lock(adapter)) {
@@ -103,7 +115,7 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
103 115
104} 116}
105 117
106static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size) 118static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
107{ 119{
108 uint64_t sum = 0; 120 uint64_t sum = 0;
109 int count = temp_size / sizeof(uint32_t); 121 int count = temp_size / sizeof(uint32_t);
@@ -117,9 +129,9 @@ static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
117int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter) 129int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
118{ 130{
119 int err, i; 131 int err, i;
120 u16 temp_size;
121 void *tmp_addr; 132 void *tmp_addr;
122 u32 version, csum, *template, *tmp_buf; 133 u32 temp_size, version, csum, *template;
134 __le32 *tmp_buf;
123 struct qlcnic_cmd_args cmd; 135 struct qlcnic_cmd_args cmd;
124 struct qlcnic_hardware_context *ahw; 136 struct qlcnic_hardware_context *ahw;
125 struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl; 137 struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
@@ -163,13 +175,6 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
163 goto error; 175 goto error;
164 } 176 }
165 tmp_tmpl = tmp_addr; 177 tmp_tmpl = tmp_addr;
166 csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
167 if (csum) {
168 dev_err(&adapter->pdev->dev,
169 "Template header checksum validation failed\n");
170 err = -EIO;
171 goto error;
172 }
173 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size); 178 ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
174 if (!ahw->fw_dump.tmpl_hdr) { 179 if (!ahw->fw_dump.tmpl_hdr) {
175 err = -EIO; 180 err = -EIO;
@@ -180,6 +185,14 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
180 for (i = 0; i < temp_size/sizeof(u32); i++) 185 for (i = 0; i < temp_size/sizeof(u32); i++)
181 *template++ = __le32_to_cpu(*tmp_buf++); 186 *template++ = __le32_to_cpu(*tmp_buf++);
182 187
188 csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
189 if (csum) {
190 dev_err(&adapter->pdev->dev,
191 "Template header checksum validation failed\n");
192 err = -EIO;
193 goto error;
194 }
195
183 tmpl_hdr = ahw->fw_dump.tmpl_hdr; 196 tmpl_hdr = ahw->fw_dump.tmpl_hdr;
184 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF; 197 tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
185 ahw->fw_dump.enable = 1; 198 ahw->fw_dump.enable = 1;
@@ -231,6 +244,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
231 size_t rq_size, rsp_size; 244 size_t rq_size, rsp_size;
232 u32 cap, reg, val, reg2; 245 u32 cap, reg, val, reg2;
233 int err; 246 int err;
247 u16 temp;
234 248
235 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 249 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
236 250
@@ -267,8 +281,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
267 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) 281 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
268 cap |= QLCNIC_CAP0_LRO_MSS; 282 cap |= QLCNIC_CAP0_LRO_MSS;
269 283
270 prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx, 284 temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
271 msix_handler); 285 prq->valid_field_offset = cpu_to_le16(temp);
272 prq->txrx_sds_binding = nsds_rings - 1; 286 prq->txrx_sds_binding = nsds_rings - 1;
273 287
274 prq->capabilities[0] = cpu_to_le32(cap); 288 prq->capabilities[0] = cpu_to_le32(cap);
@@ -453,8 +467,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
453 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb); 467 temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
454 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp; 468 tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
455 469
456 adapter->tx_context_id = 470 adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
457 le16_to_cpu(prsp->context_id);
458 } else { 471 } else {
459 dev_err(&adapter->pdev->dev, 472 dev_err(&adapter->pdev->dev,
460 "Failed to create tx ctx in firmware%d\n", err); 473 "Failed to create tx ctx in firmware%d\n", err);
@@ -476,7 +489,7 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
476 struct qlcnic_cmd_args cmd; 489 struct qlcnic_cmd_args cmd;
477 490
478 memset(&cmd, 0, sizeof(cmd)); 491 memset(&cmd, 0, sizeof(cmd));
479 cmd.req.arg1 = adapter->tx_context_id; 492 cmd.req.arg1 = adapter->tx_ring->ctx_id;
480 cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET; 493 cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
481 cmd.req.arg3 = 0; 494 cmd.req.arg3 = 0;
482 cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX; 495 cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
@@ -671,7 +684,7 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
671 err = cmd.rsp.cmd; 684 err = cmd.rsp.cmd;
672 685
673 if (err == QLCNIC_RCODE_SUCCESS) 686 if (err == QLCNIC_RCODE_SUCCESS)
674 qlcnic_fetch_mac(adapter, cmd.rsp.arg1, cmd.rsp.arg2, 0, mac); 687 qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
675 else { 688 else {
676 dev_err(&adapter->pdev->dev, 689 dev_err(&adapter->pdev->dev,
677 "Failed to get mac address%d\n", err); 690 "Failed to get mac address%d\n", err);
@@ -687,10 +700,10 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
687{ 700{
688 int err; 701 int err;
689 dma_addr_t nic_dma_t; 702 dma_addr_t nic_dma_t;
690 struct qlcnic_info *nic_info; 703 struct qlcnic_info_le *nic_info;
691 void *nic_info_addr; 704 void *nic_info_addr;
692 struct qlcnic_cmd_args cmd; 705 struct qlcnic_cmd_args cmd;
693 size_t nic_size = sizeof(struct qlcnic_info); 706 size_t nic_size = sizeof(struct qlcnic_info_le);
694 707
695 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 708 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
696 &nic_dma_t, GFP_KERNEL); 709 &nic_dma_t, GFP_KERNEL);
@@ -745,10 +758,10 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
745 dma_addr_t nic_dma_t; 758 dma_addr_t nic_dma_t;
746 void *nic_info_addr; 759 void *nic_info_addr;
747 struct qlcnic_cmd_args cmd; 760 struct qlcnic_cmd_args cmd;
748 struct qlcnic_info *nic_info; 761 struct qlcnic_info_le *nic_info;
749 size_t nic_size = sizeof(struct qlcnic_info); 762 size_t nic_size = sizeof(struct qlcnic_info_le);
750 763
751 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 764 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
752 return err; 765 return err;
753 766
754 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 767 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
@@ -796,9 +809,9 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
796 int err = 0, i; 809 int err = 0, i;
797 struct qlcnic_cmd_args cmd; 810 struct qlcnic_cmd_args cmd;
798 dma_addr_t pci_info_dma_t; 811 dma_addr_t pci_info_dma_t;
799 struct qlcnic_pci_info *npar; 812 struct qlcnic_pci_info_le *npar;
800 void *pci_info_addr; 813 void *pci_info_addr;
801 size_t npar_size = sizeof(struct qlcnic_pci_info); 814 size_t npar_size = sizeof(struct qlcnic_pci_info_le);
802 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 815 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
803 816
804 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 817 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
@@ -816,11 +829,14 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
816 qlcnic_issue_cmd(adapter, &cmd); 829 qlcnic_issue_cmd(adapter, &cmd);
817 err = cmd.rsp.cmd; 830 err = cmd.rsp.cmd;
818 831
832 adapter->ahw->act_pci_func = 0;
819 if (err == QLCNIC_RCODE_SUCCESS) { 833 if (err == QLCNIC_RCODE_SUCCESS) {
820 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) { 834 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++, npar++, pci_info++) {
821 pci_info->id = le16_to_cpu(npar->id); 835 pci_info->id = le16_to_cpu(npar->id);
822 pci_info->active = le16_to_cpu(npar->active); 836 pci_info->active = le16_to_cpu(npar->active);
823 pci_info->type = le16_to_cpu(npar->type); 837 pci_info->type = le16_to_cpu(npar->type);
838 if (pci_info->type == QLCNIC_TYPE_NIC)
839 adapter->ahw->act_pci_func++;
824 pci_info->default_port = 840 pci_info->default_port =
825 le16_to_cpu(npar->default_port); 841 le16_to_cpu(npar->default_port);
826 pci_info->tx_min_bw = 842 pci_info->tx_min_bw =
@@ -848,8 +864,8 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
848 u32 arg1; 864 u32 arg1;
849 struct qlcnic_cmd_args cmd; 865 struct qlcnic_cmd_args cmd;
850 866
851 if (adapter->op_mode != QLCNIC_MGMT_FUNC || 867 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
852 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) 868 !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
853 return err; 869 return err;
854 870
855 arg1 = id | (enable_mirroring ? BIT_4 : 0); 871 arg1 = id | (enable_mirroring ? BIT_4 : 0);
@@ -877,8 +893,8 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
877int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func, 893int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
878 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { 894 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
879 895
880 size_t stats_size = sizeof(struct __qlcnic_esw_statistics); 896 size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
881 struct __qlcnic_esw_statistics *stats; 897 struct qlcnic_esw_stats_le *stats;
882 dma_addr_t stats_dma_t; 898 dma_addr_t stats_dma_t;
883 void *stats_addr; 899 void *stats_addr;
884 u32 arg1; 900 u32 arg1;
@@ -888,8 +904,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
888 if (esw_stats == NULL) 904 if (esw_stats == NULL)
889 return -ENOMEM; 905 return -ENOMEM;
890 906
891 if (adapter->op_mode != QLCNIC_MGMT_FUNC && 907 if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
892 func != adapter->ahw->pci_func) { 908 (func != adapter->ahw->pci_func)) {
893 dev_err(&adapter->pdev->dev, 909 dev_err(&adapter->pdev->dev,
894 "Not privilege to query stats for func=%d", func); 910 "Not privilege to query stats for func=%d", func);
895 return -EIO; 911 return -EIO;
@@ -939,9 +955,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
939int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter, 955int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
940 struct qlcnic_mac_statistics *mac_stats) 956 struct qlcnic_mac_statistics *mac_stats)
941{ 957{
942 struct qlcnic_mac_statistics *stats; 958 struct qlcnic_mac_statistics_le *stats;
943 struct qlcnic_cmd_args cmd; 959 struct qlcnic_cmd_args cmd;
944 size_t stats_size = sizeof(struct qlcnic_mac_statistics); 960 size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
945 dma_addr_t stats_dma_t; 961 dma_addr_t stats_dma_t;
946 void *stats_addr; 962 void *stats_addr;
947 int err; 963 int err;
@@ -1000,7 +1016,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1000 1016
1001 if (esw_stats == NULL) 1017 if (esw_stats == NULL)
1002 return -ENOMEM; 1018 return -ENOMEM;
1003 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1019 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1004 return -EIO; 1020 return -EIO;
1005 if (adapter->npars == NULL) 1021 if (adapter->npars == NULL)
1006 return -EIO; 1022 return -EIO;
@@ -1015,12 +1031,13 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
1015 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL; 1031 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
1016 esw_stats->context_id = eswitch; 1032 esw_stats->context_id = eswitch;
1017 1033
1018 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 1034 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
1019 if (adapter->npars[i].phy_port != eswitch) 1035 if (adapter->npars[i].phy_port != eswitch)
1020 continue; 1036 continue;
1021 1037
1022 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics)); 1038 memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
1023 if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats)) 1039 if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
1040 rx_tx, &port_stats))
1024 continue; 1041 continue;
1025 1042
1026 esw_stats->size = port_stats.size; 1043 esw_stats->size = port_stats.size;
@@ -1051,7 +1068,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
1051 u32 arg1; 1068 u32 arg1;
1052 struct qlcnic_cmd_args cmd; 1069 struct qlcnic_cmd_args cmd;
1053 1070
1054 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1071 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1055 return -EIO; 1072 return -EIO;
1056 1073
1057 if (func_esw == QLCNIC_STATS_PORT) { 1074 if (func_esw == QLCNIC_STATS_PORT) {
@@ -1119,15 +1136,18 @@ op_type = 1 for port vlan_id
1119int qlcnic_config_switch_port(struct qlcnic_adapter *adapter, 1136int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1120 struct qlcnic_esw_func_cfg *esw_cfg) 1137 struct qlcnic_esw_func_cfg *esw_cfg)
1121{ 1138{
1122 int err = -EIO; 1139 int err = -EIO, index;
1123 u32 arg1, arg2 = 0; 1140 u32 arg1, arg2 = 0;
1124 struct qlcnic_cmd_args cmd; 1141 struct qlcnic_cmd_args cmd;
1125 u8 pci_func; 1142 u8 pci_func;
1126 1143
1127 if (adapter->op_mode != QLCNIC_MGMT_FUNC) 1144 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
1128 return err; 1145 return err;
1129 pci_func = esw_cfg->pci_func; 1146 pci_func = esw_cfg->pci_func;
1130 arg1 = (adapter->npars[pci_func].phy_port & BIT_0); 1147 index = qlcnic_is_valid_nic_func(adapter, pci_func);
1148 if (index < 0)
1149 return err;
1150 arg1 = (adapter->npars[index].phy_port & BIT_0);
1131 arg1 |= (pci_func << 8); 1151 arg1 |= (pci_func << 8);
1132 1152
1133 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1153 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
@@ -1139,7 +1159,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
1139 case QLCNIC_PORT_DEFAULTS: 1159 case QLCNIC_PORT_DEFAULTS:
1140 arg1 |= (BIT_4 | BIT_6 | BIT_7); 1160 arg1 |= (BIT_4 | BIT_6 | BIT_7);
1141 arg2 |= (BIT_0 | BIT_1); 1161 arg2 |= (BIT_0 | BIT_1);
1142 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) 1162 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1143 arg2 |= (BIT_2 | BIT_3); 1163 arg2 |= (BIT_2 | BIT_3);
1144 if (!(esw_cfg->discard_tagged)) 1164 if (!(esw_cfg->discard_tagged))
1145 arg1 &= ~BIT_4; 1165 arg1 &= ~BIT_4;
@@ -1191,11 +1211,17 @@ qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
1191 struct qlcnic_esw_func_cfg *esw_cfg) 1211 struct qlcnic_esw_func_cfg *esw_cfg)
1192{ 1212{
1193 u32 arg1, arg2; 1213 u32 arg1, arg2;
1214 int index;
1194 u8 phy_port; 1215 u8 phy_port;
1195 if (adapter->op_mode == QLCNIC_MGMT_FUNC) 1216
1196 phy_port = adapter->npars[esw_cfg->pci_func].phy_port; 1217 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
1197 else 1218 index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
1198 phy_port = adapter->physical_port; 1219 if (index < 0)
1220 return -EIO;
1221 phy_port = adapter->npars[index].phy_port;
1222 } else {
1223 phy_port = adapter->ahw->physical_port;
1224 }
1199 arg1 = phy_port; 1225 arg1 = phy_port;
1200 arg1 |= (esw_cfg->pci_func << 8); 1226 arg1 |= (esw_cfg->pci_func << 8);
1201 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2)) 1227 if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 9e9e78a5c4d7..74b98110c5b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -208,9 +208,9 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
208 ADVERTISED_1000baseT_Half | 208 ADVERTISED_1000baseT_Half |
209 ADVERTISED_1000baseT_Full); 209 ADVERTISED_1000baseT_Full);
210 210
211 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 211 ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
212 ecmd->duplex = adapter->link_duplex; 212 ecmd->duplex = adapter->ahw->link_duplex;
213 ecmd->autoneg = adapter->link_autoneg; 213 ecmd->autoneg = adapter->ahw->link_autoneg;
214 214
215 } else if (adapter->ahw->port_type == QLCNIC_XGBE) { 215 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
216 u32 val; 216 u32 val;
@@ -224,10 +224,10 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
224 ecmd->advertising = ADVERTISED_10000baseT_Full; 224 ecmd->advertising = ADVERTISED_10000baseT_Full;
225 } 225 }
226 226
227 if (netif_running(dev) && adapter->has_link_events) { 227 if (netif_running(dev) && adapter->ahw->has_link_events) {
228 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 228 ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
229 ecmd->autoneg = adapter->link_autoneg; 229 ecmd->autoneg = adapter->ahw->link_autoneg;
230 ecmd->duplex = adapter->link_duplex; 230 ecmd->duplex = adapter->ahw->link_duplex;
231 goto skip; 231 goto skip;
232 } 232 }
233 233
@@ -238,7 +238,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
238 return -EIO; 238 return -EIO;
239 239
240skip: 240skip:
241 ecmd->phy_address = adapter->physical_port; 241 ecmd->phy_address = adapter->ahw->physical_port;
242 ecmd->transceiver = XCVR_EXTERNAL; 242 ecmd->transceiver = XCVR_EXTERNAL;
243 243
244 switch (adapter->ahw->board_type) { 244 switch (adapter->ahw->board_type) {
@@ -254,7 +254,7 @@ skip:
254 ecmd->supported |= SUPPORTED_TP; 254 ecmd->supported |= SUPPORTED_TP;
255 ecmd->advertising |= ADVERTISED_TP; 255 ecmd->advertising |= ADVERTISED_TP;
256 ecmd->port = PORT_TP; 256 ecmd->port = PORT_TP;
257 ecmd->autoneg = adapter->link_autoneg; 257 ecmd->autoneg = adapter->ahw->link_autoneg;
258 break; 258 break;
259 case QLCNIC_BRDTYPE_P3P_IMEZ: 259 case QLCNIC_BRDTYPE_P3P_IMEZ:
260 case QLCNIC_BRDTYPE_P3P_XG_LOM: 260 case QLCNIC_BRDTYPE_P3P_XG_LOM:
@@ -270,7 +270,7 @@ skip:
270 ecmd->advertising |= ADVERTISED_TP; 270 ecmd->advertising |= ADVERTISED_TP;
271 ecmd->supported |= SUPPORTED_TP; 271 ecmd->supported |= SUPPORTED_TP;
272 check_sfp_module = netif_running(dev) && 272 check_sfp_module = netif_running(dev) &&
273 adapter->has_link_events; 273 adapter->ahw->has_link_events;
274 case QLCNIC_BRDTYPE_P3P_10G_XFP: 274 case QLCNIC_BRDTYPE_P3P_10G_XFP:
275 ecmd->supported |= SUPPORTED_FIBRE; 275 ecmd->supported |= SUPPORTED_FIBRE;
276 ecmd->advertising |= ADVERTISED_FIBRE; 276 ecmd->advertising |= ADVERTISED_FIBRE;
@@ -285,7 +285,7 @@ skip:
285 (ADVERTISED_FIBRE | ADVERTISED_TP); 285 (ADVERTISED_FIBRE | ADVERTISED_TP);
286 ecmd->port = PORT_FIBRE; 286 ecmd->port = PORT_FIBRE;
287 check_sfp_module = netif_running(dev) && 287 check_sfp_module = netif_running(dev) &&
288 adapter->has_link_events; 288 adapter->ahw->has_link_events;
289 } else { 289 } else {
290 ecmd->autoneg = AUTONEG_ENABLE; 290 ecmd->autoneg = AUTONEG_ENABLE;
291 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); 291 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -301,7 +301,7 @@ skip:
301 } 301 }
302 302
303 if (check_sfp_module) { 303 if (check_sfp_module) {
304 switch (adapter->module_type) { 304 switch (adapter->ahw->module_type) {
305 case LINKEVENT_MODULE_OPTICAL_UNKNOWN: 305 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
306 case LINKEVENT_MODULE_OPTICAL_SRLR: 306 case LINKEVENT_MODULE_OPTICAL_SRLR:
307 case LINKEVENT_MODULE_OPTICAL_LRM: 307 case LINKEVENT_MODULE_OPTICAL_LRM:
@@ -359,9 +359,9 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
359 else if (ret) 359 else if (ret)
360 return -EIO; 360 return -EIO;
361 361
362 adapter->link_speed = ethtool_cmd_speed(ecmd); 362 adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
363 adapter->link_duplex = ecmd->duplex; 363 adapter->ahw->link_duplex = ecmd->duplex;
364 adapter->link_autoneg = ecmd->autoneg; 364 adapter->ahw->link_autoneg = ecmd->autoneg;
365 365
366 if (!netif_running(dev)) 366 if (!netif_running(dev))
367 return 0; 367 return 0;
@@ -508,14 +508,15 @@ qlcnic_set_ringparam(struct net_device *dev,
508static void qlcnic_get_channels(struct net_device *dev, 508static void qlcnic_get_channels(struct net_device *dev,
509 struct ethtool_channels *channel) 509 struct ethtool_channels *channel)
510{ 510{
511 int min;
511 struct qlcnic_adapter *adapter = netdev_priv(dev); 512 struct qlcnic_adapter *adapter = netdev_priv(dev);
512 513
513 channel->max_rx = rounddown_pow_of_two(min_t(int, 514 min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
514 adapter->max_rx_ques, num_online_cpus())); 515 channel->max_rx = rounddown_pow_of_two(min);
515 channel->max_tx = adapter->max_tx_ques; 516 channel->max_tx = adapter->ahw->max_tx_ques;
516 517
517 channel->rx_count = adapter->max_sds_rings; 518 channel->rx_count = adapter->max_sds_rings;
518 channel->tx_count = adapter->max_tx_ques; 519 channel->tx_count = adapter->ahw->max_tx_ques;
519} 520}
520 521
521static int qlcnic_set_channels(struct net_device *dev, 522static int qlcnic_set_channels(struct net_device *dev,
@@ -543,7 +544,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
543 struct ethtool_pauseparam *pause) 544 struct ethtool_pauseparam *pause)
544{ 545{
545 struct qlcnic_adapter *adapter = netdev_priv(netdev); 546 struct qlcnic_adapter *adapter = netdev_priv(netdev);
546 int port = adapter->physical_port; 547 int port = adapter->ahw->physical_port;
547 __u32 val; 548 __u32 val;
548 549
549 if (adapter->ahw->port_type == QLCNIC_GBE) { 550 if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -588,7 +589,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
588 struct ethtool_pauseparam *pause) 589 struct ethtool_pauseparam *pause)
589{ 590{
590 struct qlcnic_adapter *adapter = netdev_priv(netdev); 591 struct qlcnic_adapter *adapter = netdev_priv(netdev);
591 int port = adapter->physical_port; 592 int port = adapter->ahw->physical_port;
592 __u32 val; 593 __u32 val;
593 594
594 /* read mode */ 595 /* read mode */
@@ -703,7 +704,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
703 if (ret) 704 if (ret)
704 goto clear_it; 705 goto clear_it;
705 706
706 adapter->diag_cnt = 0; 707 adapter->ahw->diag_cnt = 0;
707 memset(&cmd, 0, sizeof(cmd)); 708 memset(&cmd, 0, sizeof(cmd));
708 cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST; 709 cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
709 cmd.req.arg1 = adapter->ahw->pci_func; 710 cmd.req.arg1 = adapter->ahw->pci_func;
@@ -715,7 +716,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
715 716
716 msleep(10); 717 msleep(10);
717 718
718 ret = !adapter->diag_cnt; 719 ret = !adapter->ahw->diag_cnt;
719 720
720done: 721done:
721 qlcnic_diag_free_res(netdev, max_sds_rings); 722 qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -761,7 +762,7 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
761 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); 762 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
762 skb_put(skb, QLCNIC_ILB_PKT_SIZE); 763 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
763 764
764 adapter->diag_cnt = 0; 765 adapter->ahw->diag_cnt = 0;
765 qlcnic_xmit_frame(skb, adapter->netdev); 766 qlcnic_xmit_frame(skb, adapter->netdev);
766 767
767 loop = 0; 768 loop = 0;
@@ -770,11 +771,11 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
770 qlcnic_process_rcv_ring_diag(sds_ring); 771 qlcnic_process_rcv_ring_diag(sds_ring);
771 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) 772 if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP)
772 break; 773 break;
773 } while (!adapter->diag_cnt); 774 } while (!adapter->ahw->diag_cnt);
774 775
775 dev_kfree_skb_any(skb); 776 dev_kfree_skb_any(skb);
776 777
777 if (!adapter->diag_cnt) 778 if (!adapter->ahw->diag_cnt)
778 QLCDB(adapter, DRV, 779 QLCDB(adapter, DRV,
779 "LB Test: packet #%d was not received\n", i + 1); 780 "LB Test: packet #%d was not received\n", i + 1);
780 else 781 else
@@ -800,14 +801,15 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
800 int loop = 0; 801 int loop = 0;
801 int ret; 802 int ret;
802 803
803 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) { 804 if (!(adapter->ahw->capabilities &
805 QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
804 netdev_info(netdev, "Firmware is not loopback test capable\n"); 806 netdev_info(netdev, "Firmware is not loopback test capable\n");
805 return -EOPNOTSUPP; 807 return -EOPNOTSUPP;
806 } 808 }
807 809
808 QLCDB(adapter, DRV, "%s loopback test in progress\n", 810 QLCDB(adapter, DRV, "%s loopback test in progress\n",
809 mode == QLCNIC_ILB_MODE ? "internal" : "external"); 811 mode == QLCNIC_ILB_MODE ? "internal" : "external");
810 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { 812 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
811 netdev_warn(netdev, "Loopback test not supported for non " 813 netdev_warn(netdev, "Loopback test not supported for non "
812 "privilege function\n"); 814 "privilege function\n");
813 return 0; 815 return 0;
@@ -826,7 +828,7 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
826 if (ret) 828 if (ret)
827 goto free_res; 829 goto free_res;
828 830
829 adapter->diag_cnt = 0; 831 adapter->ahw->diag_cnt = 0;
830 do { 832 do {
831 msleep(500); 833 msleep(500);
832 qlcnic_process_rcv_ring_diag(sds_ring); 834 qlcnic_process_rcv_ring_diag(sds_ring);
@@ -835,8 +837,8 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
835 " configure request\n"); 837 " configure request\n");
836 ret = -QLCNIC_FW_NOT_RESPOND; 838 ret = -QLCNIC_FW_NOT_RESPOND;
837 goto free_res; 839 goto free_res;
838 } else if (adapter->diag_cnt) { 840 } else if (adapter->ahw->diag_cnt) {
839 ret = adapter->diag_cnt; 841 ret = adapter->ahw->diag_cnt;
840 goto free_res; 842 goto free_res;
841 } 843 }
842 } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state)); 844 } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
@@ -1028,7 +1030,7 @@ static int qlcnic_set_led(struct net_device *dev,
1028 int max_sds_rings = adapter->max_sds_rings; 1030 int max_sds_rings = adapter->max_sds_rings;
1029 int err = -EIO, active = 1; 1031 int err = -EIO, active = 1;
1030 1032
1031 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { 1033 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1032 netdev_warn(dev, "LED test not supported for non " 1034 netdev_warn(dev, "LED test not supported for non "
1033 "privilege function\n"); 1035 "privilege function\n");
1034 return -EOPNOTSUPP; 1036 return -EOPNOTSUPP;
@@ -1207,14 +1209,14 @@ static u32 qlcnic_get_msglevel(struct net_device *netdev)
1207{ 1209{
1208 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1210 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1209 1211
1210 return adapter->msg_enable; 1212 return adapter->ahw->msg_enable;
1211} 1213}
1212 1214
1213static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl) 1215static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1214{ 1216{
1215 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1217 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1216 1218
1217 adapter->msg_enable = msglvl; 1219 adapter->ahw->msg_enable = msglvl;
1218} 1220}
1219 1221
1220static int 1222static int
@@ -1247,7 +1249,8 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1247 void *buffer) 1249 void *buffer)
1248{ 1250{
1249 int i, copy_sz; 1251 int i, copy_sz;
1250 u32 *hdr_ptr, *data; 1252 u32 *hdr_ptr;
1253 __le32 *data;
1251 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1254 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1252 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1255 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1253 1256
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 28a6b28192e3..49cc1ac4f057 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -792,22 +792,6 @@ static const u32 MIU_TEST_READ_DATA[] = {
792#define QLCNIC_FLASH_SEM2_ULK 0x0013C014 792#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
793#define QLCNIC_FLASH_LOCK_ID 0x001B2100 793#define QLCNIC_FLASH_LOCK_ID 0x001B2100
794 794
795#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
796 writel((addr & 0xFFFF0000), (void *) (bar0 + \
797 QLCNIC_FW_DUMP_REG1)); \
798 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
799 *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
800 LSW(addr))); \
801} while (0)
802
803#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
804 writel((addr & 0xFFFF0000), (void *) (bar0 + \
805 QLCNIC_FW_DUMP_REG1)); \
806 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
807 writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
808 readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
809} while (0)
810
811/* PCI function operational mode */ 795/* PCI function operational mode */
812enum { 796enum {
813 QLCNIC_MGMT_FUNC = 0, 797 QLCNIC_MGMT_FUNC = 0,
@@ -832,55 +816,63 @@ enum {
832#define LSD(x) ((uint32_t)((uint64_t)(x))) 816#define LSD(x) ((uint32_t)((uint64_t)(x)))
833#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) 817#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
834 818
819#define QLCNIC_MS_CTRL 0x41000090
820#define QLCNIC_MS_ADDR_LO 0x41000094
821#define QLCNIC_MS_ADDR_HI 0x41000098
822#define QLCNIC_MS_WRTDATA_LO 0x410000A0
823#define QLCNIC_MS_WRTDATA_HI 0x410000A4
824#define QLCNIC_MS_WRTDATA_ULO 0x410000B0
825#define QLCNIC_MS_WRTDATA_UHI 0x410000B4
826#define QLCNIC_MS_RDDATA_LO 0x410000A8
827#define QLCNIC_MS_RDDATA_HI 0x410000AC
828#define QLCNIC_MS_RDDATA_ULO 0x410000B8
829#define QLCNIC_MS_RDDATA_UHI 0x410000BC
830
831#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
832#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
833#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
834
835#define QLCNIC_LEGACY_INTR_CONFIG \ 835#define QLCNIC_LEGACY_INTR_CONFIG \
836{ \ 836{ \
837 { \ 837 { \
838 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \ 838 .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
839 .tgt_status_reg = ISR_INT_TARGET_STATUS, \ 839 .tgt_status_reg = ISR_INT_TARGET_STATUS, \
840 .tgt_mask_reg = ISR_INT_TARGET_MASK, \ 840 .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \
841 .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
842 \ 841 \
843 { \ 842 { \
844 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \ 843 .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
845 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \ 844 .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
846 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \ 845 .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \
847 .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
848 \ 846 \
849 { \ 847 { \
850 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \ 848 .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
851 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \ 849 .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
852 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \ 850 .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \
853 .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
854 \ 851 \
855 { \ 852 { \
856 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \ 853 .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
857 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \ 854 .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
858 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \ 855 .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \
859 .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
860 \ 856 \
861 { \ 857 { \
862 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \ 858 .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
863 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \ 859 .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
864 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \ 860 .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \
865 .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
866 \ 861 \
867 { \ 862 { \
868 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \ 863 .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
869 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \ 864 .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
870 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \ 865 .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \
871 .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
872 \ 866 \
873 { \ 867 { \
874 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \ 868 .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
875 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \ 869 .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
876 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \ 870 .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \
877 .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
878 \ 871 \
879 { \ 872 { \
880 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \ 873 .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
881 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \ 874 .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
882 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \ 875 .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \
883 .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
884} 876}
885 877
886/* NIU REGS */ 878/* NIU REGS */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 2a0c9dc48eb3..fc48e000f35f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "qlcnic.h" 8#include "qlcnic.h"
9#include "qlcnic_hdr.h"
9 10
10#include <linux/slab.h> 11#include <linux/slab.h>
11#include <net/ip.h> 12#include <net/ip.h>
@@ -22,6 +23,15 @@
22#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) 23#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
23#define CRB_INDIRECT_2M (0x1e0000UL) 24#define CRB_INDIRECT_2M (0x1e0000UL)
24 25
26struct qlcnic_ms_reg_ctrl {
27 u32 ocm_window;
28 u32 control;
29 u32 hi;
30 u32 low;
31 u32 rd[4];
32 u32 wd[4];
33 u64 off;
34};
25 35
26#ifndef readq 36#ifndef readq
27static inline u64 readq(void __iomem *addr) 37static inline u64 readq(void __iomem *addr)
@@ -266,10 +276,44 @@ static const unsigned crb_hub_agt[64] = {
266 0, 276 0,
267}; 277};
268 278
279static const u32 msi_tgt_status[8] = {
280 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
281 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
282 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
283 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
284};
285
269/* PCI Windowing for DDR regions. */ 286/* PCI Windowing for DDR regions. */
270 287
271#define QLCNIC_PCIE_SEM_TIMEOUT 10000 288#define QLCNIC_PCIE_SEM_TIMEOUT 10000
272 289
290static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
291{
292 u32 dest;
293 void __iomem *val;
294
295 dest = addr & 0xFFFF0000;
296 val = bar0 + QLCNIC_FW_DUMP_REG1;
297 writel(dest, val);
298 readl(val);
299 val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
300 *data = readl(val);
301}
302
303static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
304{
305 u32 dest;
306 void __iomem *val;
307
308 dest = addr & 0xFFFF0000;
309 val = bar0 + QLCNIC_FW_DUMP_REG1;
310 writel(dest, val);
311 readl(val);
312 val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
313 writel(data, val);
314 readl(val);
315}
316
273int 317int
274qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) 318qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
275{ 319{
@@ -300,6 +344,23 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
300 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem))); 344 QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
301} 345}
302 346
347static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
348{
349 u32 data;
350
351 if (qlcnic_82xx_check(adapter))
352 qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
353 else
354 return -EIO;
355 return data;
356}
357
358static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
359{
360 if (qlcnic_82xx_check(adapter))
361 qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
362}
363
303static int 364static int
304qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter, 365qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
305 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc) 366 struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
@@ -350,7 +411,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
350 411
351 tx_ring->producer = producer; 412 tx_ring->producer = producer;
352 413
353 qlcnic_update_cmd_producer(adapter, tx_ring); 414 qlcnic_update_cmd_producer(tx_ring);
354 415
355 __netif_tx_unlock_bh(tx_ring->txq); 416 __netif_tx_unlock_bh(tx_ring->txq);
356 417
@@ -434,7 +495,7 @@ void qlcnic_set_multi(struct net_device *netdev)
434 } 495 }
435 496
436 if ((netdev->flags & IFF_ALLMULTI) || 497 if ((netdev->flags & IFF_ALLMULTI) ||
437 (netdev_mc_count(netdev) > adapter->max_mc_count)) { 498 (netdev_mc_count(netdev) > adapter->ahw->max_mc_count)) {
438 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 499 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
439 goto send_fw_cmd; 500 goto send_fw_cmd;
440 } 501 }
@@ -540,7 +601,7 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
540 } 601 }
541} 602}
542 603
543int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag) 604static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
544{ 605{
545 struct qlcnic_nic_req req; 606 struct qlcnic_nic_req req;
546 int rv; 607 int rv;
@@ -863,9 +924,8 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
863 * 0 if no window access is needed. 'off' is set to 2M addr 924 * 0 if no window access is needed. 'off' is set to 2M addr
864 * In: 'off' is offset from base in 128M pci map 925 * In: 'off' is offset from base in 128M pci map
865 */ 926 */
866static int 927static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
867qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter, 928 ulong off, void __iomem **addr)
868 ulong off, void __iomem **addr)
869{ 929{
870 const struct crb_128M_2M_sub_block_map *m; 930 const struct crb_128M_2M_sub_block_map *m;
871 931
@@ -880,7 +940,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
880 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)]; 940 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
881 941
882 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) { 942 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
883 *addr = adapter->ahw->pci_base0 + m->start_2M + 943 *addr = ahw->pci_base0 + m->start_2M +
884 (off - m->start_128M); 944 (off - m->start_128M);
885 return 0; 945 return 0;
886 } 946 }
@@ -888,7 +948,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
888 /* 948 /*
889 * Not in direct map, use crb window 949 * Not in direct map, use crb window
890 */ 950 */
891 *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16)); 951 *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
892 return 1; 952 return 1;
893} 953}
894 954
@@ -929,7 +989,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
929 int rv; 989 int rv;
930 void __iomem *addr = NULL; 990 void __iomem *addr = NULL;
931 991
932 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); 992 rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
933 993
934 if (rv == 0) { 994 if (rv == 0) {
935 writel(data, addr); 995 writel(data, addr);
@@ -954,15 +1014,14 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
954 return -EIO; 1014 return -EIO;
955} 1015}
956 1016
957u32 1017int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
958qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
959{ 1018{
960 unsigned long flags; 1019 unsigned long flags;
961 int rv; 1020 int rv;
962 u32 data = -1; 1021 u32 data = -1;
963 void __iomem *addr = NULL; 1022 void __iomem *addr = NULL;
964 1023
965 rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr); 1024 rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
966 1025
967 if (rv == 0) 1026 if (rv == 0)
968 return readl(addr); 1027 return readl(addr);
@@ -985,46 +1044,28 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
985} 1044}
986 1045
987 1046
988void __iomem * 1047void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
989qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset) 1048 u32 offset)
990{ 1049{
991 void __iomem *addr = NULL; 1050 void __iomem *addr = NULL;
992 1051
993 WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr)); 1052 WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
994 1053
995 return addr; 1054 return addr;
996} 1055}
997 1056
998 1057static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
999static int 1058 u32 window, u64 off, u64 *data, int op)
1000qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
1001 u64 addr, u32 *start)
1002{
1003 u32 window;
1004
1005 window = OCM_WIN_P3P(addr);
1006
1007 writel(window, adapter->ahw->ocm_win_crb);
1008 /* read back to flush */
1009 readl(adapter->ahw->ocm_win_crb);
1010
1011 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1012 return 0;
1013}
1014
1015static int
1016qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1017 u64 *data, int op)
1018{ 1059{
1019 void __iomem *addr; 1060 void __iomem *addr;
1020 int ret;
1021 u32 start; 1061 u32 start;
1022 1062
1023 mutex_lock(&adapter->ahw->mem_lock); 1063 mutex_lock(&adapter->ahw->mem_lock);
1024 1064
1025 ret = qlcnic_pci_set_window_2M(adapter, off, &start); 1065 writel(window, adapter->ahw->ocm_win_crb);
1026 if (ret != 0) 1066 /* read back to flush */
1027 goto unlock; 1067 readl(adapter->ahw->ocm_win_crb);
1068 start = QLCNIC_PCI_OCM0_2M + off;
1028 1069
1029 addr = adapter->ahw->pci_base0 + start; 1070 addr = adapter->ahw->pci_base0 + start;
1030 1071
@@ -1033,10 +1074,12 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
1033 else /* write */ 1074 else /* write */
1034 writeq(*data, addr); 1075 writeq(*data, addr);
1035 1076
1036unlock: 1077 /* Set window to 0 */
1037 mutex_unlock(&adapter->ahw->mem_lock); 1078 writel(0, adapter->ahw->ocm_win_crb);
1079 readl(adapter->ahw->ocm_win_crb);
1038 1080
1039 return ret; 1081 mutex_unlock(&adapter->ahw->mem_lock);
1082 return 0;
1040} 1083}
1041 1084
1042void 1085void
@@ -1061,54 +1104,74 @@ qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1061 mutex_unlock(&adapter->ahw->mem_lock); 1104 mutex_unlock(&adapter->ahw->mem_lock);
1062} 1105}
1063 1106
1064#define MAX_CTL_CHECK 1000
1065 1107
1066int 1108
1067qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, 1109/* Set MS memory control data for different adapters */
1068 u64 off, u64 data) 1110static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
1111 struct qlcnic_ms_reg_ctrl *ms)
1112{
1113 ms->control = QLCNIC_MS_CTRL;
1114 ms->low = QLCNIC_MS_ADDR_LO;
1115 ms->hi = QLCNIC_MS_ADDR_HI;
1116 if (off & 0xf) {
1117 ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
1118 ms->rd[0] = QLCNIC_MS_RDDATA_LO;
1119 ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
1120 ms->rd[1] = QLCNIC_MS_RDDATA_HI;
1121 ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
1122 ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
1123 ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
1124 ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
1125 } else {
1126 ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
1127 ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
1128 ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
1129 ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
1130 ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
1131 ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
1132 ms->rd[2] = QLCNIC_MS_RDDATA_LO;
1133 ms->rd[3] = QLCNIC_MS_RDDATA_HI;
1134 }
1135
1136 ms->ocm_window = OCM_WIN_P3P(off);
1137 ms->off = GET_MEM_OFFS_2M(off);
1138}
1139
1140int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
1069{ 1141{
1070 int i, j, ret; 1142 int j, ret = 0;
1071 u32 temp, off8; 1143 u32 temp, off8;
1072 void __iomem *mem_crb; 1144 struct qlcnic_ms_reg_ctrl ms;
1073 1145
1074 /* Only 64-bit aligned access */ 1146 /* Only 64-bit aligned access */
1075 if (off & 7) 1147 if (off & 7)
1076 return -EIO; 1148 return -EIO;
1077 1149
1078 /* P3 onward, test agent base for MIU and SIU is same */ 1150 memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
1079 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 1151 if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1080 QLCNIC_ADDR_QDR_NET_MAX)) { 1152 QLCNIC_ADDR_QDR_NET_MAX) ||
1081 mem_crb = qlcnic_get_ioaddr(adapter, 1153 ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
1082 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); 1154 QLCNIC_ADDR_DDR_NET_MAX)))
1083 goto correct; 1155 return -EIO;
1084 }
1085 1156
1086 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) { 1157 qlcnic_set_ms_controls(adapter, off, &ms);
1087 mem_crb = qlcnic_get_ioaddr(adapter,
1088 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1089 goto correct;
1090 }
1091 1158
1092 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) 1159 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1093 return qlcnic_pci_mem_access_direct(adapter, off, &data, 1); 1160 return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
1094 1161 ms.off, &data, 1);
1095 return -EIO;
1096 1162
1097correct:
1098 off8 = off & ~0xf; 1163 off8 = off & ~0xf;
1099 1164
1100 mutex_lock(&adapter->ahw->mem_lock); 1165 mutex_lock(&adapter->ahw->mem_lock);
1101 1166
1102 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1167 qlcnic_ind_wr(adapter, ms.low, off8);
1103 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1168 qlcnic_ind_wr(adapter, ms.hi, 0);
1104 1169
1105 i = 0; 1170 qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
1106 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); 1171 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
1107 writel((TA_CTL_START | TA_CTL_ENABLE),
1108 (mem_crb + TEST_AGT_CTRL));
1109 1172
1110 for (j = 0; j < MAX_CTL_CHECK; j++) { 1173 for (j = 0; j < MAX_CTL_CHECK; j++) {
1111 temp = readl(mem_crb + TEST_AGT_CTRL); 1174 temp = qlcnic_ind_rd(adapter, ms.control);
1112 if ((temp & TA_CTL_BUSY) == 0) 1175 if ((temp & TA_CTL_BUSY) == 0)
1113 break; 1176 break;
1114 } 1177 }
@@ -1118,24 +1181,18 @@ correct:
1118 goto done; 1181 goto done;
1119 } 1182 }
1120 1183
1121 i = (off & 0xf) ? 0 : 2; 1184 /* This is the modify part of read-modify-write */
1122 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), 1185 qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
1123 mem_crb + MIU_TEST_AGT_WRDATA(i)); 1186 qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
1124 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)), 1187 /* This is the write part of read-modify-write */
1125 mem_crb + MIU_TEST_AGT_WRDATA(i+1)); 1188 qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
1126 i = (off & 0xf) ? 2 : 0; 1189 qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
1127
1128 writel(data & 0xffffffff,
1129 mem_crb + MIU_TEST_AGT_WRDATA(i));
1130 writel((data >> 32) & 0xffffffff,
1131 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1132 1190
1133 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); 1191 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
1134 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), 1192 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
1135 (mem_crb + TEST_AGT_CTRL));
1136 1193
1137 for (j = 0; j < MAX_CTL_CHECK; j++) { 1194 for (j = 0; j < MAX_CTL_CHECK; j++) {
1138 temp = readl(mem_crb + TEST_AGT_CTRL); 1195 temp = qlcnic_ind_rd(adapter, ms.control);
1139 if ((temp & TA_CTL_BUSY) == 0) 1196 if ((temp & TA_CTL_BUSY) == 0)
1140 break; 1197 break;
1141 } 1198 }
@@ -1154,52 +1211,41 @@ done:
1154 return ret; 1211 return ret;
1155} 1212}
1156 1213
1157int 1214int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
1158qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1159 u64 off, u64 *data)
1160{ 1215{
1161 int j, ret; 1216 int j, ret;
1162 u32 temp, off8; 1217 u32 temp, off8;
1163 u64 val; 1218 u64 val;
1164 void __iomem *mem_crb; 1219 struct qlcnic_ms_reg_ctrl ms;
1165 1220
1166 /* Only 64-bit aligned access */ 1221 /* Only 64-bit aligned access */
1167 if (off & 7) 1222 if (off & 7)
1168 return -EIO; 1223 return -EIO;
1224 if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1225 QLCNIC_ADDR_QDR_NET_MAX) ||
1226 ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
1227 QLCNIC_ADDR_DDR_NET_MAX)))
1228 return -EIO;
1169 1229
1170 /* P3 onward, test agent base for MIU and SIU is same */ 1230 memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
1171 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 1231 qlcnic_set_ms_controls(adapter, off, &ms);
1172 QLCNIC_ADDR_QDR_NET_MAX)) {
1173 mem_crb = qlcnic_get_ioaddr(adapter,
1174 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1175 goto correct;
1176 }
1177
1178 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
1179 mem_crb = qlcnic_get_ioaddr(adapter,
1180 QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1181 goto correct;
1182 }
1183 1232
1184 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) { 1233 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
1185 return qlcnic_pci_mem_access_direct(adapter, 1234 return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
1186 off, data, 0); 1235 ms.off, data, 0);
1187 }
1188 1236
1189 return -EIO; 1237 mutex_lock(&adapter->ahw->mem_lock);
1190 1238
1191correct:
1192 off8 = off & ~0xf; 1239 off8 = off & ~0xf;
1193 1240
1194 mutex_lock(&adapter->ahw->mem_lock); 1241 qlcnic_ind_wr(adapter, ms.low, off8);
1242 qlcnic_ind_wr(adapter, ms.hi, 0);
1195 1243
1196 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1244 qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
1197 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1245 qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
1198 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1199 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1200 1246
1201 for (j = 0; j < MAX_CTL_CHECK; j++) { 1247 for (j = 0; j < MAX_CTL_CHECK; j++) {
1202 temp = readl(mem_crb + TEST_AGT_CTRL); 1248 temp = qlcnic_ind_rd(adapter, ms.control);
1203 if ((temp & TA_CTL_BUSY) == 0) 1249 if ((temp & TA_CTL_BUSY) == 0)
1204 break; 1250 break;
1205 } 1251 }
@@ -1210,13 +1256,10 @@ correct:
1210 "failed to read through agent\n"); 1256 "failed to read through agent\n");
1211 ret = -EIO; 1257 ret = -EIO;
1212 } else { 1258 } else {
1213 off8 = MIU_TEST_AGT_RDDATA_LO;
1214 if (off & 0xf)
1215 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1216 1259
1217 temp = readl(mem_crb + off8 + 4); 1260 temp = qlcnic_ind_rd(adapter, ms.rd[3]);
1218 val = (u64)temp << 32; 1261 val = (u64)temp << 32;
1219 val |= readl(mem_crb + off8); 1262 val |= qlcnic_ind_rd(adapter, ms.rd[2]);
1220 *data = val; 1263 *data = val;
1221 ret = 0; 1264 ret = 0;
1222 } 1265 }
@@ -1320,469 +1363,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1320 1363
1321 return rv; 1364 return rv;
1322} 1365}
1323
1324/* FW dump related functions */
1325static u32
1326qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1327 u32 *buffer)
1328{
1329 int i;
1330 u32 addr, data;
1331 struct __crb *crb = &entry->region.crb;
1332 void __iomem *base = adapter->ahw->pci_base0;
1333
1334 addr = crb->addr;
1335
1336 for (i = 0; i < crb->no_ops; i++) {
1337 QLCNIC_RD_DUMP_REG(addr, base, &data);
1338 *buffer++ = cpu_to_le32(addr);
1339 *buffer++ = cpu_to_le32(data);
1340 addr += crb->stride;
1341 }
1342 return crb->no_ops * 2 * sizeof(u32);
1343}
1344
1345static u32
1346qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
1347 struct qlcnic_dump_entry *entry, u32 *buffer)
1348{
1349 int i, k, timeout = 0;
1350 void __iomem *base = adapter->ahw->pci_base0;
1351 u32 addr, data;
1352 u8 opcode, no_ops;
1353 struct __ctrl *ctr = &entry->region.ctrl;
1354 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
1355
1356 addr = ctr->addr;
1357 no_ops = ctr->no_ops;
1358
1359 for (i = 0; i < no_ops; i++) {
1360 k = 0;
1361 opcode = 0;
1362 for (k = 0; k < 8; k++) {
1363 if (!(ctr->opcode & (1 << k)))
1364 continue;
1365 switch (1 << k) {
1366 case QLCNIC_DUMP_WCRB:
1367 QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
1368 break;
1369 case QLCNIC_DUMP_RWCRB:
1370 QLCNIC_RD_DUMP_REG(addr, base, &data);
1371 QLCNIC_WR_DUMP_REG(addr, base, data);
1372 break;
1373 case QLCNIC_DUMP_ANDCRB:
1374 QLCNIC_RD_DUMP_REG(addr, base, &data);
1375 QLCNIC_WR_DUMP_REG(addr, base,
1376 (data & ctr->val2));
1377 break;
1378 case QLCNIC_DUMP_ORCRB:
1379 QLCNIC_RD_DUMP_REG(addr, base, &data);
1380 QLCNIC_WR_DUMP_REG(addr, base,
1381 (data | ctr->val3));
1382 break;
1383 case QLCNIC_DUMP_POLLCRB:
1384 while (timeout <= ctr->timeout) {
1385 QLCNIC_RD_DUMP_REG(addr, base, &data);
1386 if ((data & ctr->val2) == ctr->val1)
1387 break;
1388 msleep(1);
1389 timeout++;
1390 }
1391 if (timeout > ctr->timeout) {
1392 dev_info(&adapter->pdev->dev,
1393 "Timed out, aborting poll CRB\n");
1394 return -EINVAL;
1395 }
1396 break;
1397 case QLCNIC_DUMP_RD_SAVE:
1398 if (ctr->index_a)
1399 addr = t_hdr->saved_state[ctr->index_a];
1400 QLCNIC_RD_DUMP_REG(addr, base, &data);
1401 t_hdr->saved_state[ctr->index_v] = data;
1402 break;
1403 case QLCNIC_DUMP_WRT_SAVED:
1404 if (ctr->index_v)
1405 data = t_hdr->saved_state[ctr->index_v];
1406 else
1407 data = ctr->val1;
1408 if (ctr->index_a)
1409 addr = t_hdr->saved_state[ctr->index_a];
1410 QLCNIC_WR_DUMP_REG(addr, base, data);
1411 break;
1412 case QLCNIC_DUMP_MOD_SAVE_ST:
1413 data = t_hdr->saved_state[ctr->index_v];
1414 data <<= ctr->shl_val;
1415 data >>= ctr->shr_val;
1416 if (ctr->val2)
1417 data &= ctr->val2;
1418 data |= ctr->val3;
1419 data += ctr->val1;
1420 t_hdr->saved_state[ctr->index_v] = data;
1421 break;
1422 default:
1423 dev_info(&adapter->pdev->dev,
1424 "Unknown opcode\n");
1425 break;
1426 }
1427 }
1428 addr += ctr->stride;
1429 }
1430 return 0;
1431}
1432
1433static u32
1434qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1435 u32 *buffer)
1436{
1437 int loop;
1438 u32 val, data = 0;
1439 struct __mux *mux = &entry->region.mux;
1440 void __iomem *base = adapter->ahw->pci_base0;
1441
1442 val = mux->val;
1443 for (loop = 0; loop < mux->no_ops; loop++) {
1444 QLCNIC_WR_DUMP_REG(mux->addr, base, val);
1445 QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
1446 *buffer++ = cpu_to_le32(val);
1447 *buffer++ = cpu_to_le32(data);
1448 val += mux->val_stride;
1449 }
1450 return 2 * mux->no_ops * sizeof(u32);
1451}
1452
1453static u32
1454qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1455 u32 *buffer)
1456{
1457 int i, loop;
1458 u32 cnt, addr, data, que_id = 0;
1459 void __iomem *base = adapter->ahw->pci_base0;
1460 struct __queue *que = &entry->region.que;
1461
1462 addr = que->read_addr;
1463 cnt = que->read_addr_cnt;
1464
1465 for (loop = 0; loop < que->no_ops; loop++) {
1466 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1467 addr = que->read_addr;
1468 for (i = 0; i < cnt; i++) {
1469 QLCNIC_RD_DUMP_REG(addr, base, &data);
1470 *buffer++ = cpu_to_le32(data);
1471 addr += que->read_addr_stride;
1472 }
1473 que_id += que->stride;
1474 }
1475 return que->no_ops * cnt * sizeof(u32);
1476}
1477
1478static u32
1479qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1480 u32 *buffer)
1481{
1482 int i;
1483 u32 data;
1484 void __iomem *addr;
1485 struct __ocm *ocm = &entry->region.ocm;
1486
1487 addr = adapter->ahw->pci_base0 + ocm->read_addr;
1488 for (i = 0; i < ocm->no_ops; i++) {
1489 data = readl(addr);
1490 *buffer++ = cpu_to_le32(data);
1491 addr += ocm->read_addr_stride;
1492 }
1493 return ocm->no_ops * sizeof(u32);
1494}
1495
1496static u32
1497qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1498 u32 *buffer)
1499{
1500 int i, count = 0;
1501 u32 fl_addr, size, val, lck_val, addr;
1502 struct __mem *rom = &entry->region.mem;
1503 void __iomem *base = adapter->ahw->pci_base0;
1504
1505 fl_addr = rom->addr;
1506 size = rom->size/4;
1507lock_try:
1508 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
1509 if (!lck_val && count < MAX_CTL_CHECK) {
1510 msleep(10);
1511 count++;
1512 goto lock_try;
1513 }
1514 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
1515 for (i = 0; i < size; i++) {
1516 addr = fl_addr & 0xFFFF0000;
1517 QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
1518 addr = LSW(fl_addr) + FLASH_ROM_DATA;
1519 QLCNIC_RD_DUMP_REG(addr, base, &val);
1520 fl_addr += 4;
1521 *buffer++ = cpu_to_le32(val);
1522 }
1523 readl(base + QLCNIC_FLASH_SEM2_ULK);
1524 return rom->size;
1525}
1526
1527static u32
1528qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
1529 struct qlcnic_dump_entry *entry, u32 *buffer)
1530{
1531 int i;
1532 u32 cnt, val, data, addr;
1533 void __iomem *base = adapter->ahw->pci_base0;
1534 struct __cache *l1 = &entry->region.cache;
1535
1536 val = l1->init_tag_val;
1537
1538 for (i = 0; i < l1->no_ops; i++) {
1539 QLCNIC_WR_DUMP_REG(l1->addr, base, val);
1540 QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
1541 addr = l1->read_addr;
1542 cnt = l1->read_addr_num;
1543 while (cnt) {
1544 QLCNIC_RD_DUMP_REG(addr, base, &data);
1545 *buffer++ = cpu_to_le32(data);
1546 addr += l1->read_addr_stride;
1547 cnt--;
1548 }
1549 val += l1->stride;
1550 }
1551 return l1->no_ops * l1->read_addr_num * sizeof(u32);
1552}
1553
1554static u32
1555qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
1556 struct qlcnic_dump_entry *entry, u32 *buffer)
1557{
1558 int i;
1559 u32 cnt, val, data, addr;
1560 u8 poll_mask, poll_to, time_out = 0;
1561 void __iomem *base = adapter->ahw->pci_base0;
1562 struct __cache *l2 = &entry->region.cache;
1563
1564 val = l2->init_tag_val;
1565 poll_mask = LSB(MSW(l2->ctrl_val));
1566 poll_to = MSB(MSW(l2->ctrl_val));
1567
1568 for (i = 0; i < l2->no_ops; i++) {
1569 QLCNIC_WR_DUMP_REG(l2->addr, base, val);
1570 if (LSW(l2->ctrl_val))
1571 QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
1572 LSW(l2->ctrl_val));
1573 if (!poll_mask)
1574 goto skip_poll;
1575 do {
1576 QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
1577 if (!(data & poll_mask))
1578 break;
1579 msleep(1);
1580 time_out++;
1581 } while (time_out <= poll_to);
1582
1583 if (time_out > poll_to) {
1584 dev_err(&adapter->pdev->dev,
1585 "Timeout exceeded in %s, aborting dump\n",
1586 __func__);
1587 return -EINVAL;
1588 }
1589skip_poll:
1590 addr = l2->read_addr;
1591 cnt = l2->read_addr_num;
1592 while (cnt) {
1593 QLCNIC_RD_DUMP_REG(addr, base, &data);
1594 *buffer++ = cpu_to_le32(data);
1595 addr += l2->read_addr_stride;
1596 cnt--;
1597 }
1598 val += l2->stride;
1599 }
1600 return l2->no_ops * l2->read_addr_num * sizeof(u32);
1601}
1602
1603static u32
1604qlcnic_read_memory(struct qlcnic_adapter *adapter,
1605 struct qlcnic_dump_entry *entry, u32 *buffer)
1606{
1607 u32 addr, data, test, ret = 0;
1608 int i, reg_read;
1609 struct __mem *mem = &entry->region.mem;
1610 void __iomem *base = adapter->ahw->pci_base0;
1611
1612 reg_read = mem->size;
1613 addr = mem->addr;
1614 /* check for data size of multiple of 16 and 16 byte alignment */
1615 if ((addr & 0xf) || (reg_read%16)) {
1616 dev_info(&adapter->pdev->dev,
1617 "Unaligned memory addr:0x%x size:0x%x\n",
1618 addr, reg_read);
1619 return -EINVAL;
1620 }
1621
1622 mutex_lock(&adapter->ahw->mem_lock);
1623
1624 while (reg_read != 0) {
1625 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
1626 QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
1627 QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
1628 TA_CTL_ENABLE | TA_CTL_START);
1629
1630 for (i = 0; i < MAX_CTL_CHECK; i++) {
1631 QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
1632 if (!(test & TA_CTL_BUSY))
1633 break;
1634 }
1635 if (i == MAX_CTL_CHECK) {
1636 if (printk_ratelimit()) {
1637 dev_err(&adapter->pdev->dev,
1638 "failed to read through agent\n");
1639 ret = -EINVAL;
1640 goto out;
1641 }
1642 }
1643 for (i = 0; i < 4; i++) {
1644 QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
1645 *buffer++ = cpu_to_le32(data);
1646 }
1647 addr += 16;
1648 reg_read -= 16;
1649 ret += 16;
1650 }
1651out:
1652 mutex_unlock(&adapter->ahw->mem_lock);
1653 return mem->size;
1654}
1655
1656static u32
1657qlcnic_dump_nop(struct qlcnic_adapter *adapter,
1658 struct qlcnic_dump_entry *entry, u32 *buffer)
1659{
1660 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
1661 return 0;
1662}
1663
/* Opcode -> handler dispatch table for firmware dump template entries;
 * qlcnic_dump_fw() searches it linearly by entry->hdr.type. */
struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};
1686
1687/* Walk the template and collect dump for each entry in the dump template */
1688static int
1689qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
1690 u32 size)
1691{
1692 int ret = 1;
1693 if (size != entry->hdr.cap_size) {
1694 dev_info(dev,
1695 "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
1696 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
1697 dev_info(dev, "Aborting further dump capture\n");
1698 ret = 0;
1699 }
1700 return ret;
1701}
1702
/*
 * Capture a firmware dump by walking the dump template: each template entry
 * carries an opcode, a capability mask and sizes; entries enabled in
 * tmpl_hdr->drv_cap_mask are captured via the matching fw_dump_ops[] handler.
 *
 * Returns 0 on success (data in fw_dump->data, udev event sent), -EIO when a
 * previous dump is uncleared or no capability is selected, -ENOMEM on
 * allocation failure, -EINVAL on an invalid entry or a size mismatch.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	u32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};	/* envp array for kobject_uevent_env */
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;

	/* refuse to overwrite a dump user space has not collected yet */
	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			"Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data) {
		dev_info(&adapter->pdev->dev,
			"Unable to allocate (%d KB) for fw dump\n",
			dump_size/1024);
		return -ENOMEM;
	}
	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	ops_cnt = ARRAY_SIZE(fw_dump_ops);
	entry_offset = tmpl_hdr->offset;
	/* stamp driver and firmware versions into the template header */
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	for (i = 0; i < no_entries; i++) {
		/* entries live back-to-back after the template header */
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}
		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}
		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				"Invalid entry type %d, exiting dump\n",
				entry->hdr.type);
			goto error;
		}
		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		/* NOTE(review): some handlers return (u32)-EINVAL on failure;
		 * that value never equals cap_size, so such entries end up
		 * flagged QLCNIC_DUMP_SKIP here — confirm this is intended. */
		if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
			dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		/* advance by the header-declared sizes, not by 'dump' */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			"Captured(%d) and expected size(%d) do not match\n",
			buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
			fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	vfree(fw_dump->data);
	return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 0bcda9c51e9b..de79cde233de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -25,10 +25,6 @@ static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
25 25
26#define QLCNIC_ADDR_ERROR (0xffffffff) 26#define QLCNIC_ADDR_ERROR (0xffffffff)
27 27
28static void
29qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
30 struct qlcnic_host_rds_ring *rds_ring);
31
32static int 28static int
33qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter); 29qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter);
34 30
@@ -250,7 +246,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
250 rds_ring->dma_size = 246 rds_ring->dma_size =
251 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN; 247 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
252 248
253 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 249 if (adapter->ahw->capabilities &
250 QLCNIC_FW_CAPABILITY_HW_LRO)
254 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA; 251 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
255 252
256 rds_ring->skb_size = 253 rds_ring->skb_size =
@@ -659,7 +656,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
659 "Not an Ethernet NIC func=%u\n", val); 656 "Not an Ethernet NIC func=%u\n", val);
660 return -EIO; 657 return -EIO;
661 } 658 }
662 adapter->physical_port = (val >> 2); 659 adapter->ahw->physical_port = (val >> 2);
663 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo)) 660 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
664 timeo = QLCNIC_INIT_TIMEOUT_SECS; 661 timeo = QLCNIC_INIT_TIMEOUT_SECS;
665 662
@@ -778,15 +775,15 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
778static 775static
779struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section) 776struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
780{ 777{
781 u32 i; 778 u32 i, entries;
782 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; 779 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
783 __le32 entries = cpu_to_le32(directory->num_entries); 780 entries = le32_to_cpu(directory->num_entries);
784 781
785 for (i = 0; i < entries; i++) { 782 for (i = 0; i < entries; i++) {
786 783
787 __le32 offs = cpu_to_le32(directory->findex) + 784 u32 offs = le32_to_cpu(directory->findex) +
788 (i * cpu_to_le32(directory->entry_size)); 785 i * le32_to_cpu(directory->entry_size);
789 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8)); 786 u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
790 787
791 if (tab_type == section) 788 if (tab_type == section)
792 return (struct uni_table_desc *) &unirom[offs]; 789 return (struct uni_table_desc *) &unirom[offs];
@@ -802,17 +799,16 @@ qlcnic_validate_header(struct qlcnic_adapter *adapter)
802{ 799{
803 const u8 *unirom = adapter->fw->data; 800 const u8 *unirom = adapter->fw->data;
804 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0]; 801 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
805 __le32 fw_file_size = adapter->fw->size; 802 u32 entries, entry_size, tab_size, fw_file_size;
806 __le32 entries; 803
807 __le32 entry_size; 804 fw_file_size = adapter->fw->size;
808 __le32 tab_size;
809 805
810 if (fw_file_size < FILEHEADER_SIZE) 806 if (fw_file_size < FILEHEADER_SIZE)
811 return -EINVAL; 807 return -EINVAL;
812 808
813 entries = cpu_to_le32(directory->num_entries); 809 entries = le32_to_cpu(directory->num_entries);
814 entry_size = cpu_to_le32(directory->entry_size); 810 entry_size = le32_to_cpu(directory->entry_size);
815 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size); 811 tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
816 812
817 if (fw_file_size < tab_size) 813 if (fw_file_size < tab_size)
818 return -EINVAL; 814 return -EINVAL;
@@ -825,29 +821,29 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
825{ 821{
826 struct uni_table_desc *tab_desc; 822 struct uni_table_desc *tab_desc;
827 struct uni_data_desc *descr; 823 struct uni_data_desc *descr;
824 u32 offs, tab_size, data_size, idx;
828 const u8 *unirom = adapter->fw->data; 825 const u8 *unirom = adapter->fw->data;
829 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + 826 __le32 temp;
830 QLCNIC_UNI_BOOTLD_IDX_OFF));
831 __le32 offs;
832 __le32 tab_size;
833 __le32 data_size;
834 827
828 temp = *((__le32 *)&unirom[adapter->file_prd_off] +
829 QLCNIC_UNI_BOOTLD_IDX_OFF);
830 idx = le32_to_cpu(temp);
835 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD); 831 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
836 832
837 if (!tab_desc) 833 if (!tab_desc)
838 return -EINVAL; 834 return -EINVAL;
839 835
840 tab_size = cpu_to_le32(tab_desc->findex) + 836 tab_size = le32_to_cpu(tab_desc->findex) +
841 (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); 837 le32_to_cpu(tab_desc->entry_size) * (idx + 1);
842 838
843 if (adapter->fw->size < tab_size) 839 if (adapter->fw->size < tab_size)
844 return -EINVAL; 840 return -EINVAL;
845 841
846 offs = cpu_to_le32(tab_desc->findex) + 842 offs = le32_to_cpu(tab_desc->findex) +
847 (cpu_to_le32(tab_desc->entry_size) * (idx)); 843 le32_to_cpu(tab_desc->entry_size) * idx;
848 descr = (struct uni_data_desc *)&unirom[offs]; 844 descr = (struct uni_data_desc *)&unirom[offs];
849 845
850 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); 846 data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
851 847
852 if (adapter->fw->size < data_size) 848 if (adapter->fw->size < data_size)
853 return -EINVAL; 849 return -EINVAL;
@@ -861,27 +857,27 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
861 struct uni_table_desc *tab_desc; 857 struct uni_table_desc *tab_desc;
862 struct uni_data_desc *descr; 858 struct uni_data_desc *descr;
863 const u8 *unirom = adapter->fw->data; 859 const u8 *unirom = adapter->fw->data;
864 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] + 860 u32 offs, tab_size, data_size, idx;
865 QLCNIC_UNI_FIRMWARE_IDX_OFF)); 861 __le32 temp;
866 __le32 offs;
867 __le32 tab_size;
868 __le32 data_size;
869 862
863 temp = *((__le32 *)&unirom[adapter->file_prd_off] +
864 QLCNIC_UNI_FIRMWARE_IDX_OFF);
865 idx = le32_to_cpu(temp);
870 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW); 866 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
871 867
872 if (!tab_desc) 868 if (!tab_desc)
873 return -EINVAL; 869 return -EINVAL;
874 870
875 tab_size = cpu_to_le32(tab_desc->findex) + 871 tab_size = le32_to_cpu(tab_desc->findex) +
876 (cpu_to_le32(tab_desc->entry_size) * (idx + 1)); 872 le32_to_cpu(tab_desc->entry_size) * (idx + 1);
877 873
878 if (adapter->fw->size < tab_size) 874 if (adapter->fw->size < tab_size)
879 return -EINVAL; 875 return -EINVAL;
880 876
881 offs = cpu_to_le32(tab_desc->findex) + 877 offs = le32_to_cpu(tab_desc->findex) +
882 (cpu_to_le32(tab_desc->entry_size) * (idx)); 878 le32_to_cpu(tab_desc->entry_size) * idx;
883 descr = (struct uni_data_desc *)&unirom[offs]; 879 descr = (struct uni_data_desc *)&unirom[offs];
884 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size); 880 data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
885 881
886 if (adapter->fw->size < data_size) 882 if (adapter->fw->size < data_size)
887 return -EINVAL; 883 return -EINVAL;
@@ -895,19 +891,17 @@ qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
895 struct uni_table_desc *ptab_descr; 891 struct uni_table_desc *ptab_descr;
896 const u8 *unirom = adapter->fw->data; 892 const u8 *unirom = adapter->fw->data;
897 int mn_present = qlcnic_has_mn(adapter); 893 int mn_present = qlcnic_has_mn(adapter);
898 __le32 entries; 894 u32 entries, entry_size, tab_size, i;
899 __le32 entry_size; 895 __le32 temp;
900 __le32 tab_size;
901 u32 i;
902 896
903 ptab_descr = qlcnic_get_table_desc(unirom, 897 ptab_descr = qlcnic_get_table_desc(unirom,
904 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL); 898 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
905 if (!ptab_descr) 899 if (!ptab_descr)
906 return -EINVAL; 900 return -EINVAL;
907 901
908 entries = cpu_to_le32(ptab_descr->num_entries); 902 entries = le32_to_cpu(ptab_descr->num_entries);
909 entry_size = cpu_to_le32(ptab_descr->entry_size); 903 entry_size = le32_to_cpu(ptab_descr->entry_size);
910 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size); 904 tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
911 905
912 if (adapter->fw->size < tab_size) 906 if (adapter->fw->size < tab_size)
913 return -EINVAL; 907 return -EINVAL;
@@ -915,16 +909,16 @@ qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
915nomn: 909nomn:
916 for (i = 0; i < entries; i++) { 910 for (i = 0; i < entries; i++) {
917 911
918 __le32 flags, file_chiprev, offs; 912 u32 flags, file_chiprev, offs;
919 u8 chiprev = adapter->ahw->revision_id; 913 u8 chiprev = adapter->ahw->revision_id;
920 u32 flagbit; 914 u32 flagbit;
921 915
922 offs = cpu_to_le32(ptab_descr->findex) + 916 offs = le32_to_cpu(ptab_descr->findex) +
923 (i * cpu_to_le32(ptab_descr->entry_size)); 917 i * le32_to_cpu(ptab_descr->entry_size);
924 flags = cpu_to_le32(*((int *)&unirom[offs] + 918 temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
925 QLCNIC_UNI_FLAGS_OFF)); 919 flags = le32_to_cpu(temp);
926 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] + 920 temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
927 QLCNIC_UNI_CHIP_REV_OFF)); 921 file_chiprev = le32_to_cpu(temp);
928 922
929 flagbit = mn_present ? 1 : 2; 923 flagbit = mn_present ? 1 : 2;
930 924
@@ -976,18 +970,20 @@ struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
976 u32 section, u32 idx_offset) 970 u32 section, u32 idx_offset)
977{ 971{
978 const u8 *unirom = adapter->fw->data; 972 const u8 *unirom = adapter->fw->data;
979 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
980 idx_offset));
981 struct uni_table_desc *tab_desc; 973 struct uni_table_desc *tab_desc;
982 __le32 offs; 974 u32 offs, idx;
975 __le32 temp;
976
977 temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
978 idx = le32_to_cpu(temp);
983 979
984 tab_desc = qlcnic_get_table_desc(unirom, section); 980 tab_desc = qlcnic_get_table_desc(unirom, section);
985 981
986 if (tab_desc == NULL) 982 if (tab_desc == NULL)
987 return NULL; 983 return NULL;
988 984
989 offs = cpu_to_le32(tab_desc->findex) + 985 offs = le32_to_cpu(tab_desc->findex) +
990 (cpu_to_le32(tab_desc->entry_size) * idx); 986 le32_to_cpu(tab_desc->entry_size) * idx;
991 987
992 return (struct uni_data_desc *)&unirom[offs]; 988 return (struct uni_data_desc *)&unirom[offs];
993} 989}
@@ -996,11 +992,13 @@ static u8 *
996qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter) 992qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
997{ 993{
998 u32 offs = QLCNIC_BOOTLD_START; 994 u32 offs = QLCNIC_BOOTLD_START;
995 struct uni_data_desc *data_desc;
999 996
1000 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) 997 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
1001 offs = cpu_to_le32((qlcnic_get_data_desc(adapter, 998 QLCNIC_UNI_BOOTLD_IDX_OFF);
1002 QLCNIC_UNI_DIR_SECT_BOOTLD, 999
1003 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex); 1000 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1001 offs = le32_to_cpu(data_desc->findex);
1004 1002
1005 return (u8 *)&adapter->fw->data[offs]; 1003 return (u8 *)&adapter->fw->data[offs];
1006} 1004}
@@ -1009,43 +1007,48 @@ static u8 *
1009qlcnic_get_fw_offs(struct qlcnic_adapter *adapter) 1007qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
1010{ 1008{
1011 u32 offs = QLCNIC_IMAGE_START; 1009 u32 offs = QLCNIC_IMAGE_START;
1010 struct uni_data_desc *data_desc;
1012 1011
1013 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) 1012 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1014 offs = cpu_to_le32((qlcnic_get_data_desc(adapter, 1013 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1015 QLCNIC_UNI_DIR_SECT_FW, 1014 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1016 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex); 1015 offs = le32_to_cpu(data_desc->findex);
1017 1016
1018 return (u8 *)&adapter->fw->data[offs]; 1017 return (u8 *)&adapter->fw->data[offs];
1019} 1018}
1020 1019
1021static __le32 1020static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
1022qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
1023{ 1021{
1024 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE) 1022 struct uni_data_desc *data_desc;
1025 return cpu_to_le32((qlcnic_get_data_desc(adapter, 1023 const u8 *unirom = adapter->fw->data;
1026 QLCNIC_UNI_DIR_SECT_FW, 1024
1027 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size); 1025 data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1026 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1027
1028 if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
1029 return le32_to_cpu(data_desc->size);
1028 else 1030 else
1029 return cpu_to_le32( 1031 return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
1030 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
1031} 1032}
1032 1033
1033static __le32 1034static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
1034qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
1035{ 1035{
1036 struct uni_data_desc *fw_data_desc; 1036 struct uni_data_desc *fw_data_desc;
1037 const struct firmware *fw = adapter->fw; 1037 const struct firmware *fw = adapter->fw;
1038 __le32 major, minor, sub; 1038 u32 major, minor, sub;
1039 __le32 version_offset;
1039 const u8 *ver_str; 1040 const u8 *ver_str;
1040 int i, ret; 1041 int i, ret;
1041 1042
1042 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) 1043 if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
1043 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]); 1044 version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
1045 return le32_to_cpu(version_offset);
1046 }
1044 1047
1045 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW, 1048 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
1046 QLCNIC_UNI_FIRMWARE_IDX_OFF); 1049 QLCNIC_UNI_FIRMWARE_IDX_OFF);
1047 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) + 1050 ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
1048 cpu_to_le32(fw_data_desc->size) - 17; 1051 le32_to_cpu(fw_data_desc->size) - 17;
1049 1052
1050 for (i = 0; i < 12; i++) { 1053 for (i = 0; i < 12; i++) {
1051 if (!strncmp(&ver_str[i], "REV=", 4)) { 1054 if (!strncmp(&ver_str[i], "REV=", 4)) {
@@ -1061,18 +1064,20 @@ qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
1061 return 0; 1064 return 0;
1062} 1065}
1063 1066
1064static __le32 1067static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
1065qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
1066{ 1068{
1067 const struct firmware *fw = adapter->fw; 1069 const struct firmware *fw = adapter->fw;
1068 __le32 bios_ver, prd_off = adapter->file_prd_off; 1070 u32 bios_ver, prd_off = adapter->file_prd_off;
1071 u8 *version_offset;
1072 __le32 temp;
1069 1073
1070 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE) 1074 if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
1071 return cpu_to_le32( 1075 version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
1072 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]); 1076 return le32_to_cpu(*(__le32 *)version_offset);
1077 }
1073 1078
1074 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off]) 1079 temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
1075 + QLCNIC_UNI_BIOS_VERSION_OFF)); 1080 bios_ver = le32_to_cpu(temp);
1076 1081
1077 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24); 1082 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
1078} 1083}
@@ -1131,24 +1136,24 @@ static const char *fw_name[] = {
1131int 1136int
1132qlcnic_load_firmware(struct qlcnic_adapter *adapter) 1137qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1133{ 1138{
1134 u64 *ptr64; 1139 __le64 *ptr64;
1135 u32 i, flashaddr, size; 1140 u32 i, flashaddr, size;
1136 const struct firmware *fw = adapter->fw; 1141 const struct firmware *fw = adapter->fw;
1137 struct pci_dev *pdev = adapter->pdev; 1142 struct pci_dev *pdev = adapter->pdev;
1138 1143
1139 dev_info(&pdev->dev, "loading firmware from %s\n", 1144 dev_info(&pdev->dev, "loading firmware from %s\n",
1140 fw_name[adapter->fw_type]); 1145 fw_name[adapter->ahw->fw_type]);
1141 1146
1142 if (fw) { 1147 if (fw) {
1143 __le64 data; 1148 u64 data;
1144 1149
1145 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8; 1150 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1146 1151
1147 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter); 1152 ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
1148 flashaddr = QLCNIC_BOOTLD_START; 1153 flashaddr = QLCNIC_BOOTLD_START;
1149 1154
1150 for (i = 0; i < size; i++) { 1155 for (i = 0; i < size; i++) {
1151 data = cpu_to_le64(ptr64[i]); 1156 data = le64_to_cpu(ptr64[i]);
1152 1157
1153 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data)) 1158 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
1154 return -EIO; 1159 return -EIO;
@@ -1156,13 +1161,13 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1156 flashaddr += 8; 1161 flashaddr += 8;
1157 } 1162 }
1158 1163
1159 size = (__force u32)qlcnic_get_fw_size(adapter) / 8; 1164 size = qlcnic_get_fw_size(adapter) / 8;
1160 1165
1161 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter); 1166 ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
1162 flashaddr = QLCNIC_IMAGE_START; 1167 flashaddr = QLCNIC_IMAGE_START;
1163 1168
1164 for (i = 0; i < size; i++) { 1169 for (i = 0; i < size; i++) {
1165 data = cpu_to_le64(ptr64[i]); 1170 data = le64_to_cpu(ptr64[i]);
1166 1171
1167 if (qlcnic_pci_mem_write_2M(adapter, 1172 if (qlcnic_pci_mem_write_2M(adapter,
1168 flashaddr, data)) 1173 flashaddr, data))
@@ -1171,9 +1176,9 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1171 flashaddr += 8; 1176 flashaddr += 8;
1172 } 1177 }
1173 1178
1174 size = (__force u32)qlcnic_get_fw_size(adapter) % 8; 1179 size = qlcnic_get_fw_size(adapter) % 8;
1175 if (size) { 1180 if (size) {
1176 data = cpu_to_le64(ptr64[i]); 1181 data = le64_to_cpu(ptr64[i]);
1177 1182
1178 if (qlcnic_pci_mem_write_2M(adapter, 1183 if (qlcnic_pci_mem_write_2M(adapter,
1179 flashaddr, data)) 1184 flashaddr, data))
@@ -1225,11 +1230,11 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1225static int 1230static int
1226qlcnic_validate_firmware(struct qlcnic_adapter *adapter) 1231qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1227{ 1232{
1228 __le32 val; 1233 u32 val;
1229 u32 ver, bios, min_size; 1234 u32 ver, bios, min_size;
1230 struct pci_dev *pdev = adapter->pdev; 1235 struct pci_dev *pdev = adapter->pdev;
1231 const struct firmware *fw = adapter->fw; 1236 const struct firmware *fw = adapter->fw;
1232 u8 fw_type = adapter->fw_type; 1237 u8 fw_type = adapter->ahw->fw_type;
1233 1238
1234 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) { 1239 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
1235 if (qlcnic_validate_unified_romimage(adapter)) 1240 if (qlcnic_validate_unified_romimage(adapter))
@@ -1237,8 +1242,8 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1237 1242
1238 min_size = QLCNIC_UNI_FW_MIN_SIZE; 1243 min_size = QLCNIC_UNI_FW_MIN_SIZE;
1239 } else { 1244 } else {
1240 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]); 1245 val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
1241 if ((__force u32)val != QLCNIC_BDINFO_MAGIC) 1246 if (val != QLCNIC_BDINFO_MAGIC)
1242 return -EINVAL; 1247 return -EINVAL;
1243 1248
1244 min_size = QLCNIC_FW_MIN_SIZE; 1249 min_size = QLCNIC_FW_MIN_SIZE;
@@ -1259,7 +1264,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1259 1264
1260 val = qlcnic_get_bios_version(adapter); 1265 val = qlcnic_get_bios_version(adapter);
1261 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios); 1266 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
1262 if ((__force u32)val != bios) { 1267 if (val != bios) {
1263 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", 1268 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
1264 fw_name[fw_type]); 1269 fw_name[fw_type]);
1265 return -EINVAL; 1270 return -EINVAL;
@@ -1274,7 +1279,7 @@ qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1274{ 1279{
1275 u8 fw_type; 1280 u8 fw_type;
1276 1281
1277 switch (adapter->fw_type) { 1282 switch (adapter->ahw->fw_type) {
1278 case QLCNIC_UNKNOWN_ROMIMAGE: 1283 case QLCNIC_UNKNOWN_ROMIMAGE:
1279 fw_type = QLCNIC_UNIFIED_ROMIMAGE; 1284 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
1280 break; 1285 break;
@@ -1285,7 +1290,7 @@ qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1285 break; 1290 break;
1286 } 1291 }
1287 1292
1288 adapter->fw_type = fw_type; 1293 adapter->ahw->fw_type = fw_type;
1289} 1294}
1290 1295
1291 1296
@@ -1295,16 +1300,17 @@ void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
1295 struct pci_dev *pdev = adapter->pdev; 1300 struct pci_dev *pdev = adapter->pdev;
1296 int rc; 1301 int rc;
1297 1302
1298 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE; 1303 adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
1299 1304
1300next: 1305next:
1301 qlcnic_get_next_fwtype(adapter); 1306 qlcnic_get_next_fwtype(adapter);
1302 1307
1303 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) { 1308 if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
1304 adapter->fw = NULL; 1309 adapter->fw = NULL;
1305 } else { 1310 } else {
1306 rc = request_firmware(&adapter->fw, 1311 rc = request_firmware(&adapter->fw,
1307 fw_name[adapter->fw_type], &pdev->dev); 1312 fw_name[adapter->ahw->fw_type],
1313 &pdev->dev);
1308 if (rc != 0) 1314 if (rc != 0)
1309 goto next; 1315 goto next;
1310 1316
@@ -1324,633 +1330,3 @@ qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1324 release_firmware(adapter->fw); 1330 release_firmware(adapter->fw);
1325 adapter->fw = NULL; 1331 adapter->fw = NULL;
1326} 1332}
1327
/*
 * qlcnic_handle_linkevent - decode a firmware link-event message.
 *
 * Unpacks cable info, link speed, duplex, autoneg and loopback status
 * from msg->body[1]/body[2], caches them on the adapter, and propagates
 * carrier changes to the stack via qlcnic_advert_link_change().
 */
static void
qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
		struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8 link_status, module, duplex, autoneg;
	u8 lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	/* body[1]: bits 0-31 cable OUI, 32-47 cable length, 48-63 speed */
	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	/* body[2]: bits 0-7 status, 8-15 module, 16-23 duplex,
	 * 24-31 autoneg, 32-33 loopback status */
	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
				"length %d\n", cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
				cable_len);

	/* Link-down while an internal/external loopback mode is active is
	 * expected; flag it so the loopback state machine can proceed. */
	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;

	adapter->module_type = module;
	adapter->link_autoneg = autoneg;

	if (link_status) {
		adapter->link_speed = link_speed;
	} else {
		/* no link: speed/duplex are meaningless */
		adapter->link_speed = SPEED_UNKNOWN;
		adapter->link_duplex = DUPLEX_UNKNOWN;
	}
}
1379
/*
 * qlcnic_handle_fw_message - gather a firmware response from the status
 * ring and dispatch it by opcode.
 *
 * A message spans @desc_cnt status descriptors starting at @index; at
 * most four descriptors (8 u64 words) are copied into a local
 * qlcnic_fw_msg. Handles link events and loopback-configure completions.
 */
static void
qlcnic_handle_fw_message(int desc_cnt, int index,
		struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	/* each descriptor contributes two little-endian u64 words */
	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		/* body[1] is the loopback-configure completion code; a
		 * negative diag_cnt reports the failure to the diag test. */
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev, "loopback configure request failed,"
					" ret %x\n", ret);
			adapter->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		break;
	}
}
1432
/*
 * qlcnic_alloc_rx_skb - allocate and DMA-map a receive buffer.
 *
 * On success stores the skb and its DMA address in @buffer and returns 0.
 * Returns -ENOMEM on allocation or mapping failure (the corresponding
 * stats counter is bumped and the skb freed on mapping failure).
 */
static int
qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring,
		struct qlcnic_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
	if (!skb) {
		adapter->stats.skb_alloc_failure++;
		return -ENOMEM;
	}

	/* align the IP header for the stack */
	skb_reserve(skb, NET_IP_ALIGN);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		adapter->stats.rx_dma_map_error++;
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->dma = dma;

	return 0;
}
1464
/*
 * qlcnic_process_rxbuf - detach the skb at @index from the RDS ring.
 *
 * Unmaps the buffer's DMA mapping, applies the hardware checksum verdict
 * (@cksum) to the skb, clears the ring slot's skb pointer and returns the
 * skb. Returns NULL (with a WARN) if the slot unexpectedly holds no skb.
 */
static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;

	/* trust the HW checksum only if RXCSUM is enabled and HW said OK */
	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
	    (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	/* slot is now empty; caller re-posts it via the free list */
	buffer->skb = NULL;

	return skb;
}
1495
/*
 * qlcnic_check_rx_tagging - strip and validate the VLAN tag of an RX skb.
 *
 * If the frame is VLAN-tagged, the tag is extracted into *@vlan_tag and
 * the 802.1Q header is removed from the skb data. Returns 0 to accept the
 * packet (with *@vlan_tag == 0xffff meaning "deliver untagged"), -EINVAL
 * to drop it: a tag other than the port VLAN (pvid) is only allowed when
 * tagging is enabled on the adapter.
 */
static inline int
qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
			u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		/* tagged frame: shift the MAC header over the VLAN header */
		eth_hdr = (struct ethhdr *) skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}
1520
/*
 * qlcnic_process_rcv - handle one ordinary receive completion.
 *
 * Decodes the buffer index, length, checksum verdict and packet offset
 * from @sts_data0, detaches the skb from the RDS ring, applies VLAN tag
 * handling and hands the packet to GRO. Returns the consumed ring buffer
 * so the caller can recycle it, or NULL on a bad ring/index.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	u16 vid = 0xffff;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	/* clamp to the buffer size the HW could actually have filled */
	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* 0xffff means no VLAN tag to report to the stack */
	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
1579
1580#define QLC_TCP_HDR_SIZE 20
1581#define QLC_TCP_TS_OPTION_SIZE 12
1582#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1583
1584static struct qlcnic_rx_buffer *
1585qlcnic_process_lro(struct qlcnic_adapter *adapter,
1586 struct qlcnic_host_sds_ring *sds_ring,
1587 int ring, u64 sts_data0, u64 sts_data1)
1588{
1589 struct net_device *netdev = adapter->netdev;
1590 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1591 struct qlcnic_rx_buffer *buffer;
1592 struct sk_buff *skb;
1593 struct qlcnic_host_rds_ring *rds_ring;
1594 struct iphdr *iph;
1595 struct tcphdr *th;
1596 bool push, timestamp;
1597 int l2_hdr_offset, l4_hdr_offset;
1598 int index;
1599 u16 lro_length, length, data_offset;
1600 u32 seq_number;
1601 u16 vid = 0xffff;
1602
1603 if (unlikely(ring > adapter->max_rds_rings))
1604 return NULL;
1605
1606 rds_ring = &recv_ctx->rds_rings[ring];
1607
1608 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1609 if (unlikely(index > rds_ring->num_desc))
1610 return NULL;
1611
1612 buffer = &rds_ring->rx_buf_arr[index];
1613
1614 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1615 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1616 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1617 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1618 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1619 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1620
1621 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1622 if (!skb)
1623 return buffer;
1624
1625 if (timestamp)
1626 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1627 else
1628 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1629
1630 skb_put(skb, lro_length + data_offset);
1631
1632 skb_pull(skb, l2_hdr_offset);
1633
1634 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1635 adapter->stats.rxdropped++;
1636 dev_kfree_skb(skb);
1637 return buffer;
1638 }
1639
1640 skb->protocol = eth_type_trans(skb, netdev);
1641
1642 iph = (struct iphdr *)skb->data;
1643 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1644
1645 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1646 iph->tot_len = htons(length);
1647 iph->check = 0;
1648 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1649 th->psh = push;
1650 th->seq = htonl(seq_number);
1651
1652 length = skb->len;
1653
1654 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
1655 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
1656
1657 if (vid != 0xffff)
1658 __vlan_hwaccel_put_tag(skb, vid);
1659 netif_receive_skb(skb);
1660
1661 adapter->stats.lro_pkts++;
1662 adapter->stats.lrobytes += length;
1663
1664 return buffer;
1665}
1666
/*
 * qlcnic_process_rcv_ring - NAPI poll worker for one status ring.
 *
 * Consumes up to @max status descriptors, dispatching each by opcode
 * (normal RX, LRO, firmware response), returns each descriptor to the
 * firmware (STATUS_OWNER_PHANTOM), refills the RDS rings from the
 * per-ring free lists, and finally advances the consumer doorbell.
 * Returns the number of descriptors processed.
 */
int
qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;

	int count = 0;
	int opcode, ring, desc_cnt;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		/* firmware still owns this descriptor: nothing more to do */
		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through — no rx buffer to recycle */
		default:
			goto skip;
		}

		/* RX/LRO completions are expected to span one descriptor */
		WARN_ON(desc_cnt > 1);

		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;

skip:
		/* hand every descriptor of this completion back to FW */
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	/* replenish each RDS ring with the buffers consumed above */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct qlcnic_host_rds_ring *rds_ring =
			&adapter->recv_ctx->rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct qlcnic_rx_buffer, list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
1753
/*
 * qlcnic_post_rx_buffers - hand free RX buffers back to the hardware.
 *
 * Drains the RDS ring's free list, (re)allocating an skb for any entry
 * that lost its buffer, writes a receive descriptor per entry, and rings
 * the producer doorbell once at the end. Caller provides exclusion on
 * the free list; see qlcnic_post_rx_buffers_nodb() for the locked
 * variant used from the poll path.
 */
void
qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer;
	struct list_head *head;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			/* stop posting on allocation failure; entry stays
			 * on the free list for a later retry */
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		/* doorbell takes the last written index, hence producer-1 */
		writel((producer-1) & (rds_ring->num_desc-1),
				rds_ring->crb_rcv_producer);
	}
}
1794
/*
 * qlcnic_post_rx_buffers_nodb - locked variant of qlcnic_post_rx_buffers.
 *
 * Used from the NAPI poll path: takes rds_ring->lock with trylock and
 * simply returns if it is contended, since the other holder will post
 * the buffers itself. Otherwise identical to qlcnic_post_rx_buffers().
 */
static void
qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer;
	struct list_head *head;

	/* contended: the current lock holder will post for us */
	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		if (!buffer->skb) {
			/* allocation failed: leave the rest for later */
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		/* doorbell takes the last written index, hence producer-1 */
		writel((producer - 1) & (rds_ring->num_desc - 1),
				rds_ring->crb_rcv_producer);
	}
	spin_unlock(&rds_ring->lock);
}
1839
1840static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1841{
1842 int i;
1843 unsigned char *data = skb->data;
1844
1845 printk(KERN_INFO "\n");
1846 for (i = 0; i < skb->len; i++) {
1847 QLCDB(adapter, DRV, "%02x ", data[i]);
1848 if ((i & 0x0f) == 8)
1849 printk(KERN_INFO "\n");
1850 }
1851}
1852
/*
 * qlcnic_process_rcv_diag - consume one RX completion in diagnostic mode.
 *
 * Used by the loopback self-test: instead of delivering the packet to
 * the stack, it checks the payload against the adapter MAC via
 * qlcnic_check_loopback_buff(); a match increments diag_cnt, a mismatch
 * hex-dumps the frame. The skb is always freed here.
 */
void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	length = qlcnic_get_sts_totallength(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return;

	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	/* clamp to the buffer size the HW could actually have filled */
	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
		adapter->diag_cnt++;
	else
		dump_skb(skb, adapter);

	dev_kfree_skb_any(skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return;
}
1898
/*
 * qlcnic_process_rcv_ring_diag - diagnostic-mode status ring service.
 *
 * Processes at most one completion per call: firmware responses go to
 * qlcnic_handle_fw_message(), everything else to
 * qlcnic_process_rcv_diag(). Returns the descriptor(s) to firmware
 * ownership and advances the consumer doorbell.
 */
void
qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	/* firmware still owns the descriptor: nothing to process */
	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, sds_ring, ring, sts_data0);
		break;
	}

	/* return all descriptors of this completion to firmware */
	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
1936
1937void
1938qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1939 u8 alt_mac, u8 *mac)
1940{
1941 u32 mac_low, mac_high;
1942 int i;
1943
1944 mac_low = off1;
1945 mac_high = off2;
1946
1947 if (alt_mac) {
1948 mac_low |= (mac_low >> 16) | (mac_high << 16);
1949 mac_high >>= 16;
1950 }
1951
1952 for (i = 0; i < 2; i++)
1953 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1954 for (i = 2; i < 6; i++)
1955 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1956}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 000000000000..6f82812d0fab
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,1309 @@
1#include <linux/netdevice.h>
2#include <linux/if_vlan.h>
3#include <net/ip.h>
4#include <linux/ipv6.h>
5
6#include "qlcnic.h"
7
8#define QLCNIC_MAC_HASH(MAC)\
9 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
10
11#define TX_ETHER_PKT 0x01
12#define TX_TCP_PKT 0x02
13#define TX_UDP_PKT 0x03
14#define TX_IP_PKT 0x04
15#define TX_TCP_LSO 0x05
16#define TX_TCP_LSO6 0x06
17#define TX_TCPV6_PKT 0x0b
18#define TX_UDPV6_PKT 0x0c
19#define FLAGS_VLAN_TAGGED 0x10
20#define FLAGS_VLAN_OOB 0x40
21
22#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
23 (cmd_desc)->vlan_TCI = cpu_to_le16(v);
24#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
25 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
26#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
27 ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
28
29#define qlcnic_set_tx_port(_desc, _port) \
30 ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
31
32#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
33 ((_desc)->flags_opcode |= \
34 cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
35
36#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
37 ((_desc)->nfrags__length = \
38 cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
39
40/* owner bits of status_desc */
41#define STATUS_OWNER_HOST (0x1ULL << 56)
42#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
43
44/* Status descriptor:
45 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
46 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
47 53-55 desc_cnt, 56-57 owner, 58-63 opcode
48 */
49#define qlcnic_get_sts_port(sts_data) \
50 ((sts_data) & 0x0F)
51#define qlcnic_get_sts_status(sts_data) \
52 (((sts_data) >> 4) & 0x0F)
53#define qlcnic_get_sts_type(sts_data) \
54 (((sts_data) >> 8) & 0x0F)
55#define qlcnic_get_sts_totallength(sts_data) \
56 (((sts_data) >> 12) & 0xFFFF)
57#define qlcnic_get_sts_refhandle(sts_data) \
58 (((sts_data) >> 28) & 0xFFFF)
59#define qlcnic_get_sts_prot(sts_data) \
60 (((sts_data) >> 44) & 0x0F)
61#define qlcnic_get_sts_pkt_offset(sts_data) \
62 (((sts_data) >> 48) & 0x1F)
63#define qlcnic_get_sts_desc_cnt(sts_data) \
64 (((sts_data) >> 53) & 0x7)
65#define qlcnic_get_sts_opcode(sts_data) \
66 (((sts_data) >> 58) & 0x03F)
67
68#define qlcnic_get_lro_sts_refhandle(sts_data) \
69 ((sts_data) & 0x07FFF)
70#define qlcnic_get_lro_sts_length(sts_data) \
71 (((sts_data) >> 16) & 0x0FFFF)
72#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
73 (((sts_data) >> 32) & 0x0FF)
74#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
75 (((sts_data) >> 40) & 0x0FF)
76#define qlcnic_get_lro_sts_timestamp(sts_data) \
77 (((sts_data) >> 48) & 0x1)
78#define qlcnic_get_lro_sts_type(sts_data) \
79 (((sts_data) >> 49) & 0x7)
80#define qlcnic_get_lro_sts_push_flag(sts_data) \
81 (((sts_data) >> 52) & 0x1)
82#define qlcnic_get_lro_sts_seq_number(sts_data) \
83 ((sts_data) & 0x0FFFFFFFF)
84#define qlcnic_get_lro_sts_mss(sts_data1) \
85 ((sts_data1 >> 32) & 0x0FFFF)
86
87/* opcode field in status_desc */
88#define QLCNIC_SYN_OFFLOAD 0x03
89#define QLCNIC_RXPKT_DESC 0x04
90#define QLCNIC_OLD_RXPKT_DESC 0x3f
91#define QLCNIC_RESPONSE_DESC 0x05
92#define QLCNIC_LRO_DESC 0x12
93
94/* for status field in status_desc */
95#define STATUS_CKSUM_LOOP 0
96#define STATUS_CKSUM_OK 2
97
/*
 * qlcnic_change_filter - queue a MAC/VLAN learn request to firmware.
 *
 * Builds a QLCNIC_MAC_EVENT request (MAC add, or MAC+VLAN add when
 * @vlan_id is nonzero) directly in the TX ring descriptor at the current
 * producer and advances the producer. The caller is responsible for
 * ringing the TX doorbell (the transmit path does this).
 */
static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
				 u64 uaddr, __le16 vlan_id,
				 struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	/* the request is laid over a regular TX command descriptor */
	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
	/* make the descriptor visible before any doorbell write */
	smp_mb();
}
129
/*
 * qlcnic_send_filter - learn the source MAC (and VLAN) of a TX frame.
 *
 * Maintains the driver's MAC-learning hash: if the source address/VLAN
 * pair is already known, refresh it in firmware only when it has aged
 * past QLCNIC_READD_AGE; otherwise allocate a new filter entry, push it
 * to firmware via qlcnic_change_filter() and add it to the hash under
 * mac_learn_lock. Frames sourced from our own MAC, and anything past
 * the fmax table limit, are ignored.
 */
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
			       struct qlcnic_host_tx_ring *tx_ring,
			       struct cmd_desc_type0 *first_desc,
			       struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning*/
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	/* already known: refresh firmware entry only when aged out */
	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			    tmp_fil->vlan_id == vlan_id) {

			if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
				qlcnic_change_filter(adapter, src_addr, vlan_id,
						     tx_ring);
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	/* GFP_ATOMIC: called from the transmit path */
	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);

	spin_lock(&adapter->mac_learn_lock);

	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;

	spin_unlock(&adapter->mac_learn_lock);
}
185
/*
 * qlcnic_tx_pkt - fill in the protocol/offload fields of a TX descriptor.
 *
 * Determines VLAN handling (in-band tag, out-of-band tag, or forced
 * port-VLAN), selects the firmware opcode (plain ethernet, TSO, or
 * checksum offload per L3/L4 protocol), and for TSO copies the packet
 * headers into the descriptor ring as required by firmware.
 *
 * Returns 0 on success, -EIO when a tagged frame arrives on a pvid port
 * without tagging enabled (caller drops the packet).
 */
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
{
	u8 l4proto, opcode = 0, hdr_len = 0;
	u16 flags = 0, vlan_tci = 0;
	int copied, offset, copy_len, size;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	u16 protocol = ntohs(skb->protocol);
	u32 producer = tx_ring->producer;

	if (protocol == ETH_P_8021Q) {
		/* tag present in the frame itself */
		vh = (struct vlan_ethhdr *)skb->data;
		flags = FLAGS_VLAN_TAGGED;
		vlan_tci = ntohs(vh->h_vlan_TCI);
		protocol = ntohs(vh->h_vlan_encapsulated_proto);
	} else if (vlan_tx_tag_present(skb)) {
		/* tag carried out-of-band in the skb */
		flags = FLAGS_VLAN_OOB;
		vlan_tci = vlan_tx_tag_get(skb);
	}
	if (unlikely(adapter->pvid)) {
		if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
			return -EIO;
		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
			goto set_flags;

		/* untagged frame on a pvid port: force the port VLAN */
		flags = FLAGS_VLAN_OOB;
		vlan_tci = adapter->pvid;
	}
set_flags:
	qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	/* multicast/broadcast destination: flag it for firmware */
	if (*(skb->data) & BIT_0) {
		flags |= BIT_0;
		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
	}
	opcode = TX_ETHER_PKT;
	if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
			skb_shinfo(skb)->gso_size > 0) {
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;

		/* For LSO, we need to copy the MAC/IP/TCP headers into
		 * the descriptor ring */
		copied = 0;
		offset = 2;

		if (flags & FLAGS_VLAN_OOB) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;

			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;

			/* Create a TSO vlan header template for firmware */
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;

			copy_len = min((int)sizeof(struct cmd_desc_type0) -
				       offset, hdr_len + VLAN_HLEN);

			/* splice the 802.1Q header between the MAC
			 * addresses (12 bytes) and the rest of the header */
			vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
			skb_copy_from_linear_data(skb, vh, 12);
			vh->h_vlan_proto = htons(ETH_P_8021Q);
			vh->h_vlan_TCI = htons(vlan_tci);

			skb_copy_from_linear_data_offset(skb, 12,
							 (char *)vh + 16,
							 copy_len - 16);
			copied = copy_len - VLAN_HLEN;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		/* copy the remaining headers, one descriptor at a time */
		while (copied < hdr_len) {
			size = (int)sizeof(struct cmd_desc_type0) - offset;
			copy_len = min(size, (hdr_len - copied));
			hwdesc = &tx_ring->desc_head[producer];
			tx_ring->cmd_buf_arr[producer].skb = NULL;
			skb_copy_from_linear_data_offset(skb, copied,
							 (char *)hwdesc +
							 offset, copy_len);
			copied += copy_len;
			offset = 0;
			producer = get_next_index(producer, tx_ring->num_desc);
		}

		tx_ring->producer = producer;
		smp_mb();
		adapter->stats.lso_frames++;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload: pick opcode by L3/L4 protocol */
		if (protocol == ETH_P_IP) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == ETH_P_IPV6) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}
	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	/* flags_opcode is OR-accumulated: this adds the final opcode */
	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);

	return 0;
}
305
/*
 * qlcnic_map_tx_skb - DMA-map an outgoing skb for the hardware.
 *
 * Maps the linear head into frag_array[0] and each page fragment into
 * frag_array[1..nr_frags]. On any mapping failure all previously mapped
 * entries are unwound and -ENOMEM is returned; on success returns 0.
 * Pair with qlcnic_unmap_buffers().
 */
static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			     struct qlcnic_cmd_buffer *pbuf)
{
	struct qlcnic_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data, skb_headlen(skb),
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;

	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];
		map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, map))
			goto unwind;

		nf->dma = map;
		nf->length = skb_frag_size(frag);
	}

	return 0;

unwind:
	/* undo the fragments mapped so far, then the linear head */
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}
351
352static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
353 struct qlcnic_cmd_buffer *pbuf)
354{
355 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
356 int i, nr_frags = skb_shinfo(skb)->nr_frags;
357
358 for (i = 0; i < nr_frags; i++) {
359 nf = &pbuf->frag_array[i+1];
360 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
361 }
362
363 nf = &pbuf->frag_array[0];
364 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
365 pbuf->skb = NULL;
366}
367
368static inline void qlcnic_clear_cmddesc(u64 *desc)
369{
370 desc[0] = 0ULL;
371 desc[2] = 0ULL;
372 desc[7] = 0ULL;
373}
374
/*
 * qlcnic_xmit_frame - net_device ndo_start_xmit handler.
 *
 * Validates device state and fragment count (linearizing excess frags
 * for non-TSO packets), DMA-maps the skb, writes the buffer addresses
 * into up to four-address command descriptors, fills protocol/offload
 * fields via qlcnic_tx_pkt(), optionally learns the source MAC, and
 * rings the TX doorbell. Returns NETDEV_TX_OK (the skb is consumed or
 * dropped) or NETDEV_TX_BUSY when the device/queue cannot accept it.
 */
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
	struct qlcnic_cmd_buffer *pbuf;
	struct qlcnic_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	struct ethhdr *phdr;
	int i, k, frag_count, delta = 0;
	u32 producer, num_txd;

	num_txd = tx_ring->num_desc;

	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* anti-spoof mode: only our own source MAC may transmit */
	if (adapter->flags & QLCNIC_MACSPOOF) {
		phdr = (struct ethhdr *)skb->data;
		if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
			goto drop_packet;
	}

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* 14 frags supported for normal packet and
	 * 32 frags supported for TSO packet
	 */
	if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
		/* pull enough fragment bytes into the linear area to get
		 * back under the per-packet fragment limit */
		for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
			delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (!__pskb_pull_tail(skb, delta))
			goto drop_packet;

		frag_count = 1 + skb_shinfo(skb)->nr_frags;
	}

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		/* re-check after stopping to close the race with the
		 * completion path freeing descriptors */
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_start_queue(netdev);
		} else {
			adapter->stats.xmit_off++;
			return NETDEV_TX_BUSY;
		}
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	first_desc = &tx_ring->desc_head[producer];
	hwdesc = &tx_ring->desc_head[producer];
	qlcnic_clear_cmddesc((u64 *)hwdesc);

	if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
		adapter->stats.tx_dma_map_error++;
		goto drop_packet;
	}

	pbuf->skb = skb;
	pbuf->frag_count = frag_count;

	qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
	qlcnic_set_tx_port(first_desc, adapter->portnum);

	/* each descriptor holds up to four buffer addresses */
	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			qlcnic_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	/* descriptors must be visible before the doorbell write */
	smp_mb();

	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
		goto unwind_buff;

	if (adapter->mac_learn)
		qlcnic_send_filter(adapter, tx_ring, first_desc, skb);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	qlcnic_update_cmd_producer(tx_ring);

	return NETDEV_TX_OK;

unwind_buff:
	qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
494
495void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
496{
497 struct net_device *netdev = adapter->netdev;
498
499 if (adapter->ahw->linkup && !linkup) {
500 netdev_info(netdev, "NIC Link is down\n");
501 adapter->ahw->linkup = 0;
502 if (netif_running(netdev)) {
503 netif_carrier_off(netdev);
504 netif_stop_queue(netdev);
505 }
506 } else if (!adapter->ahw->linkup && linkup) {
507 netdev_info(netdev, "NIC Link is up\n");
508 adapter->ahw->linkup = 1;
509 if (netif_running(netdev)) {
510 netif_carrier_on(netdev);
511 netif_wake_queue(netdev);
512 }
513 }
514}
515
516static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
517 struct qlcnic_host_rds_ring *rds_ring,
518 struct qlcnic_rx_buffer *buffer)
519{
520 struct sk_buff *skb;
521 dma_addr_t dma;
522 struct pci_dev *pdev = adapter->pdev;
523
524 skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
525 if (!skb) {
526 adapter->stats.skb_alloc_failure++;
527 return -ENOMEM;
528 }
529
530 skb_reserve(skb, NET_IP_ALIGN);
531 dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
532 PCI_DMA_FROMDEVICE);
533
534 if (pci_dma_mapping_error(pdev, dma)) {
535 adapter->stats.rx_dma_map_error++;
536 dev_kfree_skb_any(skb);
537 return -ENOMEM;
538 }
539
540 buffer->skb = skb;
541 buffer->dma = dma;
542
543 return 0;
544}
545
/*
 * Best-effort rx buffer refill used from the receive path.
 *
 * Drains rds_ring->free_list, (re)allocating and posting a descriptor for
 * each buffer, then publishes the new producer index to the hardware
 * doorbell.  Uses spin_trylock() and simply returns on contention —
 * another context is already refilling, so blocking here is unnecessary.
 */
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	uint32_t producer;
	struct list_head *head;

	if (!spin_trylock(&rds_ring->lock))
		return;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {
		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		/* stop refilling on the first allocation failure; the
		 * remaining buffers stay on the free list for next time */
		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		/* hardware is told the last valid index, hence producer-1,
		 * masked to the ring size */
		writel((producer - 1) & (rds_ring->num_desc - 1),
		       rds_ring->crb_rcv_producer);
	}

	spin_unlock(&rds_ring->lock);
}
588
/*
 * Reap completed TX descriptors up to the firmware's hw_consumer index.
 *
 * For each completed command buffer the DMA mappings are torn down (head
 * fragment via pci_unmap_single(), remaining fragments via
 * pci_unmap_page()) and the skb is freed.  At most MAX_STATUS_HANDLE
 * entries are processed per call.  Returns non-zero when the ring is
 * fully drained (or the clean lock was contended), 0 when more work
 * remains so the NAPI poll keeps going.
 */
static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int i, done, count = 0;
	struct qlcnic_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_skb_frag *frag;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* another context is already cleaning; report "done" */
	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));

	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			/* frag_array[0] was mapped with pci_map_single(),
			 * the rest with pci_map_page() */
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		/* publish sw_consumer before checking queue state; pairs
		 * with the barrier in the transmit path */
		smp_mb();

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->stats.xmit_on++;
			}
		}
		adapter->tx_timeo_cnt = 0;
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);

	spin_unlock(&adapter->tx_clean_lock);

	return done;
}
662
663static int qlcnic_poll(struct napi_struct *napi, int budget)
664{
665 struct qlcnic_host_sds_ring *sds_ring;
666 struct qlcnic_adapter *adapter;
667 int tx_complete, work_done;
668
669 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
670 adapter = sds_ring->adapter;
671
672 tx_complete = qlcnic_process_cmd_ring(adapter);
673 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
674
675 if ((work_done < budget) && tx_complete) {
676 napi_complete(&sds_ring->napi);
677 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
678 qlcnic_enable_int(sds_ring);
679 }
680
681 return work_done;
682}
683
684static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
685{
686 struct qlcnic_host_sds_ring *sds_ring;
687 struct qlcnic_adapter *adapter;
688 int work_done;
689
690 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
691 adapter = sds_ring->adapter;
692
693 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
694
695 if (work_done < budget) {
696 napi_complete(&sds_ring->napi);
697 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
698 qlcnic_enable_int(sds_ring);
699 }
700
701 return work_done;
702}
703
/*
 * Decode a firmware link event message and update cached link state.
 *
 * msg->body[1] packs cable OUI (bits 0-31), cable length (32-47) and
 * link speed (48-63); msg->body[2] packs link status (0-7), module type
 * (8-15), duplex (16-23), autoneg (24-31) and loopback status (32-33),
 * per the shifts below.  Also notifies the stack via
 * qlcnic_advert_link_change().
 */
static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
				    struct qlcnic_fw_msg *msg)
{
	u32 cable_OUI;
	u16 cable_len, link_speed;
	u8 link_status, module, duplex, autoneg, lb_status = 0;
	struct net_device *netdev = adapter->netdev;

	adapter->ahw->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;
	lb_status = (msg->body[2] >> 32) & 0x3;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
		dev_info(&netdev->dev,
			 "unsupported cable: OUI 0x%x, length %d\n",
			 cable_OUI, cable_len);
	else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
		dev_info(&netdev->dev, "unsupported cable length %d\n",
			 cable_len);

	/* a link-down event during internal/external loopback is part of
	 * the loopback test sequence; record it for the test state machine */
	if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
	    lb_status == QLCNIC_ELB_MODE))
		adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;

	qlcnic_advert_link_change(adapter, link_status);

	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->ahw->link_duplex = DUPLEX_FULL;
	else
		adapter->ahw->link_duplex = DUPLEX_HALF;

	adapter->ahw->module_type = module;
	adapter->ahw->link_autoneg = autoneg;

	if (link_status) {
		adapter->ahw->link_speed = link_speed;
	} else {
		/* no link: speed/duplex are meaningless */
		adapter->ahw->link_speed = SPEED_UNKNOWN;
		adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
	}
}
753
/*
 * Copy a (possibly multi-descriptor) firmware message out of the status
 * ring starting at @index and dispatch it by opcode.  At most four
 * descriptors (8 x 64-bit words, bounded by i < 8) are folded into the
 * local message copy.
 */
static void qlcnic_handle_fw_message(int desc_cnt, int index,
				     struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_fw_msg msg;
	struct status_desc *desc;
	struct qlcnic_adapter *adapter;
	struct device *dev;
	int i = 0, opcode, ret;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	adapter = sds_ring->adapter;
	dev = &adapter->pdev->dev;
	opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);

	switch (opcode) {
	case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		qlcnic_handle_linkevent(adapter, &msg);
		break;
	case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
		/* firmware response to a loopback configure request;
		 * diag_cnt carries the result to the waiting test path */
		ret = (u32)(msg.body[1]);
		switch (ret) {
		case 0:
			adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
			break;
		case 1:
			dev_info(dev, "loopback already in progress\n");
			adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
			break;
		case 2:
			dev_info(dev, "loopback cable is not connected\n");
			adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
			break;
		default:
			dev_info(dev,
				 "loopback configure request failed, err %x\n",
				 ret);
			adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
			break;
		}
		break;
	default:
		/* unknown firmware opcodes are silently ignored */
		break;
	}
}
806
/*
 * Detach the skb from rx buffer @index: undo the PCI DMA mapping, set
 * the checksum state from the status-descriptor result (when the netdev
 * has RX checksum offload enabled) and clear buffer->skb so the slot can
 * be refilled.  Returns the skb, or NULL if the buffer unexpectedly had
 * none (firmware handed back an empty slot).
 */
static struct sk_buff *
qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring, u16 index,
		u16 cksum)
{
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	if (unlikely(buffer->skb == NULL)) {
		WARN_ON(1);
		return NULL;
	}

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			 PCI_DMA_FROMDEVICE);

	skb = buffer->skb;

	if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
	    (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	/* ownership of the skb passes to the caller */
	buffer->skb = NULL;

	return skb;
}
839
/*
 * Extract and strip the VLAN tag (if present) from @skb into @vlan_tag.
 *
 * Returns 0 when the frame should be passed up the stack: untagged, no
 * PVID configured, tag matches the port VLAN id (tag is then reset to
 * the 0xffff "no tag" sentinel), or tagging is enabled on the adapter.
 * Returns -EINVAL when the tagged frame must be dropped.
 * NOTE(review): __vlan_get_tag() returning 0 indicates a tag was found.
 */
static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
					  struct sk_buff *skb, u16 *vlan_tag)
{
	struct ethhdr *eth_hdr;

	if (!__vlan_get_tag(skb, vlan_tag)) {
		/* shift the MAC header forward over the 4-byte VLAN
		 * header, then pull it off the packet data */
		eth_hdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
	}
	if (!adapter->pvid)
		return 0;

	if (*vlan_tag == adapter->pvid) {
		/* Outer vlan tag. Packet should follow non-vlan path */
		*vlan_tag = 0xffff;
		return 0;
	}
	if (adapter->flags & QLCNIC_TAGGING_ENABLED)
		return 0;

	return -EINVAL;
}
863
/*
 * Handle one ordinary receive completion described by @sts_data0.
 *
 * Validates the ring id and buffer handle taken from the status word,
 * detaches and unmaps the skb (qlcnic_process_rxbuf()), trims it to the
 * reported length, strips any VLAN tag and hands the skb to GRO.
 * Returns the rx buffer so the caller can recycle it, or NULL when the
 * descriptor fails validation.
 */
static struct qlcnic_rx_buffer *
qlcnic_process_rcv(struct qlcnic_adapter *adapter,
		struct qlcnic_host_sds_ring *sds_ring, int ring,
		u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_rx_buffer *buffer;
	struct sk_buff *skb;
	struct qlcnic_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;
	u16 vid = 0xffff;	/* 0xffff == "no VLAN tag" sentinel */

	/* sanity-check values supplied by hardware before indexing */
	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = qlcnic_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];
	length = qlcnic_get_sts_totallength(sts_data0);
	cksum = qlcnic_get_sts_status(sts_data0);
	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);

	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	/* clamp the reported length to the allocated buffer size */
	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);

	if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
		adapter->stats.rxdropped++;
		dev_kfree_skb(skb);
		return buffer;
	}

	skb->protocol = eth_type_trans(skb, netdev);

	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);

	napi_gro_receive(&sds_ring->napi, skb);

	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}
921
922#define QLC_TCP_HDR_SIZE 20
923#define QLC_TCP_TS_OPTION_SIZE 12
924#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
925
926static struct qlcnic_rx_buffer *
927qlcnic_process_lro(struct qlcnic_adapter *adapter,
928 int ring, u64 sts_data0, u64 sts_data1)
929{
930 struct net_device *netdev = adapter->netdev;
931 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
932 struct qlcnic_rx_buffer *buffer;
933 struct sk_buff *skb;
934 struct qlcnic_host_rds_ring *rds_ring;
935 struct iphdr *iph;
936 struct tcphdr *th;
937 bool push, timestamp;
938 int index, l2_hdr_offset, l4_hdr_offset;
939 u16 lro_length, length, data_offset, vid = 0xffff;
940 u32 seq_number;
941
942 if (unlikely(ring > adapter->max_rds_rings))
943 return NULL;
944
945 rds_ring = &recv_ctx->rds_rings[ring];
946
947 index = qlcnic_get_lro_sts_refhandle(sts_data0);
948 if (unlikely(index > rds_ring->num_desc))
949 return NULL;
950
951 buffer = &rds_ring->rx_buf_arr[index];
952
953 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
954 lro_length = qlcnic_get_lro_sts_length(sts_data0);
955 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
956 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
957 push = qlcnic_get_lro_sts_push_flag(sts_data0);
958 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
959
960 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
961 if (!skb)
962 return buffer;
963
964 if (timestamp)
965 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
966 else
967 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
968
969 skb_put(skb, lro_length + data_offset);
970 skb_pull(skb, l2_hdr_offset);
971
972 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
973 adapter->stats.rxdropped++;
974 dev_kfree_skb(skb);
975 return buffer;
976 }
977
978 skb->protocol = eth_type_trans(skb, netdev);
979 iph = (struct iphdr *)skb->data;
980 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
981 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
982 iph->tot_len = htons(length);
983 iph->check = 0;
984 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
985 th->psh = push;
986 th->seq = htonl(seq_number);
987 length = skb->len;
988
989 if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
990 skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
991
992 if (vid != 0xffff)
993 __vlan_hwaccel_put_tag(skb, vid);
994 netif_receive_skb(skb);
995
996 adapter->stats.lro_pkts++;
997 adapter->stats.lrobytes += length;
998
999 return buffer;
1000}
1001
/*
 * NAPI receive processing: consume up to @max status descriptors from
 * @sds_ring, dispatch each by opcode, then reallocate and re-post the
 * rx buffers that were consumed.  Returns the number of status
 * descriptors handled (the NAPI "work done" count).
 */
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
{
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct qlcnic_rx_buffer *rxbuf;
	u64 sts_data0, sts_data1;
	__le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
	int opcode, ring, desc_cnt, count = 0;
	u32 consumer = sds_ring->consumer;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

		/* descriptor still owned by firmware: nothing more to do */
		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
		opcode = qlcnic_get_sts_opcode(sts_data0);

		switch (opcode) {
		case QLCNIC_RXPKT_DESC:
		case QLCNIC_OLD_RXPKT_DESC:
		case QLCNIC_SYN_OFFLOAD:
			ring = qlcnic_get_sts_type(sts_data0);
			rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
						   sts_data0);
			break;
		case QLCNIC_LRO_DESC:
			ring = qlcnic_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
						   sts_data1);
			break;
		case QLCNIC_RESPONSE_DESC:
			qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through - fw messages yield no rx buffer */
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		/* park the consumed buffer for batched refill below */
		if (likely(rxbuf))
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
		else
			adapter->stats.null_rxbuf++;

skip:
		/* hand the descriptor(s) back to firmware */
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] = owner_phantom;
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	/* refill consumed buffers and post them back to each rds ring */
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx->rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
						   list);
				qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			list_splice_tail_init(&sds_ring->free_list[ring],
					      &rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		writel(consumer, sds_ring->crb_sts_consumer);
	}

	return count;
}
1085
/*
 * Refill @rds_ring with freshly allocated, DMA-mapped rx buffers and
 * publish the new producer index to the hardware doorbell.
 *
 * NOTE(review): unlike qlcnic_post_rx_buffers_nodb() this variant does
 * not take rds_ring->lock; callers presumably serialize access (e.g.
 * during ring bring-up) — confirm before using from a new context.
 */
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
		struct qlcnic_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct qlcnic_rx_buffer *buffer;
	int count = 0;
	u32 producer;
	struct list_head *head;

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);

		/* stop on the first allocation failure; remaining buffers
		 * stay queued on the free list */
		if (!buffer->skb) {
			if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		/* hardware is told the last valid index, hence producer-1 */
		writel((producer-1) & (rds_ring->num_desc-1),
		       rds_ring->crb_rcv_producer);
	}
}
1124
1125static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
1126{
1127 int i;
1128 unsigned char *data = skb->data;
1129
1130 pr_info(KERN_INFO "\n");
1131 for (i = 0; i < skb->len; i++) {
1132 QLCDB(adapter, DRV, "%02x ", data[i]);
1133 if ((i & 0x0f) == 8)
1134 pr_info(KERN_INFO "\n");
1135 }
1136}
1137
1138static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
1139 u64 sts_data0)
1140{
1141 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1142 struct sk_buff *skb;
1143 struct qlcnic_host_rds_ring *rds_ring;
1144 int index, length, cksum, pkt_offset;
1145
1146 if (unlikely(ring >= adapter->max_rds_rings))
1147 return;
1148
1149 rds_ring = &recv_ctx->rds_rings[ring];
1150
1151 index = qlcnic_get_sts_refhandle(sts_data0);
1152 length = qlcnic_get_sts_totallength(sts_data0);
1153 if (unlikely(index >= rds_ring->num_desc))
1154 return;
1155
1156 cksum = qlcnic_get_sts_status(sts_data0);
1157 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1158
1159 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1160 if (!skb)
1161 return;
1162
1163 if (length > rds_ring->skb_size)
1164 skb_put(skb, rds_ring->skb_size);
1165 else
1166 skb_put(skb, length);
1167
1168 if (pkt_offset)
1169 skb_pull(skb, pkt_offset);
1170
1171 if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
1172 adapter->ahw->diag_cnt++;
1173 else
1174 dump_skb(skb, adapter);
1175
1176 dev_kfree_skb_any(skb);
1177 adapter->stats.rx_pkts++;
1178 adapter->stats.rxbytes += length;
1179
1180 return;
1181}
1182
/*
 * Diagnostic-mode status ring processing: handle at most one status
 * descriptor (a firmware message or a loopback test frame), hand the
 * descriptor(s) back to firmware and acknowledge the new consumer index.
 * Returns immediately when the descriptor is still firmware-owned.
 */
void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;
	struct status_desc *desc;
	u64 sts_data0;
	int ring, opcode, desc_cnt;

	u32 consumer = sds_ring->consumer;

	desc = &sds_ring->desc_head[consumer];
	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);

	if (!(sts_data0 & STATUS_OWNER_HOST))
		return;

	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
	opcode = qlcnic_get_sts_opcode(sts_data0);
	switch (opcode) {
	case QLCNIC_RESPONSE_DESC:
		qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
		break;
	default:
		/* anything else is treated as a loopback test frame */
		ring = qlcnic_get_sts_type(sts_data0);
		qlcnic_process_rcv_diag(adapter, ring, sts_data0);
		break;
	}

	/* return ownership of the descriptor(s) to firmware */
	for (; desc_cnt > 0; desc_cnt--) {
		desc = &sds_ring->desc_head[consumer];
		desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
		consumer = get_next_index(consumer, sds_ring->num_desc);
	}

	sds_ring->consumer = consumer;
	writel(consumer, sds_ring->crb_sts_consumer);
}
1219
/*
 * Decode a 6-byte MAC address from two 32-bit register words into @mac,
 * most significant byte first.  @off2 holds the top 16 bits, @off1 the
 * low 32 bits.  When @alt_mac is set the address is stored shifted by
 * 16 bits across the two words and is realigned first.
 */
void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
{
	u32 mac_low = off1;
	u32 mac_high = off2;

	if (alt_mac) {
		/* realign: fold the 16-bit shifted layout back together */
		mac_low |= (mac_low >> 16) | (mac_high << 16);
		mac_high >>= 16;
	}

	mac[0] = (u8)(mac_high >> 8);
	mac[1] = (u8)mac_high;
	mac[2] = (u8)(mac_low >> 24);
	mac[3] = (u8)(mac_low >> 16);
	mac[4] = (u8)(mac_low >> 8);
	mac[5] = (u8)mac_low;
}
1238
1239int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
1240{
1241 int ring, max_sds_rings;
1242 struct qlcnic_host_sds_ring *sds_ring;
1243 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1244
1245 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
1246 return -ENOMEM;
1247
1248 max_sds_rings = adapter->max_sds_rings;
1249
1250 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1251 sds_ring = &recv_ctx->sds_rings[ring];
1252
1253 if (ring == max_sds_rings - 1)
1254 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
1255 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1256 else
1257 netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
1258 QLCNIC_NETDEV_WEIGHT*2);
1259 }
1260
1261 return 0;
1262}
1263
1264void qlcnic_napi_del(struct qlcnic_adapter *adapter)
1265{
1266 int ring;
1267 struct qlcnic_host_sds_ring *sds_ring;
1268 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1269
1270 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1271 sds_ring = &recv_ctx->sds_rings[ring];
1272 netif_napi_del(&sds_ring->napi);
1273 }
1274
1275 qlcnic_free_sds_rings(adapter->recv_ctx);
1276}
1277
1278void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
1279{
1280 int ring;
1281 struct qlcnic_host_sds_ring *sds_ring;
1282 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1283
1284 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1285 return;
1286
1287 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1288 sds_ring = &recv_ctx->sds_rings[ring];
1289 napi_enable(&sds_ring->napi);
1290 qlcnic_enable_int(sds_ring);
1291 }
1292}
1293
1294void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
1295{
1296 int ring;
1297 struct qlcnic_host_sds_ring *sds_ring;
1298 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1299
1300 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1301 return;
1302
1303 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1304 sds_ring = &recv_ctx->sds_rings[ring];
1305 qlcnic_disable_int(sds_ring);
1306 napi_synchronize(&sds_ring->napi);
1307 napi_disable(&sds_ring->napi);
1308 }
1309}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 24ad17ec7fcd..a7554d9aab0c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -34,29 +34,28 @@ static int qlcnic_mac_learn;
34module_param(qlcnic_mac_learn, int, 0444); 34module_param(qlcnic_mac_learn, int, 0444);
35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
36 36
37static int use_msi = 1; 37static int qlcnic_use_msi = 1;
38module_param(use_msi, int, 0444);
39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 38MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
39module_param_named(use_msi, qlcnic_use_msi, int, 0444);
40 40
41static int use_msi_x = 1; 41static int qlcnic_use_msi_x = 1;
42module_param(use_msi_x, int, 0444);
43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 42MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
43module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
44 44
45static int auto_fw_reset = 1; 45static int qlcnic_auto_fw_reset = 1;
46module_param(auto_fw_reset, int, 0644);
47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 46MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
47module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
48 48
49static int load_fw_file; 49static int qlcnic_load_fw_file;
50module_param(load_fw_file, int, 0444);
51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 50MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
51module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
52 52
53static int qlcnic_config_npars; 53static int qlcnic_config_npars;
54module_param(qlcnic_config_npars, int, 0444); 54module_param(qlcnic_config_npars, int, 0444);
55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
56 56
57static int __devinit qlcnic_probe(struct pci_dev *pdev, 57static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
58 const struct pci_device_id *ent); 58static void qlcnic_remove(struct pci_dev *pdev);
59static void __devexit qlcnic_remove(struct pci_dev *pdev);
60static int qlcnic_open(struct net_device *netdev); 59static int qlcnic_open(struct net_device *netdev);
61static int qlcnic_close(struct net_device *netdev); 60static int qlcnic_close(struct net_device *netdev);
62static void qlcnic_tx_timeout(struct net_device *netdev); 61static void qlcnic_tx_timeout(struct net_device *netdev);
@@ -66,17 +65,10 @@ static void qlcnic_fw_poll_work(struct work_struct *work);
66static void qlcnic_schedule_work(struct qlcnic_adapter *adapter, 65static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
67 work_func_t func, int delay); 66 work_func_t func, int delay);
68static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter); 67static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
69static int qlcnic_poll(struct napi_struct *napi, int budget);
70static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
71#ifdef CONFIG_NET_POLL_CONTROLLER 68#ifdef CONFIG_NET_POLL_CONTROLLER
72static void qlcnic_poll_controller(struct net_device *netdev); 69static void qlcnic_poll_controller(struct net_device *netdev);
73#endif 70#endif
74 71
75static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
76static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
77static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
78static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
79
80static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding); 72static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
81static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8); 73static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
82static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 74static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -92,14 +84,15 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
92 84
93static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); 85static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
94static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); 86static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
95static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
96static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
97static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 87static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
98static void qlcnic_set_netdev_features(struct qlcnic_adapter *, 88static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
99 struct qlcnic_esw_func_cfg *); 89 struct qlcnic_esw_func_cfg *);
100static int qlcnic_vlan_rx_add(struct net_device *, u16); 90static int qlcnic_vlan_rx_add(struct net_device *, u16);
101static int qlcnic_vlan_rx_del(struct net_device *, u16); 91static int qlcnic_vlan_rx_del(struct net_device *, u16);
102 92
93#define QLCNIC_IS_TSO_CAPABLE(adapter) \
94 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
95
103/* PCI Device ID Table */ 96/* PCI Device ID Table */
104#define ENTRY(device) \ 97#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \ 98 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -115,9 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl); 108MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116 109
117 110
118inline void 111inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121{ 112{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer); 113 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123} 114}
@@ -129,26 +120,34 @@ static const u32 msi_tgt_status[8] = {
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7 120 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130}; 121};
131 122
132static const 123static const struct qlcnic_board_info qlcnic_boards[] = {
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG; 124 {0x1077, 0x8020, 0x1077, 0x203,
134 125 "8200 Series Single Port 10GbE Converged Network Adapter"
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring) 126 "(TCP/IP Networking)"},
136{ 127 {0x1077, 0x8020, 0x1077, 0x207,
137 writel(0, sds_ring->crb_intr_mask); 128 "8200 Series Dual Port 10GbE Converged Network Adapter"
138} 129 "(TCP/IP Networking)"},
139 130 {0x1077, 0x8020, 0x1077, 0x20b,
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring) 131 "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
141{ 132 {0x1077, 0x8020, 0x1077, 0x20c,
142 struct qlcnic_adapter *adapter = sds_ring->adapter; 133 "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
134 {0x1077, 0x8020, 0x1077, 0x20f,
135 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
136 {0x1077, 0x8020, 0x103c, 0x3733,
137 "NC523SFP 10Gb 2-port Server Adapter"},
138 {0x1077, 0x8020, 0x103c, 0x3346,
139 "CN1000Q Dual Port Converged Network Adapter"},
140 {0x1077, 0x8020, 0x1077, 0x210,
141 "QME8242-k 10GbE Dual Port Mezzanine Card"},
142 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
143};
143 144
144 writel(0x1, sds_ring->crb_intr_mask); 145#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
145 146
146 if (!QLCNIC_IS_MSI_FAMILY(adapter)) 147static const
147 writel(0xfbff, adapter->tgt_mask_reg); 148struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
148}
149 149
150static int 150int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{ 151{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count; 152 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154 153
@@ -157,8 +156,7 @@ qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
157 return recv_ctx->sds_rings == NULL; 156 return recv_ctx->sds_rings == NULL;
158} 157}
159 158
160static void 159void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{ 160{
163 if (recv_ctx->sds_rings != NULL) 161 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings); 162 kfree(recv_ctx->sds_rings);
@@ -166,80 +164,6 @@ qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
166 recv_ctx->sds_rings = NULL; 164 recv_ctx->sds_rings = NULL;
167} 165}
168 166
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181
182 if (ring == adapter->max_sds_rings - 1)
183 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
184 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
185 else
186 netif_napi_add(netdev, &sds_ring->napi,
187 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
188 }
189
190 return 0;
191}
192
193static void
194qlcnic_napi_del(struct qlcnic_adapter *adapter)
195{
196 int ring;
197 struct qlcnic_host_sds_ring *sds_ring;
198 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
199
200 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
201 sds_ring = &recv_ctx->sds_rings[ring];
202 netif_napi_del(&sds_ring->napi);
203 }
204
205 qlcnic_free_sds_rings(adapter->recv_ctx);
206}
207
208static void
209qlcnic_napi_enable(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
213 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
214
215 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
216 return;
217
218 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
219 sds_ring = &recv_ctx->sds_rings[ring];
220 napi_enable(&sds_ring->napi);
221 qlcnic_enable_int(sds_ring);
222 }
223}
224
225static void
226qlcnic_napi_disable(struct qlcnic_adapter *adapter)
227{
228 int ring;
229 struct qlcnic_host_sds_ring *sds_ring;
230 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
231
232 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
233 return;
234
235 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
236 sds_ring = &recv_ctx->sds_rings[ring];
237 qlcnic_disable_int(sds_ring);
238 napi_synchronize(&sds_ring->napi);
239 napi_disable(&sds_ring->napi);
240 }
241}
242
243static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) 167static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
244{ 168{
245 memset(&adapter->stats, 0, sizeof(adapter->stats)); 169 memset(&adapter->stats, 0, sizeof(adapter->stats));
@@ -363,7 +287,7 @@ static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
363 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED); 287 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
364 qlcnic_set_msix_bit(pdev, 0); 288 qlcnic_set_msix_bit(pdev, 0);
365 289
366 if (adapter->msix_supported) { 290 if (adapter->ahw->msix_supported) {
367 enable_msix: 291 enable_msix:
368 qlcnic_init_msix_entries(adapter, num_msix); 292 qlcnic_init_msix_entries(adapter, num_msix);
369 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 293 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
@@ -385,32 +309,31 @@ static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
385 return err; 309 return err;
386} 310}
387 311
388
389static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter) 312static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
390{ 313{
314 u32 offset, mask_reg;
391 const struct qlcnic_legacy_intr_set *legacy_intrp; 315 const struct qlcnic_legacy_intr_set *legacy_intrp;
316 struct qlcnic_hardware_context *ahw = adapter->ahw;
392 struct pci_dev *pdev = adapter->pdev; 317 struct pci_dev *pdev = adapter->pdev;
393 318
394 if (use_msi && !pci_enable_msi(pdev)) { 319 if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
395 adapter->flags |= QLCNIC_MSI_ENABLED; 320 adapter->flags |= QLCNIC_MSI_ENABLED;
396 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, 321 offset = msi_tgt_status[adapter->ahw->pci_func];
397 msi_tgt_status[adapter->ahw->pci_func]); 322 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
323 offset);
398 dev_info(&pdev->dev, "using msi interrupts\n"); 324 dev_info(&pdev->dev, "using msi interrupts\n");
399 adapter->msix_entries[0].vector = pdev->irq; 325 adapter->msix_entries[0].vector = pdev->irq;
400 return; 326 return;
401 } 327 }
402 328
403 legacy_intrp = &legacy_intr[adapter->ahw->pci_func]; 329 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
404 330 adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
405 adapter->int_vec_bit = legacy_intrp->int_vec_bit; 331 offset = legacy_intrp->tgt_status_reg;
406 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter, 332 adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
407 legacy_intrp->tgt_status_reg); 333 mask_reg = legacy_intrp->tgt_mask_reg;
408 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter, 334 adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
409 legacy_intrp->tgt_mask_reg); 335 adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
410 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR); 336 adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
411
412 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
413 ISR_INT_STATE_REG);
414 dev_info(&pdev->dev, "using legacy interrupts\n"); 337 dev_info(&pdev->dev, "using legacy interrupts\n");
415 adapter->msix_entries[0].vector = pdev->irq; 338 adapter->msix_entries[0].vector = pdev->irq;
416} 339}
@@ -420,7 +343,7 @@ qlcnic_setup_intr(struct qlcnic_adapter *adapter)
420{ 343{
421 int num_msix; 344 int num_msix;
422 345
423 if (adapter->msix_supported) { 346 if (adapter->ahw->msix_supported) {
424 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(), 347 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
425 QLCNIC_DEF_NUM_STS_DESC_RINGS)); 348 QLCNIC_DEF_NUM_STS_DESC_RINGS));
426 } else 349 } else
@@ -448,19 +371,25 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
448 iounmap(adapter->ahw->pci_base0); 371 iounmap(adapter->ahw->pci_base0);
449} 372}
450 373
451static int 374static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
452qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
453{ 375{
454 struct qlcnic_pci_info *pci_info; 376 struct qlcnic_pci_info *pci_info;
455 int i, ret = 0; 377 int i, ret = 0, j = 0;
378 u16 act_pci_func;
456 u8 pfn; 379 u8 pfn;
457 380
458 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL); 381 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
459 if (!pci_info) 382 if (!pci_info)
460 return -ENOMEM; 383 return -ENOMEM;
461 384
385 ret = qlcnic_get_pci_info(adapter, pci_info);
386 if (ret)
387 goto err_pci_info;
388
389 act_pci_func = adapter->ahw->act_pci_func;
390
462 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) * 391 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
463 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL); 392 act_pci_func, GFP_KERNEL);
464 if (!adapter->npars) { 393 if (!adapter->npars) {
465 ret = -ENOMEM; 394 ret = -ENOMEM;
466 goto err_pci_info; 395 goto err_pci_info;
@@ -473,21 +402,25 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
473 goto err_npars; 402 goto err_npars;
474 } 403 }
475 404
476 ret = qlcnic_get_pci_info(adapter, pci_info);
477 if (ret)
478 goto err_eswitch;
479
480 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 405 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
481 pfn = pci_info[i].id; 406 pfn = pci_info[i].id;
407
482 if (pfn >= QLCNIC_MAX_PCI_FUNC) { 408 if (pfn >= QLCNIC_MAX_PCI_FUNC) {
483 ret = QL_STATUS_INVALID_PARAM; 409 ret = QL_STATUS_INVALID_PARAM;
484 goto err_eswitch; 410 goto err_eswitch;
485 } 411 }
486 adapter->npars[pfn].active = (u8)pci_info[i].active; 412
487 adapter->npars[pfn].type = (u8)pci_info[i].type; 413 if (!pci_info[i].active ||
488 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port; 414 (pci_info[i].type != QLCNIC_TYPE_NIC))
489 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw; 415 continue;
490 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw; 416
417 adapter->npars[j].pci_func = pfn;
418 adapter->npars[j].active = (u8)pci_info[i].active;
419 adapter->npars[j].type = (u8)pci_info[i].type;
420 adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
421 adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
422 adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
423 j++;
491 } 424 }
492 425
493 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) 426 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
@@ -515,7 +448,7 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
515 u32 ref_count; 448 u32 ref_count;
516 int i, ret = 1; 449 int i, ret = 1;
517 u32 data = QLCNIC_MGMT_FUNC; 450 u32 data = QLCNIC_MGMT_FUNC;
518 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE; 451 struct qlcnic_hardware_context *ahw = adapter->ahw;
519 452
520 /* If other drivers are not in use set their privilege level */ 453 /* If other drivers are not in use set their privilege level */
521 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE); 454 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
@@ -524,21 +457,20 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
524 goto err_lock; 457 goto err_lock;
525 458
526 if (qlcnic_config_npars) { 459 if (qlcnic_config_npars) {
527 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 460 for (i = 0; i < ahw->act_pci_func; i++) {
528 id = i; 461 id = adapter->npars[i].pci_func;
529 if (adapter->npars[i].type != QLCNIC_TYPE_NIC || 462 if (id == ahw->pci_func)
530 id == adapter->ahw->pci_func)
531 continue; 463 continue;
532 data |= (qlcnic_config_npars & 464 data |= (qlcnic_config_npars &
533 QLC_DEV_SET_DRV(0xf, id)); 465 QLC_DEV_SET_DRV(0xf, id));
534 } 466 }
535 } else { 467 } else {
536 data = readl(priv_op); 468 data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
537 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) | 469 data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
538 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, 470 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
539 adapter->ahw->pci_func)); 471 ahw->pci_func));
540 } 472 }
541 writel(data, priv_op); 473 QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
542 qlcnic_api_unlock(adapter); 474 qlcnic_api_unlock(adapter);
543err_lock: 475err_lock:
544 return ret; 476 return ret;
@@ -554,8 +486,8 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
554 u32 op_mode, priv_level; 486 u32 op_mode, priv_level;
555 487
556 /* Determine FW API version */ 488 /* Determine FW API version */
557 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 + 489 adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
558 QLCNIC_FW_API); 490 QLCNIC_FW_API);
559 491
560 /* Find PCI function number */ 492 /* Find PCI function number */
561 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func); 493 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
@@ -573,29 +505,41 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
573 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func); 505 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
574 506
575 if (priv_level == QLCNIC_NON_PRIV_FUNC) { 507 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
576 adapter->op_mode = QLCNIC_NON_PRIV_FUNC; 508 adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
577 dev_info(&adapter->pdev->dev, 509 dev_info(&adapter->pdev->dev,
578 "HAL Version: %d Non Privileged function\n", 510 "HAL Version: %d Non Privileged function\n",
579 adapter->fw_hal_version); 511 adapter->ahw->fw_hal_version);
580 adapter->nic_ops = &qlcnic_vf_ops; 512 adapter->nic_ops = &qlcnic_vf_ops;
581 } else 513 } else
582 adapter->nic_ops = &qlcnic_ops; 514 adapter->nic_ops = &qlcnic_ops;
583} 515}
584 516
585static int 517#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
586qlcnic_setup_pci_map(struct qlcnic_adapter *adapter) 518static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
587{ 519{
520 switch (dev_id) {
521 case PCI_DEVICE_ID_QLOGIC_QLE824X:
522 *bar = QLCNIC_82XX_BAR0_LENGTH;
523 break;
524 default:
525 *bar = 0;
526 }
527}
528
529static int qlcnic_setup_pci_map(struct pci_dev *pdev,
530 struct qlcnic_hardware_context *ahw)
531{
532 u32 offset;
588 void __iomem *mem_ptr0 = NULL; 533 void __iomem *mem_ptr0 = NULL;
589 resource_size_t mem_base; 534 resource_size_t mem_base;
590 unsigned long mem_len, pci_len0 = 0; 535 unsigned long mem_len, pci_len0 = 0, bar0_len;
591
592 struct pci_dev *pdev = adapter->pdev;
593 536
594 /* remap phys address */ 537 /* remap phys address */
595 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 538 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
596 mem_len = pci_resource_len(pdev, 0); 539 mem_len = pci_resource_len(pdev, 0);
597 540
598 if (mem_len == QLCNIC_PCI_2MB_SIZE) { 541 qlcnic_get_bar_length(pdev->device, &bar0_len);
542 if (mem_len >= bar0_len) {
599 543
600 mem_ptr0 = pci_ioremap_bar(pdev, 0); 544 mem_ptr0 = pci_ioremap_bar(pdev, 0);
601 if (mem_ptr0 == NULL) { 545 if (mem_ptr0 == NULL) {
@@ -608,20 +552,15 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
608 } 552 }
609 553
610 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 554 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
611 555 ahw->pci_base0 = mem_ptr0;
612 adapter->ahw->pci_base0 = mem_ptr0; 556 ahw->pci_len0 = pci_len0;
613 adapter->ahw->pci_len0 = pci_len0; 557 offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
614 558 qlcnic_get_ioaddr(ahw, offset);
615 qlcnic_check_vf(adapter);
616
617 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
618 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
619 adapter->ahw->pci_func)));
620 559
621 return 0; 560 return 0;
622} 561}
623 562
624static void get_brd_name(struct qlcnic_adapter *adapter, char *name) 563static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
625{ 564{
626 struct pci_dev *pdev = adapter->pdev; 565 struct pci_dev *pdev = adapter->pdev;
627 int i, found = 0; 566 int i, found = 0;
@@ -659,7 +598,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
659 598
660 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build); 599 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
661 600
662 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC) { 601 if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
663 if (fw_dump->tmpl_hdr == NULL || 602 if (fw_dump->tmpl_hdr == NULL ||
664 adapter->fw_version > prev_fw_version) { 603 adapter->fw_version > prev_fw_version) {
665 if (fw_dump->tmpl_hdr) 604 if (fw_dump->tmpl_hdr)
@@ -691,7 +630,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
691 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G; 630 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
692 } 631 }
693 632
694 adapter->msix_supported = !!use_msi_x; 633 adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
695 634
696 adapter->num_txd = MAX_CMD_DESCRIPTORS; 635 adapter->num_txd = MAX_CMD_DESCRIPTORS;
697 636
@@ -704,19 +643,20 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
704 int err; 643 int err;
705 struct qlcnic_info nic_info; 644 struct qlcnic_info nic_info;
706 645
646 memset(&nic_info, 0, sizeof(struct qlcnic_info));
707 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func); 647 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
708 if (err) 648 if (err)
709 return err; 649 return err;
710 650
711 adapter->physical_port = (u8)nic_info.phys_port; 651 adapter->ahw->physical_port = (u8)nic_info.phys_port;
712 adapter->switch_mode = nic_info.switch_mode; 652 adapter->ahw->switch_mode = nic_info.switch_mode;
713 adapter->max_tx_ques = nic_info.max_tx_ques; 653 adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
714 adapter->max_rx_ques = nic_info.max_rx_ques; 654 adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
715 adapter->capabilities = nic_info.capabilities; 655 adapter->ahw->capabilities = nic_info.capabilities;
716 adapter->max_mac_filters = nic_info.max_mac_filters; 656 adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
717 adapter->max_mtu = nic_info.max_mtu; 657 adapter->ahw->max_mtu = nic_info.max_mtu;
718 658
719 if (adapter->capabilities & BIT_6) 659 if (adapter->ahw->capabilities & BIT_6)
720 adapter->flags |= QLCNIC_ESWITCH_ENABLED; 660 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
721 else 661 else
722 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED; 662 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -724,9 +664,8 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
724 return err; 664 return err;
725} 665}
726 666
727static void 667void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
728qlcnic_set_vlan_config(struct qlcnic_adapter *adapter, 668 struct qlcnic_esw_func_cfg *esw_cfg)
729 struct qlcnic_esw_func_cfg *esw_cfg)
730{ 669{
731 if (esw_cfg->discard_tagged) 670 if (esw_cfg->discard_tagged)
732 adapter->flags &= ~QLCNIC_TAGGING_ENABLED; 671 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
@@ -757,9 +696,8 @@ qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
757 return 0; 696 return 0;
758} 697}
759 698
760static void 699void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
761qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, 700 struct qlcnic_esw_func_cfg *esw_cfg)
762 struct qlcnic_esw_func_cfg *esw_cfg)
763{ 701{
764 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED | 702 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
765 QLCNIC_PROMISC_DISABLED); 703 QLCNIC_PROMISC_DISABLED);
@@ -776,8 +714,7 @@ qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
776 qlcnic_set_netdev_features(adapter, esw_cfg); 714 qlcnic_set_netdev_features(adapter, esw_cfg);
777} 715}
778 716
779static int 717static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
780qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
781{ 718{
782 struct qlcnic_esw_func_cfg esw_cfg; 719 struct qlcnic_esw_func_cfg esw_cfg;
783 720
@@ -805,7 +742,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
805 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | 742 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
806 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER); 743 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
807 744
808 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) { 745 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
809 features |= (NETIF_F_TSO | NETIF_F_TSO6); 746 features |= (NETIF_F_TSO | NETIF_F_TSO6);
810 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); 747 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
811 } 748 }
@@ -851,7 +788,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
851 788
852 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 789 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
853 if (priv_level == QLCNIC_MGMT_FUNC) { 790 if (priv_level == QLCNIC_MGMT_FUNC) {
854 adapter->op_mode = QLCNIC_MGMT_FUNC; 791 adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
855 err = qlcnic_init_pci_info(adapter); 792 err = qlcnic_init_pci_info(adapter);
856 if (err) 793 if (err)
857 return err; 794 return err;
@@ -859,12 +796,12 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
859 qlcnic_set_function_modes(adapter); 796 qlcnic_set_function_modes(adapter);
860 dev_info(&adapter->pdev->dev, 797 dev_info(&adapter->pdev->dev,
861 "HAL Version: %d, Management function\n", 798 "HAL Version: %d, Management function\n",
862 adapter->fw_hal_version); 799 adapter->ahw->fw_hal_version);
863 } else if (priv_level == QLCNIC_PRIV_FUNC) { 800 } else if (priv_level == QLCNIC_PRIV_FUNC) {
864 adapter->op_mode = QLCNIC_PRIV_FUNC; 801 adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
865 dev_info(&adapter->pdev->dev, 802 dev_info(&adapter->pdev->dev,
866 "HAL Version: %d, Privileged function\n", 803 "HAL Version: %d, Privileged function\n",
867 adapter->fw_hal_version); 804 adapter->ahw->fw_hal_version);
868 } 805 }
869 } 806 }
870 807
@@ -873,8 +810,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
873 return err; 810 return err;
874} 811}
875 812
876static int 813static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
877qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
878{ 814{
879 struct qlcnic_esw_func_cfg esw_cfg; 815 struct qlcnic_esw_func_cfg esw_cfg;
880 struct qlcnic_npar_info *npar; 816 struct qlcnic_npar_info *npar;
@@ -883,16 +819,16 @@ qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
883 if (adapter->need_fw_reset) 819 if (adapter->need_fw_reset)
884 return 0; 820 return 0;
885 821
886 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 822 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
887 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
888 continue;
889 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg)); 823 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
890 esw_cfg.pci_func = i; 824 esw_cfg.pci_func = adapter->npars[i].pci_func;
891 esw_cfg.offload_flags = BIT_0;
892 esw_cfg.mac_override = BIT_0; 825 esw_cfg.mac_override = BIT_0;
893 esw_cfg.promisc_mode = BIT_0; 826 esw_cfg.promisc_mode = BIT_0;
894 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) 827 if (qlcnic_82xx_check(adapter)) {
895 esw_cfg.offload_flags |= (BIT_1 | BIT_2); 828 esw_cfg.offload_flags = BIT_0;
829 if (QLCNIC_IS_TSO_CAPABLE(adapter))
830 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
831 }
896 if (qlcnic_config_switch_port(adapter, &esw_cfg)) 832 if (qlcnic_config_switch_port(adapter, &esw_cfg))
897 return -EIO; 833 return -EIO;
898 npar = &adapter->npars[i]; 834 npar = &adapter->npars[i];
@@ -930,22 +866,24 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
930 return 0; 866 return 0;
931} 867}
932 868
933static int 869static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
934qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
935{ 870{
936 int i, err; 871 int i, err;
937 struct qlcnic_npar_info *npar; 872 struct qlcnic_npar_info *npar;
938 struct qlcnic_info nic_info; 873 struct qlcnic_info nic_info;
874 u8 pci_func;
939 875
940 if (!adapter->need_fw_reset) 876 if (qlcnic_82xx_check(adapter))
941 return 0; 877 if (!adapter->need_fw_reset)
878 return 0;
942 879
943 /* Set the NPAR config data after FW reset */ 880 /* Set the NPAR config data after FW reset */
944 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 881 for (i = 0; i < adapter->ahw->act_pci_func; i++) {
945 npar = &adapter->npars[i]; 882 npar = &adapter->npars[i];
946 if (npar->type != QLCNIC_TYPE_NIC) 883 pci_func = npar->pci_func;
947 continue; 884 memset(&nic_info, 0, sizeof(struct qlcnic_info));
948 err = qlcnic_get_nic_info(adapter, &nic_info, i); 885 err = qlcnic_get_nic_info(adapter,
886 &nic_info, pci_func);
949 if (err) 887 if (err)
950 return err; 888 return err;
951 nic_info.min_tx_bw = npar->min_bw; 889 nic_info.min_tx_bw = npar->min_bw;
@@ -956,11 +894,12 @@ qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
956 894
957 if (npar->enable_pm) { 895 if (npar->enable_pm) {
958 err = qlcnic_config_port_mirroring(adapter, 896 err = qlcnic_config_port_mirroring(adapter,
959 npar->dest_npar, 1, i); 897 npar->dest_npar, 1,
898 pci_func);
960 if (err) 899 if (err)
961 return err; 900 return err;
962 } 901 }
963 err = qlcnic_reset_eswitch_config(adapter, npar, i); 902 err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
964 if (err) 903 if (err)
965 return err; 904 return err;
966 } 905 }
@@ -972,7 +911,7 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
972 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO; 911 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
973 u32 npar_state; 912 u32 npar_state;
974 913
975 if (adapter->op_mode == QLCNIC_MGMT_FUNC) 914 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
976 return 0; 915 return 0;
977 916
978 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 917 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
@@ -994,7 +933,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
994 int err; 933 int err;
995 934
996 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) || 935 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
997 adapter->op_mode != QLCNIC_MGMT_FUNC) 936 adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
998 return 0; 937 return 0;
999 938
1000 err = qlcnic_set_default_offload_settings(adapter); 939 err = qlcnic_set_default_offload_settings(adapter);
@@ -1021,14 +960,14 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
1021 else if (!err) 960 else if (!err)
1022 goto check_fw_status; 961 goto check_fw_status;
1023 962
1024 if (load_fw_file) 963 if (qlcnic_load_fw_file)
1025 qlcnic_request_firmware(adapter); 964 qlcnic_request_firmware(adapter);
1026 else { 965 else {
1027 err = qlcnic_check_flash_fw_ver(adapter); 966 err = qlcnic_check_flash_fw_ver(adapter);
1028 if (err) 967 if (err)
1029 goto err_out; 968 goto err_out;
1030 969
1031 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE; 970 adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
1032 } 971 }
1033 972
1034 err = qlcnic_need_fw_reset(adapter); 973 err = qlcnic_need_fw_reset(adapter);
@@ -1089,7 +1028,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1089 struct net_device *netdev = adapter->netdev; 1028 struct net_device *netdev = adapter->netdev;
1090 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1029 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
1091 1030
1092 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1031 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1093 handler = qlcnic_tmp_intr; 1032 handler = qlcnic_tmp_intr;
1094 if (!QLCNIC_IS_MSI_FAMILY(adapter)) 1033 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1095 flags |= IRQF_SHARED; 1034 flags |= IRQF_SHARED;
@@ -1148,7 +1087,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1148 if (qlcnic_set_eswitch_port_config(adapter)) 1087 if (qlcnic_set_eswitch_port_config(adapter))
1149 return -EIO; 1088 return -EIO;
1150 1089
1151 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) { 1090 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
1152 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2); 1091 capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
1153 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG) 1092 if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
1154 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP; 1093 adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
@@ -1179,7 +1118,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1179 1118
1180 qlcnic_linkevent_request(adapter, 1); 1119 qlcnic_linkevent_request(adapter, 1);
1181 1120
1182 adapter->reset_context = 0; 1121 adapter->ahw->reset_context = 0;
1183 set_bit(__QLCNIC_DEV_UP, &adapter->state); 1122 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1184 return 0; 1123 return 0;
1185} 1124}
@@ -1312,7 +1251,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1312 int ring; 1251 int ring;
1313 1252
1314 clear_bit(__QLCNIC_DEV_UP, &adapter->state); 1253 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1315 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1254 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1316 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1255 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1317 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1256 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1318 qlcnic_disable_int(sds_ring); 1257 qlcnic_disable_int(sds_ring);
@@ -1323,7 +1262,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1323 1262
1324 qlcnic_detach(adapter); 1263 qlcnic_detach(adapter);
1325 1264
1326 adapter->diag_test = 0; 1265 adapter->ahw->diag_test = 0;
1327 adapter->max_sds_rings = max_sds_rings; 1266 adapter->max_sds_rings = max_sds_rings;
1328 1267
1329 if (qlcnic_attach(adapter)) 1268 if (qlcnic_attach(adapter))
@@ -1393,7 +1332,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1393 qlcnic_detach(adapter); 1332 qlcnic_detach(adapter);
1394 1333
1395 adapter->max_sds_rings = 1; 1334 adapter->max_sds_rings = 1;
1396 adapter->diag_test = test; 1335 adapter->ahw->diag_test = test;
1397 1336
1398 ret = qlcnic_attach(adapter); 1337 ret = qlcnic_attach(adapter);
1399 if (ret) { 1338 if (ret) {
@@ -1413,14 +1352,14 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1413 qlcnic_post_rx_buffers(adapter, rds_ring); 1352 qlcnic_post_rx_buffers(adapter, rds_ring);
1414 } 1353 }
1415 1354
1416 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 1355 if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
1417 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1356 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1418 sds_ring = &adapter->recv_ctx->sds_rings[ring]; 1357 sds_ring = &adapter->recv_ctx->sds_rings[ring];
1419 qlcnic_enable_int(sds_ring); 1358 qlcnic_enable_int(sds_ring);
1420 } 1359 }
1421 } 1360 }
1422 1361
1423 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) { 1362 if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
1424 adapter->ahw->loopback_state = 0; 1363 adapter->ahw->loopback_state = 0;
1425 qlcnic_linkevent_request(adapter, 1); 1364 qlcnic_linkevent_request(adapter, 1);
1426 } 1365 }
@@ -1485,14 +1424,14 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1485} 1424}
1486 1425
1487static int 1426static int
1488qlcnic_setup_netdev(struct qlcnic_adapter *adapter, 1427qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1489 struct net_device *netdev, u8 pci_using_dac) 1428 int pci_using_dac)
1490{ 1429{
1491 int err; 1430 int err;
1492 struct pci_dev *pdev = adapter->pdev; 1431 struct pci_dev *pdev = adapter->pdev;
1493 1432
1494 adapter->mc_enabled = 0; 1433 adapter->ahw->mc_enabled = 0;
1495 adapter->max_mc_count = 38; 1434 adapter->ahw->max_mc_count = 38;
1496 1435
1497 netdev->netdev_ops = &qlcnic_netdev_ops; 1436 netdev->netdev_ops = &qlcnic_netdev_ops;
1498 netdev->watchdog_timeo = 5*HZ; 1437 netdev->watchdog_timeo = 5*HZ;
@@ -1504,16 +1443,16 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1504 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | 1443 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1505 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; 1444 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
1506 1445
1507 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) 1446 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1508 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; 1447 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1509 if (pci_using_dac) 1448 if (pci_using_dac == 1)
1510 netdev->hw_features |= NETIF_F_HIGHDMA; 1449 netdev->hw_features |= NETIF_F_HIGHDMA;
1511 1450
1512 netdev->vlan_features = netdev->hw_features; 1451 netdev->vlan_features = netdev->hw_features;
1513 1452
1514 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX) 1453 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1515 netdev->hw_features |= NETIF_F_HW_VLAN_TX; 1454 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
1516 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO) 1455 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1517 netdev->hw_features |= NETIF_F_LRO; 1456 netdev->hw_features |= NETIF_F_LRO;
1518 1457
1519 netdev->features |= netdev->hw_features | 1458 netdev->features |= netdev->hw_features |
@@ -1530,7 +1469,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1530 return 0; 1469 return 0;
1531} 1470}
1532 1471
1533static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac) 1472static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
1534{ 1473{
1535 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 1474 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1536 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) 1475 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
@@ -1559,15 +1498,14 @@ qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1559 return -ENOMEM; 1498 return -ENOMEM;
1560} 1499}
1561 1500
1562static int __devinit 1501static int
1563qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1502qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1564{ 1503{
1565 struct net_device *netdev = NULL; 1504 struct net_device *netdev = NULL;
1566 struct qlcnic_adapter *adapter = NULL; 1505 struct qlcnic_adapter *adapter = NULL;
1567 int err; 1506 int err, pci_using_dac = -1;
1568 uint8_t revision_id; 1507 uint8_t revision_id;
1569 uint8_t pci_using_dac; 1508 char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
1570 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
1571 1509
1572 err = pci_enable_device(pdev); 1510 err = pci_enable_device(pdev);
1573 if (err) 1511 if (err)
@@ -1616,9 +1554,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1616 spin_lock_init(&adapter->tx_clean_lock); 1554 spin_lock_init(&adapter->tx_clean_lock);
1617 INIT_LIST_HEAD(&adapter->mac_list); 1555 INIT_LIST_HEAD(&adapter->mac_list);
1618 1556
1619 err = qlcnic_setup_pci_map(adapter); 1557 err = qlcnic_setup_pci_map(pdev, adapter->ahw);
1620 if (err) 1558 if (err)
1621 goto err_out_free_hw; 1559 goto err_out_free_hw;
1560 qlcnic_check_vf(adapter);
1622 1561
1623 /* This will be reset for mezz cards */ 1562 /* This will be reset for mezz cards */
1624 adapter->portnum = adapter->ahw->pci_func; 1563 adapter->portnum = adapter->ahw->pci_func;
@@ -1646,16 +1585,15 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1646 dev_warn(&pdev->dev, "failed to read mac addr\n"); 1585 dev_warn(&pdev->dev, "failed to read mac addr\n");
1647 1586
1648 if (adapter->portnum == 0) { 1587 if (adapter->portnum == 0) {
1649 get_brd_name(adapter, brd_name); 1588 qlcnic_get_board_name(adapter, board_name);
1650
1651 pr_info("%s: %s Board Chip rev 0x%x\n", 1589 pr_info("%s: %s Board Chip rev 0x%x\n",
1652 module_name(THIS_MODULE), 1590 module_name(THIS_MODULE),
1653 brd_name, adapter->ahw->revision_id); 1591 board_name, adapter->ahw->revision_id);
1654 } 1592 }
1655 1593
1656 qlcnic_clear_stats(adapter); 1594 qlcnic_clear_stats(adapter);
1657 1595
1658 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques); 1596 err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
1659 if (err) 1597 if (err)
1660 goto err_out_decr_ref; 1598 goto err_out_decr_ref;
1661 1599
@@ -1667,7 +1605,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1667 1605
1668 pci_set_drvdata(pdev, adapter); 1606 pci_set_drvdata(pdev, adapter);
1669 1607
1670 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY); 1608 if (qlcnic_82xx_check(adapter))
1609 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
1610 FW_POLL_DELAY);
1671 1611
1672 switch (adapter->ahw->port_type) { 1612 switch (adapter->ahw->port_type) {
1673 case QLCNIC_GBE: 1613 case QLCNIC_GBE:
@@ -1724,7 +1664,7 @@ err_out_maintenance_mode:
1724 return 0; 1664 return 0;
1725} 1665}
1726 1666
1727static void __devexit qlcnic_remove(struct pci_dev *pdev) 1667static void qlcnic_remove(struct pci_dev *pdev)
1728{ 1668{
1729 struct qlcnic_adapter *adapter; 1669 struct qlcnic_adapter *adapter;
1730 struct net_device *netdev; 1670 struct net_device *netdev;
@@ -1746,7 +1686,8 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
1746 if (adapter->eswitch != NULL) 1686 if (adapter->eswitch != NULL)
1747 kfree(adapter->eswitch); 1687 kfree(adapter->eswitch);
1748 1688
1749 qlcnic_clr_all_drv_state(adapter, 0); 1689 if (qlcnic_82xx_check(adapter))
1690 qlcnic_clr_all_drv_state(adapter, 0);
1750 1691
1751 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1692 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1752 1693
@@ -1782,7 +1723,8 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
1782 if (netif_running(netdev)) 1723 if (netif_running(netdev))
1783 qlcnic_down(adapter, netdev); 1724 qlcnic_down(adapter, netdev);
1784 1725
1785 qlcnic_clr_all_drv_state(adapter, 0); 1726 if (qlcnic_82xx_check(adapter))
1727 qlcnic_clr_all_drv_state(adapter, 0);
1786 1728
1787 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1729 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1788 1730
@@ -1790,9 +1732,11 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
1790 if (retval) 1732 if (retval)
1791 return retval; 1733 return retval;
1792 1734
1793 if (qlcnic_wol_supported(adapter)) { 1735 if (qlcnic_82xx_check(adapter)) {
1794 pci_enable_wake(pdev, PCI_D3cold, 1); 1736 if (qlcnic_wol_supported(adapter)) {
1795 pci_enable_wake(pdev, PCI_D3hot, 1); 1737 pci_enable_wake(pdev, PCI_D3cold, 1);
1738 pci_enable_wake(pdev, PCI_D3hot, 1);
1739 }
1796 } 1740 }
1797 1741
1798 return 0; 1742 return 0;
@@ -1927,435 +1871,14 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1927 adapter->fhash.fmax = 0; 1871 adapter->fhash.fmax = 0;
1928} 1872}
1929 1873
1930static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
1931 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
1932{
1933 struct cmd_desc_type0 *hwdesc;
1934 struct qlcnic_nic_req *req;
1935 struct qlcnic_mac_req *mac_req;
1936 struct qlcnic_vlan_req *vlan_req;
1937 u32 producer;
1938 u64 word;
1939
1940 producer = tx_ring->producer;
1941 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1942
1943 req = (struct qlcnic_nic_req *)hwdesc;
1944 memset(req, 0, sizeof(struct qlcnic_nic_req));
1945 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1946
1947 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1948 req->req_hdr = cpu_to_le64(word);
1949
1950 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
1951 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
1952 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1953
1954 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1955 vlan_req->vlan_id = vlan_id;
1956
1957 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1958 smp_mb();
1959}
1960
1961#define QLCNIC_MAC_HASH(MAC)\
1962 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1963
1964static void
1965qlcnic_send_filter(struct qlcnic_adapter *adapter,
1966 struct qlcnic_host_tx_ring *tx_ring,
1967 struct cmd_desc_type0 *first_desc,
1968 struct sk_buff *skb)
1969{
1970 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1971 struct qlcnic_filter *fil, *tmp_fil;
1972 struct hlist_node *tmp_hnode, *n;
1973 struct hlist_head *head;
1974 u64 src_addr = 0;
1975 __le16 vlan_id = 0;
1976 u8 hindex;
1977
1978 if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
1979 return;
1980
1981 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1982 return;
1983
1984 /* Only NPAR capable devices support vlan based learning*/
1985 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1986 vlan_id = first_desc->vlan_TCI;
1987 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1988 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1989 head = &(adapter->fhash.fhead[hindex]);
1990
1991 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
1992 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1993 tmp_fil->vlan_id == vlan_id) {
1994
1995 if (jiffies >
1996 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1997 qlcnic_change_filter(adapter, src_addr, vlan_id,
1998 tx_ring);
1999 tmp_fil->ftime = jiffies;
2000 return;
2001 }
2002 }
2003
2004 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
2005 if (!fil)
2006 return;
2007
2008 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
2009
2010 fil->ftime = jiffies;
2011 fil->vlan_id = vlan_id;
2012 memcpy(fil->faddr, &src_addr, ETH_ALEN);
2013 spin_lock(&adapter->mac_learn_lock);
2014 hlist_add_head(&(fil->fnode), head);
2015 adapter->fhash.fnum++;
2016 spin_unlock(&adapter->mac_learn_lock);
2017}
2018
2019static int
2020qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
2021 struct cmd_desc_type0 *first_desc,
2022 struct sk_buff *skb)
2023{
2024 u8 opcode = 0, hdr_len = 0;
2025 u16 flags = 0, vlan_tci = 0;
2026 int copied, offset, copy_len;
2027 struct cmd_desc_type0 *hwdesc;
2028 struct vlan_ethhdr *vh;
2029 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2030 u16 protocol = ntohs(skb->protocol);
2031 u32 producer = tx_ring->producer;
2032
2033 if (protocol == ETH_P_8021Q) {
2034 vh = (struct vlan_ethhdr *)skb->data;
2035 flags = FLAGS_VLAN_TAGGED;
2036 vlan_tci = vh->h_vlan_TCI;
2037 protocol = ntohs(vh->h_vlan_encapsulated_proto);
2038 } else if (vlan_tx_tag_present(skb)) {
2039 flags = FLAGS_VLAN_OOB;
2040 vlan_tci = vlan_tx_tag_get(skb);
2041 }
2042 if (unlikely(adapter->pvid)) {
2043 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2044 return -EIO;
2045 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2046 goto set_flags;
2047
2048 flags = FLAGS_VLAN_OOB;
2049 vlan_tci = adapter->pvid;
2050 }
2051set_flags:
2052 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2053 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2054
2055 if (*(skb->data) & BIT_0) {
2056 flags |= BIT_0;
2057 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2058 }
2059 opcode = TX_ETHER_PKT;
2060 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
2061 skb_shinfo(skb)->gso_size > 0) {
2062
2063 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2064
2065 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2066 first_desc->total_hdr_length = hdr_len;
2067
2068 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2069
2070 /* For LSO, we need to copy the MAC/IP/TCP headers into
2071 * the descriptor ring */
2072 copied = 0;
2073 offset = 2;
2074
2075 if (flags & FLAGS_VLAN_OOB) {
2076 first_desc->total_hdr_length += VLAN_HLEN;
2077 first_desc->tcp_hdr_offset = VLAN_HLEN;
2078 first_desc->ip_hdr_offset = VLAN_HLEN;
2079 /* Only in case of TSO on vlan device */
2080 flags |= FLAGS_VLAN_TAGGED;
2081
2082 /* Create a TSO vlan header template for firmware */
2083
2084 hwdesc = &tx_ring->desc_head[producer];
2085 tx_ring->cmd_buf_arr[producer].skb = NULL;
2086
2087 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2088 offset, hdr_len + VLAN_HLEN);
2089
2090 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2091 skb_copy_from_linear_data(skb, vh, 12);
2092 vh->h_vlan_proto = htons(ETH_P_8021Q);
2093 vh->h_vlan_TCI = htons(vlan_tci);
2094
2095 skb_copy_from_linear_data_offset(skb, 12,
2096 (char *)vh + 16, copy_len - 16);
2097
2098 copied = copy_len - VLAN_HLEN;
2099 offset = 0;
2100
2101 producer = get_next_index(producer, tx_ring->num_desc);
2102 }
2103
2104 while (copied < hdr_len) {
2105
2106 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2107 offset, (hdr_len - copied));
2108
2109 hwdesc = &tx_ring->desc_head[producer];
2110 tx_ring->cmd_buf_arr[producer].skb = NULL;
2111
2112 skb_copy_from_linear_data_offset(skb, copied,
2113 (char *) hwdesc + offset, copy_len);
2114
2115 copied += copy_len;
2116 offset = 0;
2117
2118 producer = get_next_index(producer, tx_ring->num_desc);
2119 }
2120
2121 tx_ring->producer = producer;
2122 smp_mb();
2123 adapter->stats.lso_frames++;
2124
2125 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2126 u8 l4proto;
2127
2128 if (protocol == ETH_P_IP) {
2129 l4proto = ip_hdr(skb)->protocol;
2130
2131 if (l4proto == IPPROTO_TCP)
2132 opcode = TX_TCP_PKT;
2133 else if (l4proto == IPPROTO_UDP)
2134 opcode = TX_UDP_PKT;
2135 } else if (protocol == ETH_P_IPV6) {
2136 l4proto = ipv6_hdr(skb)->nexthdr;
2137
2138 if (l4proto == IPPROTO_TCP)
2139 opcode = TX_TCPV6_PKT;
2140 else if (l4proto == IPPROTO_UDP)
2141 opcode = TX_UDPV6_PKT;
2142 }
2143 }
2144 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2145 first_desc->ip_hdr_offset += skb_network_offset(skb);
2146 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2147
2148 return 0;
2149}
2150
2151static int
2152qlcnic_map_tx_skb(struct pci_dev *pdev,
2153 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2154{
2155 struct qlcnic_skb_frag *nf;
2156 struct skb_frag_struct *frag;
2157 int i, nr_frags;
2158 dma_addr_t map;
2159
2160 nr_frags = skb_shinfo(skb)->nr_frags;
2161 nf = &pbuf->frag_array[0];
2162
2163 map = pci_map_single(pdev, skb->data,
2164 skb_headlen(skb), PCI_DMA_TODEVICE);
2165 if (pci_dma_mapping_error(pdev, map))
2166 goto out_err;
2167
2168 nf->dma = map;
2169 nf->length = skb_headlen(skb);
2170
2171 for (i = 0; i < nr_frags; i++) {
2172 frag = &skb_shinfo(skb)->frags[i];
2173 nf = &pbuf->frag_array[i+1];
2174
2175 map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
2176 DMA_TO_DEVICE);
2177 if (dma_mapping_error(&pdev->dev, map))
2178 goto unwind;
2179
2180 nf->dma = map;
2181 nf->length = skb_frag_size(frag);
2182 }
2183
2184 return 0;
2185
2186unwind:
2187 while (--i >= 0) {
2188 nf = &pbuf->frag_array[i+1];
2189 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2190 }
2191
2192 nf = &pbuf->frag_array[0];
2193 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2194
2195out_err:
2196 return -ENOMEM;
2197}
2198
2199static void
2200qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2201 struct qlcnic_cmd_buffer *pbuf)
2202{
2203 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2204 int nr_frags = skb_shinfo(skb)->nr_frags;
2205 int i;
2206
2207 for (i = 0; i < nr_frags; i++) {
2208 nf = &pbuf->frag_array[i+1];
2209 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2210 }
2211
2212 nf = &pbuf->frag_array[0];
2213 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2214 pbuf->skb = NULL;
2215}
2216
2217static inline void
2218qlcnic_clear_cmddesc(u64 *desc)
2219{
2220 desc[0] = 0ULL;
2221 desc[2] = 0ULL;
2222 desc[7] = 0ULL;
2223}
2224
2225netdev_tx_t
2226qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2227{
2228 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2229 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2230 struct qlcnic_cmd_buffer *pbuf;
2231 struct qlcnic_skb_frag *buffrag;
2232 struct cmd_desc_type0 *hwdesc, *first_desc;
2233 struct pci_dev *pdev;
2234 struct ethhdr *phdr;
2235 int delta = 0;
2236 int i, k;
2237
2238 u32 producer;
2239 int frag_count;
2240 u32 num_txd = tx_ring->num_desc;
2241
2242 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2243 netif_stop_queue(netdev);
2244 return NETDEV_TX_BUSY;
2245 }
2246
2247 if (adapter->flags & QLCNIC_MACSPOOF) {
2248 phdr = (struct ethhdr *)skb->data;
2249 if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
2250 goto drop_packet;
2251 }
2252
2253 frag_count = skb_shinfo(skb)->nr_frags + 1;
2254 /* 14 frags supported for normal packet and
2255 * 32 frags supported for TSO packet
2256 */
2257 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2258
2259 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2260 delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2261
2262 if (!__pskb_pull_tail(skb, delta))
2263 goto drop_packet;
2264
2265 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2266 }
2267
2268 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
2269 netif_stop_queue(netdev);
2270 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2271 netif_start_queue(netdev);
2272 else {
2273 adapter->stats.xmit_off++;
2274 return NETDEV_TX_BUSY;
2275 }
2276 }
2277
2278 producer = tx_ring->producer;
2279 pbuf = &tx_ring->cmd_buf_arr[producer];
2280
2281 pdev = adapter->pdev;
2282
2283 first_desc = hwdesc = &tx_ring->desc_head[producer];
2284 qlcnic_clear_cmddesc((u64 *)hwdesc);
2285
2286 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2287 adapter->stats.tx_dma_map_error++;
2288 goto drop_packet;
2289 }
2290
2291 pbuf->skb = skb;
2292 pbuf->frag_count = frag_count;
2293
2294 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2295 qlcnic_set_tx_port(first_desc, adapter->portnum);
2296
2297 for (i = 0; i < frag_count; i++) {
2298
2299 k = i % 4;
2300
2301 if ((k == 0) && (i > 0)) {
2302 /* move to next desc.*/
2303 producer = get_next_index(producer, num_txd);
2304 hwdesc = &tx_ring->desc_head[producer];
2305 qlcnic_clear_cmddesc((u64 *)hwdesc);
2306 tx_ring->cmd_buf_arr[producer].skb = NULL;
2307 }
2308
2309 buffrag = &pbuf->frag_array[i];
2310
2311 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2312 switch (k) {
2313 case 0:
2314 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2315 break;
2316 case 1:
2317 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2318 break;
2319 case 2:
2320 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2321 break;
2322 case 3:
2323 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2324 break;
2325 }
2326 }
2327
2328 tx_ring->producer = get_next_index(producer, num_txd);
2329 smp_mb();
2330
2331 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2332 goto unwind_buff;
2333
2334 if (adapter->mac_learn)
2335 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2336
2337 adapter->stats.txbytes += skb->len;
2338 adapter->stats.xmitcalled++;
2339
2340 qlcnic_update_cmd_producer(adapter, tx_ring);
2341
2342 return NETDEV_TX_OK;
2343
2344unwind_buff:
2345 qlcnic_unmap_buffers(pdev, skb, pbuf);
2346drop_packet:
2347 adapter->stats.txdropped++;
2348 dev_kfree_skb_any(skb);
2349 return NETDEV_TX_OK;
2350}
2351
2352static int qlcnic_check_temp(struct qlcnic_adapter *adapter) 1874static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2353{ 1875{
2354 struct net_device *netdev = adapter->netdev; 1876 struct net_device *netdev = adapter->netdev;
2355 u32 temp, temp_state, temp_val; 1877 u32 temp_state, temp_val, temp = 0;
2356 int rv = 0; 1878 int rv = 0;
2357 1879
2358 temp = QLCRD32(adapter, CRB_TEMP_STATE); 1880 if (qlcnic_82xx_check(adapter))
1881 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2359 1882
2360 temp_state = qlcnic_get_temp_state(temp); 1883 temp_state = qlcnic_get_temp_state(temp);
2361 temp_val = qlcnic_get_temp_val(temp); 1884 temp_val = qlcnic_get_temp_val(temp);
@@ -2367,7 +1890,7 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2367 temp_val); 1890 temp_val);
2368 rv = 1; 1891 rv = 1;
2369 } else if (temp_state == QLCNIC_TEMP_WARN) { 1892 } else if (temp_state == QLCNIC_TEMP_WARN) {
2370 if (adapter->temp == QLCNIC_TEMP_NORMAL) { 1893 if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
2371 dev_err(&netdev->dev, 1894 dev_err(&netdev->dev,
2372 "Device temperature %d degrees C " 1895 "Device temperature %d degrees C "
2373 "exceeds operating range." 1896 "exceeds operating range."
@@ -2375,37 +1898,16 @@ static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2375 temp_val); 1898 temp_val);
2376 } 1899 }
2377 } else { 1900 } else {
2378 if (adapter->temp == QLCNIC_TEMP_WARN) { 1901 if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
2379 dev_info(&netdev->dev, 1902 dev_info(&netdev->dev,
2380 "Device temperature is now %d degrees C" 1903 "Device temperature is now %d degrees C"
2381 " in normal range.\n", temp_val); 1904 " in normal range.\n", temp_val);
2382 } 1905 }
2383 } 1906 }
2384 adapter->temp = temp_state; 1907 adapter->ahw->temp = temp_state;
2385 return rv; 1908 return rv;
2386} 1909}
2387 1910
2388void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2389{
2390 struct net_device *netdev = adapter->netdev;
2391
2392 if (adapter->ahw->linkup && !linkup) {
2393 netdev_info(netdev, "NIC Link is down\n");
2394 adapter->ahw->linkup = 0;
2395 if (netif_running(netdev)) {
2396 netif_carrier_off(netdev);
2397 netif_stop_queue(netdev);
2398 }
2399 } else if (!adapter->ahw->linkup && linkup) {
2400 netdev_info(netdev, "NIC Link is up\n");
2401 adapter->ahw->linkup = 1;
2402 if (netif_running(netdev)) {
2403 netif_carrier_on(netdev);
2404 netif_wake_queue(netdev);
2405 }
2406 }
2407}
2408
2409static void qlcnic_tx_timeout(struct net_device *netdev) 1911static void qlcnic_tx_timeout(struct net_device *netdev)
2410{ 1912{
2411 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1913 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -2418,7 +1920,7 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
2418 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) 1920 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2419 adapter->need_fw_reset = 1; 1921 adapter->need_fw_reset = 1;
2420 else 1922 else
2421 adapter->reset_context = 1; 1923 adapter->ahw->reset_context = 1;
2422} 1924}
2423 1925
2424static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) 1926static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -2442,7 +1944,7 @@ static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
2442 1944
2443 status = readl(adapter->isr_int_vec); 1945 status = readl(adapter->isr_int_vec);
2444 1946
2445 if (!(status & adapter->int_vec_bit)) 1947 if (!(status & adapter->ahw->int_vec_bit))
2446 return IRQ_NONE; 1948 return IRQ_NONE;
2447 1949
2448 /* check interrupt state machine, to be sure */ 1950 /* check interrupt state machine, to be sure */
@@ -2474,7 +1976,7 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2474 return IRQ_NONE; 1976 return IRQ_NONE;
2475 1977
2476done: 1978done:
2477 adapter->diag_cnt++; 1979 adapter->ahw->diag_cnt++;
2478 qlcnic_enable_int(sds_ring); 1980 qlcnic_enable_int(sds_ring);
2479 return IRQ_HANDLED; 1981 return IRQ_HANDLED;
2480} 1982}
@@ -2512,122 +2014,6 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2512 return IRQ_HANDLED; 2014 return IRQ_HANDLED;
2513} 2015}
2514 2016
2515static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2516{
2517 u32 sw_consumer, hw_consumer;
2518 int count = 0, i;
2519 struct qlcnic_cmd_buffer *buffer;
2520 struct pci_dev *pdev = adapter->pdev;
2521 struct net_device *netdev = adapter->netdev;
2522 struct qlcnic_skb_frag *frag;
2523 int done;
2524 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2525
2526 if (!spin_trylock(&adapter->tx_clean_lock))
2527 return 1;
2528
2529 sw_consumer = tx_ring->sw_consumer;
2530 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2531
2532 while (sw_consumer != hw_consumer) {
2533 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2534 if (buffer->skb) {
2535 frag = &buffer->frag_array[0];
2536 pci_unmap_single(pdev, frag->dma, frag->length,
2537 PCI_DMA_TODEVICE);
2538 frag->dma = 0ULL;
2539 for (i = 1; i < buffer->frag_count; i++) {
2540 frag++;
2541 pci_unmap_page(pdev, frag->dma, frag->length,
2542 PCI_DMA_TODEVICE);
2543 frag->dma = 0ULL;
2544 }
2545
2546 adapter->stats.xmitfinished++;
2547 dev_kfree_skb_any(buffer->skb);
2548 buffer->skb = NULL;
2549 }
2550
2551 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2552 if (++count >= MAX_STATUS_HANDLE)
2553 break;
2554 }
2555
2556 if (count && netif_running(netdev)) {
2557 tx_ring->sw_consumer = sw_consumer;
2558
2559 smp_mb();
2560
2561 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2562 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2563 netif_wake_queue(netdev);
2564 adapter->stats.xmit_on++;
2565 }
2566 }
2567 adapter->tx_timeo_cnt = 0;
2568 }
2569 /*
2570 * If everything is freed up to consumer then check if the ring is full
2571 * If the ring is full then check if more needs to be freed and
2572 * schedule the call back again.
2573 *
2574 * This happens when there are 2 CPUs. One could be freeing and the
2575 * other filling it. If the ring is full when we get out of here and
2576 * the card has already interrupted the host then the host can miss the
2577 * interrupt.
2578 *
2579 * There is still a possible race condition and the host could miss an
2580 * interrupt. The card has to take care of this.
2581 */
2582 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2583 done = (sw_consumer == hw_consumer);
2584 spin_unlock(&adapter->tx_clean_lock);
2585
2586 return done;
2587}
2588
2589static int qlcnic_poll(struct napi_struct *napi, int budget)
2590{
2591 struct qlcnic_host_sds_ring *sds_ring =
2592 container_of(napi, struct qlcnic_host_sds_ring, napi);
2593
2594 struct qlcnic_adapter *adapter = sds_ring->adapter;
2595
2596 int tx_complete;
2597 int work_done;
2598
2599 tx_complete = qlcnic_process_cmd_ring(adapter);
2600
2601 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2602
2603 if ((work_done < budget) && tx_complete) {
2604 napi_complete(&sds_ring->napi);
2605 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2606 qlcnic_enable_int(sds_ring);
2607 }
2608
2609 return work_done;
2610}
2611
2612static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2613{
2614 struct qlcnic_host_sds_ring *sds_ring =
2615 container_of(napi, struct qlcnic_host_sds_ring, napi);
2616
2617 struct qlcnic_adapter *adapter = sds_ring->adapter;
2618 int work_done;
2619
2620 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2621
2622 if (work_done < budget) {
2623 napi_complete(&sds_ring->napi);
2624 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2625 qlcnic_enable_int(sds_ring);
2626 }
2627
2628 return work_done;
2629}
2630
2631#ifdef CONFIG_NET_POLL_CONTROLLER 2017#ifdef CONFIG_NET_POLL_CONTROLLER
2632static void qlcnic_poll_controller(struct net_device *netdev) 2018static void qlcnic_poll_controller(struct net_device *netdev)
2633{ 2019{
@@ -2871,7 +2257,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2871 return; 2257 return;
2872 } 2258 }
2873 2259
2874 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) { 2260 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
2875 qlcnic_api_unlock(adapter); 2261 qlcnic_api_unlock(adapter);
2876 goto wait_npar; 2262 goto wait_npar;
2877 } 2263 }
@@ -2987,9 +2373,9 @@ qlcnic_detach_work(struct work_struct *work)
2987 goto err_ret; 2373 goto err_ret;
2988 } 2374 }
2989 2375
2990 if (adapter->temp == QLCNIC_TEMP_PANIC) { 2376 if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
2991 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n", 2377 dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
2992 adapter->temp); 2378 adapter->ahw->temp);
2993 goto err_ret; 2379 goto err_ret;
2994 } 2380 }
2995 2381
@@ -3114,7 +2500,7 @@ qlcnic_attach_work(struct work_struct *work)
3114 struct net_device *netdev = adapter->netdev; 2500 struct net_device *netdev = adapter->netdev;
3115 u32 npar_state; 2501 u32 npar_state;
3116 2502
3117 if (adapter->op_mode != QLCNIC_MGMT_FUNC) { 2503 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
3118 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE); 2504 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3119 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO) 2505 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3120 qlcnic_clr_all_drv_state(adapter, 0); 2506 qlcnic_clr_all_drv_state(adapter, 0);
@@ -3171,7 +2557,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3171 if (adapter->need_fw_reset) 2557 if (adapter->need_fw_reset)
3172 goto detach; 2558 goto detach;
3173 2559
3174 if (adapter->reset_context && auto_fw_reset) { 2560 if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) {
3175 qlcnic_reset_hw_context(adapter); 2561 qlcnic_reset_hw_context(adapter);
3176 adapter->netdev->trans_start = jiffies; 2562 adapter->netdev->trans_start = jiffies;
3177 } 2563 }
@@ -3186,7 +2572,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3186 2572
3187 qlcnic_dev_request_reset(adapter); 2573 qlcnic_dev_request_reset(adapter);
3188 2574
3189 if (auto_fw_reset) 2575 if (qlcnic_auto_fw_reset)
3190 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); 2576 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
3191 2577
3192 dev_err(&adapter->pdev->dev, "firmware hang detected\n"); 2578 dev_err(&adapter->pdev->dev, "firmware hang detected\n");
@@ -3211,8 +2597,8 @@ detach:
3211 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state : 2597 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3212 QLCNIC_DEV_NEED_RESET; 2598 QLCNIC_DEV_NEED_RESET;
3213 2599
3214 if (auto_fw_reset && 2600 if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
3215 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { 2601 &adapter->state)) {
3216 2602
3217 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2603 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
3218 QLCDB(adapter, DRV, "fw recovery scheduled.\n"); 2604 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
@@ -3283,7 +2669,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
3283 if (qlcnic_api_lock(adapter)) 2669 if (qlcnic_api_lock(adapter))
3284 return -EINVAL; 2670 return -EINVAL;
3285 2671
3286 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) { 2672 if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
3287 adapter->need_fw_reset = 1; 2673 adapter->need_fw_reset = 1;
3288 set_bit(__QLCNIC_START_FW, &adapter->state); 2674 set_bit(__QLCNIC_START_FW, &adapter->state);
3289 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING); 2675 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
@@ -3395,96 +2781,9 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3395 return err; 2781 return err;
3396} 2782}
3397 2783
3398static int
3399qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3400{
3401 return -EOPNOTSUPP;
3402}
3403
3404static int
3405qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3406{
3407 return -EOPNOTSUPP;
3408}
3409
3410static ssize_t
3411qlcnic_store_bridged_mode(struct device *dev,
3412 struct device_attribute *attr, const char *buf, size_t len)
3413{
3414 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3415 unsigned long new;
3416 int ret = -EINVAL;
3417
3418 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3419 goto err_out;
3420
3421 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3422 goto err_out;
3423
3424 if (strict_strtoul(buf, 2, &new))
3425 goto err_out;
3426
3427 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
3428 ret = len;
3429
3430err_out:
3431 return ret;
3432}
3433
3434static ssize_t
3435qlcnic_show_bridged_mode(struct device *dev,
3436 struct device_attribute *attr, char *buf)
3437{
3438 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3439 int bridged_mode = 0;
3440
3441 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3442 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3443
3444 return sprintf(buf, "%d\n", bridged_mode);
3445}
3446
3447static struct device_attribute dev_attr_bridged_mode = {
3448 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3449 .show = qlcnic_show_bridged_mode,
3450 .store = qlcnic_store_bridged_mode,
3451};
3452
3453static ssize_t
3454qlcnic_store_diag_mode(struct device *dev,
3455 struct device_attribute *attr, const char *buf, size_t len)
3456{
3457 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3458 unsigned long new;
3459
3460 if (strict_strtoul(buf, 2, &new))
3461 return -EINVAL;
3462
3463 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3464 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3465
3466 return len;
3467}
3468
3469static ssize_t
3470qlcnic_show_diag_mode(struct device *dev,
3471 struct device_attribute *attr, char *buf)
3472{
3473 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3474
3475 return sprintf(buf, "%d\n",
3476 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3477}
3478
3479static struct device_attribute dev_attr_diag_mode = {
3480 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3481 .show = qlcnic_show_diag_mode,
3482 .store = qlcnic_store_diag_mode,
3483};
3484
3485int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val) 2784int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3486{ 2785{
3487 if (!use_msi_x && !use_msi) { 2786 if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
3488 netdev_info(netdev, "no msix or msi support, hence no rss\n"); 2787 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3489 return -EINVAL; 2788 return -EINVAL;
3490 } 2789 }
@@ -3532,859 +2831,6 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3532 return err; 2831 return err;
3533} 2832}
3534 2833
3535static int
3536qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon, u8 *state,
3537 u8 *rate)
3538{
3539 *rate = LSB(beacon);
3540 *state = MSB(beacon);
3541
3542 QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
3543
3544 if (!*state) {
3545 *rate = __QLCNIC_MAX_LED_RATE;
3546 return 0;
3547 } else if (*state > __QLCNIC_MAX_LED_STATE)
3548 return -EINVAL;
3549
3550 if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
3551 return -EINVAL;
3552
3553 return 0;
3554}
3555
3556static ssize_t
3557qlcnic_store_beacon(struct device *dev,
3558 struct device_attribute *attr, const char *buf, size_t len)
3559{
3560 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3561 int max_sds_rings = adapter->max_sds_rings;
3562 u16 beacon;
3563 u8 b_state, b_rate;
3564 int err;
3565
3566 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3567 dev_warn(dev, "LED test not supported for non "
3568 "privilege function\n");
3569 return -EOPNOTSUPP;
3570 }
3571
3572 if (len != sizeof(u16))
3573 return QL_STATUS_INVALID_PARAM;
3574
3575 memcpy(&beacon, buf, sizeof(u16));
3576 err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
3577 if (err)
3578 return err;
3579
3580 if (adapter->ahw->beacon_state == b_state)
3581 return len;
3582
3583 rtnl_lock();
3584
3585 if (!adapter->ahw->beacon_state)
3586 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
3587 rtnl_unlock();
3588 return -EBUSY;
3589 }
3590
3591 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
3592 err = -EIO;
3593 goto out;
3594 }
3595
3596 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
3597 err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
3598 if (err)
3599 goto out;
3600 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
3601 }
3602
3603 err = qlcnic_config_led(adapter, b_state, b_rate);
3604
3605 if (!err) {
3606 err = len;
3607 adapter->ahw->beacon_state = b_state;
3608 }
3609
3610 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
3611 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
3612
3613 out:
3614 if (!adapter->ahw->beacon_state)
3615 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
3616 rtnl_unlock();
3617
3618 return err;
3619}
3620
3621static ssize_t
3622qlcnic_show_beacon(struct device *dev,
3623 struct device_attribute *attr, char *buf)
3624{
3625 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3626
3627 return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
3628}
3629
3630static struct device_attribute dev_attr_beacon = {
3631 .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
3632 .show = qlcnic_show_beacon,
3633 .store = qlcnic_store_beacon,
3634};
3635
3636static int
3637qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3638 loff_t offset, size_t size)
3639{
3640 size_t crb_size = 4;
3641
3642 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3643 return -EIO;
3644
3645 if (offset < QLCNIC_PCI_CRBSPACE) {
3646 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3647 QLCNIC_PCI_CAMQM_END))
3648 crb_size = 8;
3649 else
3650 return -EINVAL;
3651 }
3652
3653 if ((size != crb_size) || (offset & (crb_size-1)))
3654 return -EINVAL;
3655
3656 return 0;
3657}
3658
3659static ssize_t
3660qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3661 struct bin_attribute *attr,
3662 char *buf, loff_t offset, size_t size)
3663{
3664 struct device *dev = container_of(kobj, struct device, kobj);
3665 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3666 u32 data;
3667 u64 qmdata;
3668 int ret;
3669
3670 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3671 if (ret != 0)
3672 return ret;
3673
3674 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3675 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3676 memcpy(buf, &qmdata, size);
3677 } else {
3678 data = QLCRD32(adapter, offset);
3679 memcpy(buf, &data, size);
3680 }
3681 return size;
3682}
3683
3684static ssize_t
3685qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3686 struct bin_attribute *attr,
3687 char *buf, loff_t offset, size_t size)
3688{
3689 struct device *dev = container_of(kobj, struct device, kobj);
3690 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3691 u32 data;
3692 u64 qmdata;
3693 int ret;
3694
3695 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3696 if (ret != 0)
3697 return ret;
3698
3699 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3700 memcpy(&qmdata, buf, size);
3701 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3702 } else {
3703 memcpy(&data, buf, size);
3704 QLCWR32(adapter, offset, data);
3705 }
3706 return size;
3707}
3708
3709static int
3710qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3711 loff_t offset, size_t size)
3712{
3713 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3714 return -EIO;
3715
3716 if ((size != 8) || (offset & 0x7))
3717 return -EIO;
3718
3719 return 0;
3720}
3721
3722static ssize_t
3723qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3724 struct bin_attribute *attr,
3725 char *buf, loff_t offset, size_t size)
3726{
3727 struct device *dev = container_of(kobj, struct device, kobj);
3728 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3729 u64 data;
3730 int ret;
3731
3732 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3733 if (ret != 0)
3734 return ret;
3735
3736 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3737 return -EIO;
3738
3739 memcpy(buf, &data, size);
3740
3741 return size;
3742}
3743
3744static ssize_t
3745qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3746 struct bin_attribute *attr,
3747 char *buf, loff_t offset, size_t size)
3748{
3749 struct device *dev = container_of(kobj, struct device, kobj);
3750 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3751 u64 data;
3752 int ret;
3753
3754 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3755 if (ret != 0)
3756 return ret;
3757
3758 memcpy(&data, buf, size);
3759
3760 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3761 return -EIO;
3762
3763 return size;
3764}
3765
3766static struct bin_attribute bin_attr_crb = {
3767 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3768 .size = 0,
3769 .read = qlcnic_sysfs_read_crb,
3770 .write = qlcnic_sysfs_write_crb,
3771};
3772
3773static struct bin_attribute bin_attr_mem = {
3774 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3775 .size = 0,
3776 .read = qlcnic_sysfs_read_mem,
3777 .write = qlcnic_sysfs_write_mem,
3778};
3779
3780static int
3781validate_pm_config(struct qlcnic_adapter *adapter,
3782 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3783{
3784
3785 u8 src_pci_func, s_esw_id, d_esw_id;
3786 u8 dest_pci_func;
3787 int i;
3788
3789 for (i = 0; i < count; i++) {
3790 src_pci_func = pm_cfg[i].pci_func;
3791 dest_pci_func = pm_cfg[i].dest_npar;
3792 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3793 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3794 return QL_STATUS_INVALID_PARAM;
3795
3796 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3797 return QL_STATUS_INVALID_PARAM;
3798
3799 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3800 return QL_STATUS_INVALID_PARAM;
3801
3802 s_esw_id = adapter->npars[src_pci_func].phy_port;
3803 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3804
3805 if (s_esw_id != d_esw_id)
3806 return QL_STATUS_INVALID_PARAM;
3807
3808 }
3809 return 0;
3810
3811}
3812
3813static ssize_t
3814qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3815 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3816{
3817 struct device *dev = container_of(kobj, struct device, kobj);
3818 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3819 struct qlcnic_pm_func_cfg *pm_cfg;
3820 u32 id, action, pci_func;
3821 int count, rem, i, ret;
3822
3823 count = size / sizeof(struct qlcnic_pm_func_cfg);
3824 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3825 if (rem)
3826 return QL_STATUS_INVALID_PARAM;
3827
3828 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3829
3830 ret = validate_pm_config(adapter, pm_cfg, count);
3831 if (ret)
3832 return ret;
3833 for (i = 0; i < count; i++) {
3834 pci_func = pm_cfg[i].pci_func;
3835 action = !!pm_cfg[i].action;
3836 id = adapter->npars[pci_func].phy_port;
3837 ret = qlcnic_config_port_mirroring(adapter, id,
3838 action, pci_func);
3839 if (ret)
3840 return ret;
3841 }
3842
3843 for (i = 0; i < count; i++) {
3844 pci_func = pm_cfg[i].pci_func;
3845 id = adapter->npars[pci_func].phy_port;
3846 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3847 adapter->npars[pci_func].dest_npar = id;
3848 }
3849 return size;
3850}
3851
3852static ssize_t
3853qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3854 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3855{
3856 struct device *dev = container_of(kobj, struct device, kobj);
3857 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3858 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3859 int i;
3860
3861 if (size != sizeof(pm_cfg))
3862 return QL_STATUS_INVALID_PARAM;
3863
3864 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3865 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3866 continue;
3867 pm_cfg[i].action = adapter->npars[i].enable_pm;
3868 pm_cfg[i].dest_npar = 0;
3869 pm_cfg[i].pci_func = i;
3870 }
3871 memcpy(buf, &pm_cfg, size);
3872
3873 return size;
3874}
3875
3876static int
3877validate_esw_config(struct qlcnic_adapter *adapter,
3878 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3879{
3880 u32 op_mode;
3881 u8 pci_func;
3882 int i;
3883
3884 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
3885
3886 for (i = 0; i < count; i++) {
3887 pci_func = esw_cfg[i].pci_func;
3888 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3889 return QL_STATUS_INVALID_PARAM;
3890
3891 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3892 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3893 return QL_STATUS_INVALID_PARAM;
3894
3895 switch (esw_cfg[i].op_mode) {
3896 case QLCNIC_PORT_DEFAULTS:
3897 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3898 QLCNIC_NON_PRIV_FUNC) {
3899 if (esw_cfg[i].mac_anti_spoof != 0)
3900 return QL_STATUS_INVALID_PARAM;
3901 if (esw_cfg[i].mac_override != 1)
3902 return QL_STATUS_INVALID_PARAM;
3903 if (esw_cfg[i].promisc_mode != 1)
3904 return QL_STATUS_INVALID_PARAM;
3905 }
3906 break;
3907 case QLCNIC_ADD_VLAN:
3908 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3909 return QL_STATUS_INVALID_PARAM;
3910 if (!esw_cfg[i].op_type)
3911 return QL_STATUS_INVALID_PARAM;
3912 break;
3913 case QLCNIC_DEL_VLAN:
3914 if (!esw_cfg[i].op_type)
3915 return QL_STATUS_INVALID_PARAM;
3916 break;
3917 default:
3918 return QL_STATUS_INVALID_PARAM;
3919 }
3920 }
3921 return 0;
3922}
3923
3924static ssize_t
3925qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3926 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3927{
3928 struct device *dev = container_of(kobj, struct device, kobj);
3929 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3930 struct qlcnic_esw_func_cfg *esw_cfg;
3931 struct qlcnic_npar_info *npar;
3932 int count, rem, i, ret;
3933 u8 pci_func, op_mode = 0;
3934
3935 count = size / sizeof(struct qlcnic_esw_func_cfg);
3936 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3937 if (rem)
3938 return QL_STATUS_INVALID_PARAM;
3939
3940 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3941 ret = validate_esw_config(adapter, esw_cfg, count);
3942 if (ret)
3943 return ret;
3944
3945 for (i = 0; i < count; i++) {
3946 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3947 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3948 return QL_STATUS_INVALID_PARAM;
3949
3950 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
3951 continue;
3952
3953 op_mode = esw_cfg[i].op_mode;
3954 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3955 esw_cfg[i].op_mode = op_mode;
3956 esw_cfg[i].pci_func = adapter->ahw->pci_func;
3957
3958 switch (esw_cfg[i].op_mode) {
3959 case QLCNIC_PORT_DEFAULTS:
3960 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3961 break;
3962 case QLCNIC_ADD_VLAN:
3963 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3964 break;
3965 case QLCNIC_DEL_VLAN:
3966 esw_cfg[i].vlan_id = 0;
3967 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3968 break;
3969 }
3970 }
3971
3972 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3973 goto out;
3974
3975 for (i = 0; i < count; i++) {
3976 pci_func = esw_cfg[i].pci_func;
3977 npar = &adapter->npars[pci_func];
3978 switch (esw_cfg[i].op_mode) {
3979 case QLCNIC_PORT_DEFAULTS:
3980 npar->promisc_mode = esw_cfg[i].promisc_mode;
3981 npar->mac_override = esw_cfg[i].mac_override;
3982 npar->offload_flags = esw_cfg[i].offload_flags;
3983 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3984 npar->discard_tagged = esw_cfg[i].discard_tagged;
3985 break;
3986 case QLCNIC_ADD_VLAN:
3987 npar->pvid = esw_cfg[i].vlan_id;
3988 break;
3989 case QLCNIC_DEL_VLAN:
3990 npar->pvid = 0;
3991 break;
3992 }
3993 }
3994out:
3995 return size;
3996}
3997
3998static ssize_t
3999qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
4000 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4001{
4002 struct device *dev = container_of(kobj, struct device, kobj);
4003 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4004 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4005 u8 i;
4006
4007 if (size != sizeof(esw_cfg))
4008 return QL_STATUS_INVALID_PARAM;
4009
4010 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4011 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
4012 continue;
4013 esw_cfg[i].pci_func = i;
4014 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
4015 return QL_STATUS_INVALID_PARAM;
4016 }
4017 memcpy(buf, &esw_cfg, size);
4018
4019 return size;
4020}
4021
4022static int
4023validate_npar_config(struct qlcnic_adapter *adapter,
4024 struct qlcnic_npar_func_cfg *np_cfg, int count)
4025{
4026 u8 pci_func, i;
4027
4028 for (i = 0; i < count; i++) {
4029 pci_func = np_cfg[i].pci_func;
4030 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
4031 return QL_STATUS_INVALID_PARAM;
4032
4033 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
4034 return QL_STATUS_INVALID_PARAM;
4035
4036 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
4037 !IS_VALID_BW(np_cfg[i].max_bw))
4038 return QL_STATUS_INVALID_PARAM;
4039 }
4040 return 0;
4041}
4042
4043static ssize_t
4044qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
4045 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4046{
4047 struct device *dev = container_of(kobj, struct device, kobj);
4048 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4049 struct qlcnic_info nic_info;
4050 struct qlcnic_npar_func_cfg *np_cfg;
4051 int i, count, rem, ret;
4052 u8 pci_func;
4053
4054 count = size / sizeof(struct qlcnic_npar_func_cfg);
4055 rem = size % sizeof(struct qlcnic_npar_func_cfg);
4056 if (rem)
4057 return QL_STATUS_INVALID_PARAM;
4058
4059 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
4060 ret = validate_npar_config(adapter, np_cfg, count);
4061 if (ret)
4062 return ret;
4063
4064 for (i = 0; i < count ; i++) {
4065 pci_func = np_cfg[i].pci_func;
4066 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
4067 if (ret)
4068 return ret;
4069 nic_info.pci_func = pci_func;
4070 nic_info.min_tx_bw = np_cfg[i].min_bw;
4071 nic_info.max_tx_bw = np_cfg[i].max_bw;
4072 ret = qlcnic_set_nic_info(adapter, &nic_info);
4073 if (ret)
4074 return ret;
4075 adapter->npars[i].min_bw = nic_info.min_tx_bw;
4076 adapter->npars[i].max_bw = nic_info.max_tx_bw;
4077 }
4078
4079 return size;
4080
4081}
4082static ssize_t
4083qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
4084 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4085{
4086 struct device *dev = container_of(kobj, struct device, kobj);
4087 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4088 struct qlcnic_info nic_info;
4089 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
4090 int i, ret;
4091
4092 if (size != sizeof(np_cfg))
4093 return QL_STATUS_INVALID_PARAM;
4094
4095 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
4096 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
4097 continue;
4098 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
4099 if (ret)
4100 return ret;
4101
4102 np_cfg[i].pci_func = i;
4103 np_cfg[i].op_mode = (u8)nic_info.op_mode;
4104 np_cfg[i].port_num = nic_info.phys_port;
4105 np_cfg[i].fw_capab = nic_info.capabilities;
4106 np_cfg[i].min_bw = nic_info.min_tx_bw ;
4107 np_cfg[i].max_bw = nic_info.max_tx_bw;
4108 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
4109 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
4110 }
4111 memcpy(buf, &np_cfg, size);
4112 return size;
4113}
4114
4115static ssize_t
4116qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
4117 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4118{
4119 struct device *dev = container_of(kobj, struct device, kobj);
4120 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4121 struct qlcnic_esw_statistics port_stats;
4122 int ret;
4123
4124 if (size != sizeof(struct qlcnic_esw_statistics))
4125 return QL_STATUS_INVALID_PARAM;
4126
4127 if (offset >= QLCNIC_MAX_PCI_FUNC)
4128 return QL_STATUS_INVALID_PARAM;
4129
4130 memset(&port_stats, 0, size);
4131 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4132 &port_stats.rx);
4133 if (ret)
4134 return ret;
4135
4136 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4137 &port_stats.tx);
4138 if (ret)
4139 return ret;
4140
4141 memcpy(buf, &port_stats, size);
4142 return size;
4143}
4144
4145static ssize_t
4146qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
4147 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4148{
4149 struct device *dev = container_of(kobj, struct device, kobj);
4150 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4151 struct qlcnic_esw_statistics esw_stats;
4152 int ret;
4153
4154 if (size != sizeof(struct qlcnic_esw_statistics))
4155 return QL_STATUS_INVALID_PARAM;
4156
4157 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4158 return QL_STATUS_INVALID_PARAM;
4159
4160 memset(&esw_stats, 0, size);
4161 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
4162 &esw_stats.rx);
4163 if (ret)
4164 return ret;
4165
4166 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
4167 &esw_stats.tx);
4168 if (ret)
4169 return ret;
4170
4171 memcpy(buf, &esw_stats, size);
4172 return size;
4173}
4174
4175static ssize_t
4176qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
4177 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4178{
4179 struct device *dev = container_of(kobj, struct device, kobj);
4180 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4181 int ret;
4182
4183 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
4184 return QL_STATUS_INVALID_PARAM;
4185
4186 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4187 QLCNIC_QUERY_RX_COUNTER);
4188 if (ret)
4189 return ret;
4190
4191 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
4192 QLCNIC_QUERY_TX_COUNTER);
4193 if (ret)
4194 return ret;
4195
4196 return size;
4197}
4198
4199static ssize_t
4200qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
4201 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4202{
4203
4204 struct device *dev = container_of(kobj, struct device, kobj);
4205 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4206 int ret;
4207
4208 if (offset >= QLCNIC_MAX_PCI_FUNC)
4209 return QL_STATUS_INVALID_PARAM;
4210
4211 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4212 QLCNIC_QUERY_RX_COUNTER);
4213 if (ret)
4214 return ret;
4215
4216 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4217 QLCNIC_QUERY_TX_COUNTER);
4218 if (ret)
4219 return ret;
4220
4221 return size;
4222}
4223
4224static ssize_t
4225qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4226 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4227{
4228 struct device *dev = container_of(kobj, struct device, kobj);
4229 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4230 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
4231 struct qlcnic_pci_info *pci_info;
4232 int i, ret;
4233
4234 if (size != sizeof(pci_cfg))
4235 return QL_STATUS_INVALID_PARAM;
4236
4237 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4238 if (!pci_info)
4239 return -ENOMEM;
4240
4241 ret = qlcnic_get_pci_info(adapter, pci_info);
4242 if (ret) {
4243 kfree(pci_info);
4244 return ret;
4245 }
4246
4247 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
4248 pci_cfg[i].pci_func = pci_info[i].id;
4249 pci_cfg[i].func_type = pci_info[i].type;
4250 pci_cfg[i].port_num = pci_info[i].default_port;
4251 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4252 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4253 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4254 }
4255 memcpy(buf, &pci_cfg, size);
4256 kfree(pci_info);
4257 return size;
4258}
4259static struct bin_attribute bin_attr_npar_config = {
4260 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4261 .size = 0,
4262 .read = qlcnic_sysfs_read_npar_config,
4263 .write = qlcnic_sysfs_write_npar_config,
4264};
4265
4266static struct bin_attribute bin_attr_pci_config = {
4267 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4268 .size = 0,
4269 .read = qlcnic_sysfs_read_pci_config,
4270 .write = NULL,
4271};
4272
4273static struct bin_attribute bin_attr_port_stats = {
4274 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4275 .size = 0,
4276 .read = qlcnic_sysfs_get_port_stats,
4277 .write = qlcnic_sysfs_clear_port_stats,
4278};
4279
4280static struct bin_attribute bin_attr_esw_stats = {
4281 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4282 .size = 0,
4283 .read = qlcnic_sysfs_get_esw_stats,
4284 .write = qlcnic_sysfs_clear_esw_stats,
4285};
4286
4287static struct bin_attribute bin_attr_esw_config = {
4288 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4289 .size = 0,
4290 .read = qlcnic_sysfs_read_esw_config,
4291 .write = qlcnic_sysfs_write_esw_config,
4292};
4293
4294static struct bin_attribute bin_attr_pm_config = {
4295 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4296 .size = 0,
4297 .read = qlcnic_sysfs_read_pm_config,
4298 .write = qlcnic_sysfs_write_pm_config,
4299};
4300
4301static void
4302qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4303{
4304 struct device *dev = &adapter->pdev->dev;
4305
4306 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4307 if (device_create_file(dev, &dev_attr_bridged_mode))
4308 dev_warn(dev,
4309 "failed to create bridged_mode sysfs entry\n");
4310}
4311
4312static void
4313qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4314{
4315 struct device *dev = &adapter->pdev->dev;
4316
4317 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4318 device_remove_file(dev, &dev_attr_bridged_mode);
4319}
4320
4321static void
4322qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4323{
4324 struct device *dev = &adapter->pdev->dev;
4325 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4326
4327 if (device_create_bin_file(dev, &bin_attr_port_stats))
4328 dev_info(dev, "failed to create port stats sysfs entry");
4329
4330 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4331 return;
4332 if (device_create_file(dev, &dev_attr_diag_mode))
4333 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4334 if (device_create_bin_file(dev, &bin_attr_crb))
4335 dev_info(dev, "failed to create crb sysfs entry\n");
4336 if (device_create_bin_file(dev, &bin_attr_mem))
4337 dev_info(dev, "failed to create mem sysfs entry\n");
4338
4339 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4340 return;
4341
4342 if (device_create_bin_file(dev, &bin_attr_pci_config))
4343 dev_info(dev, "failed to create pci config sysfs entry");
4344 if (device_create_file(dev, &dev_attr_beacon))
4345 dev_info(dev, "failed to create beacon sysfs entry");
4346
4347 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4348 return;
4349 if (device_create_bin_file(dev, &bin_attr_esw_config))
4350 dev_info(dev, "failed to create esw config sysfs entry");
4351 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4352 return;
4353 if (device_create_bin_file(dev, &bin_attr_npar_config))
4354 dev_info(dev, "failed to create npar config sysfs entry");
4355 if (device_create_bin_file(dev, &bin_attr_pm_config))
4356 dev_info(dev, "failed to create pm config sysfs entry");
4357 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4358 dev_info(dev, "failed to create eswitch stats sysfs entry");
4359}
4360
4361static void
4362qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4363{
4364 struct device *dev = &adapter->pdev->dev;
4365 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4366
4367 device_remove_bin_file(dev, &bin_attr_port_stats);
4368
4369 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4370 return;
4371 device_remove_file(dev, &dev_attr_diag_mode);
4372 device_remove_bin_file(dev, &bin_attr_crb);
4373 device_remove_bin_file(dev, &bin_attr_mem);
4374 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4375 return;
4376 device_remove_bin_file(dev, &bin_attr_pci_config);
4377 device_remove_file(dev, &dev_attr_beacon);
4378 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4379 return;
4380 device_remove_bin_file(dev, &bin_attr_esw_config);
4381 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4382 return;
4383 device_remove_bin_file(dev, &bin_attr_npar_config);
4384 device_remove_bin_file(dev, &bin_attr_pm_config);
4385 device_remove_bin_file(dev, &bin_attr_esw_stats);
4386}
4387
4388#ifdef CONFIG_INET 2834#ifdef CONFIG_INET
4389 2835
4390#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) 2836#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
@@ -4523,7 +2969,7 @@ static void
4523qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event) 2969qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4524{ } 2970{ }
4525#endif 2971#endif
4526static const struct pci_error_handlers qlcnic_err_handler = { 2972static struct pci_error_handlers qlcnic_err_handler = {
4527 .error_detected = qlcnic_io_error_detected, 2973 .error_detected = qlcnic_io_error_detected,
4528 .slot_reset = qlcnic_io_slot_reset, 2974 .slot_reset = qlcnic_io_slot_reset,
4529 .resume = qlcnic_io_resume, 2975 .resume = qlcnic_io_resume,
@@ -4533,7 +2979,7 @@ static struct pci_driver qlcnic_driver = {
4533 .name = qlcnic_driver_name, 2979 .name = qlcnic_driver_name,
4534 .id_table = qlcnic_pci_tbl, 2980 .id_table = qlcnic_pci_tbl,
4535 .probe = qlcnic_probe, 2981 .probe = qlcnic_probe,
4536 .remove = __devexit_p(qlcnic_remove), 2982 .remove = qlcnic_remove,
4537#ifdef CONFIG_PM 2983#ifdef CONFIG_PM
4538 .suspend = qlcnic_suspend, 2984 .suspend = qlcnic_suspend,
4539 .resume = qlcnic_resume, 2985 .resume = qlcnic_resume,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644
index 000000000000..12ff29270745
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -0,0 +1,629 @@
1#include "qlcnic.h"
2#include "qlcnic_hdr.h"
3
4#include <net/ip.h>
5
/* Opcode bits for QLCNIC_DUMP_READ_CTRL template entries. */
#define QLCNIC_DUMP_WCRB	BIT_0
#define QLCNIC_DUMP_RWCRB	BIT_1
#define QLCNIC_DUMP_ANDCRB	BIT_2
#define QLCNIC_DUMP_ORCRB	BIT_3
#define QLCNIC_DUMP_POLLCRB	BIT_4
#define QLCNIC_DUMP_RD_SAVE	BIT_5
#define QLCNIC_DUMP_WRT_SAVED	BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
/* NOTE(review): QLCNIC_DUMP_SKIP deliberately(?) shares BIT_7 with
 * QLCNIC_DUMP_MOD_SAVE_ST — SKIP is tested against the entry header's
 * flags field, not the ctrl opcode field; confirm against the firmware
 * dump template spec before changing either value. */
#define QLCNIC_DUMP_SKIP	BIT_7

#define QLCNIC_DUMP_MASK_MAX	0xff
17
18struct qlcnic_common_entry_hdr {
19 u32 type;
20 u32 offset;
21 u32 cap_size;
22 u8 mask;
23 u8 rsvd[2];
24 u8 flags;
25} __packed;
26
27struct __crb {
28 u32 addr;
29 u8 stride;
30 u8 rsvd1[3];
31 u32 data_size;
32 u32 no_ops;
33 u32 rsvd2[4];
34} __packed;
35
36struct __ctrl {
37 u32 addr;
38 u8 stride;
39 u8 index_a;
40 u16 timeout;
41 u32 data_size;
42 u32 no_ops;
43 u8 opcode;
44 u8 index_v;
45 u8 shl_val;
46 u8 shr_val;
47 u32 val1;
48 u32 val2;
49 u32 val3;
50} __packed;
51
52struct __cache {
53 u32 addr;
54 u16 stride;
55 u16 init_tag_val;
56 u32 size;
57 u32 no_ops;
58 u32 ctrl_addr;
59 u32 ctrl_val;
60 u32 read_addr;
61 u8 read_addr_stride;
62 u8 read_addr_num;
63 u8 rsvd1[2];
64} __packed;
65
66struct __ocm {
67 u8 rsvd[8];
68 u32 size;
69 u32 no_ops;
70 u8 rsvd1[8];
71 u32 read_addr;
72 u32 read_addr_stride;
73} __packed;
74
75struct __mem {
76 u8 rsvd[24];
77 u32 addr;
78 u32 size;
79} __packed;
80
81struct __mux {
82 u32 addr;
83 u8 rsvd[4];
84 u32 size;
85 u32 no_ops;
86 u32 val;
87 u32 val_stride;
88 u32 read_addr;
89 u8 rsvd2[4];
90} __packed;
91
92struct __queue {
93 u32 sel_addr;
94 u16 stride;
95 u8 rsvd[2];
96 u32 size;
97 u32 no_ops;
98 u8 rsvd2[8];
99 u32 read_addr;
100 u8 read_addr_stride;
101 u8 read_addr_cnt;
102 u8 rsvd3[2];
103} __packed;
104
105struct qlcnic_dump_entry {
106 struct qlcnic_common_entry_hdr hdr;
107 union {
108 struct __crb crb;
109 struct __cache cache;
110 struct __ocm ocm;
111 struct __mem mem;
112 struct __mux mux;
113 struct __queue que;
114 struct __ctrl ctrl;
115 } region;
116} __packed;
117
/* Entry type codes found in hdr.type; values are firmware-defined and
 * must not be renumbered. */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};
140
141struct qlcnic_dump_operations {
142 enum qlcnic_minidump_opcode opcode;
143 u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
144 __le32 *);
145};
146
147static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
148{
149 u32 dest;
150 void __iomem *window_reg;
151
152 dest = addr & 0xFFFF0000;
153 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
154 writel(dest, window_reg);
155 readl(window_reg);
156 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
157 *data = readl(window_reg);
158}
159
160static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
161{
162 u32 dest;
163 void __iomem *window_reg;
164
165 dest = addr & 0xFFFF0000;
166 window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
167 writel(dest, window_reg);
168 readl(window_reg);
169 window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
170 writel(data, window_reg);
171 readl(window_reg);
172}
173
174/* FW dump related functions */
175static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
176 struct qlcnic_dump_entry *entry, __le32 *buffer)
177{
178 int i;
179 u32 addr, data;
180 struct __crb *crb = &entry->region.crb;
181 void __iomem *base = adapter->ahw->pci_base0;
182
183 addr = crb->addr;
184
185 for (i = 0; i < crb->no_ops; i++) {
186 qlcnic_read_dump_reg(addr, base, &data);
187 *buffer++ = cpu_to_le32(addr);
188 *buffer++ = cpu_to_le32(data);
189 addr += crb->stride;
190 }
191 return crb->no_ops * 2 * sizeof(u32);
192}
193
194static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
195 struct qlcnic_dump_entry *entry, __le32 *buffer)
196{
197 int i, k, timeout = 0;
198 void __iomem *base = adapter->ahw->pci_base0;
199 u32 addr, data;
200 u8 opcode, no_ops;
201 struct __ctrl *ctr = &entry->region.ctrl;
202 struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
203
204 addr = ctr->addr;
205 no_ops = ctr->no_ops;
206
207 for (i = 0; i < no_ops; i++) {
208 k = 0;
209 opcode = 0;
210 for (k = 0; k < 8; k++) {
211 if (!(ctr->opcode & (1 << k)))
212 continue;
213 switch (1 << k) {
214 case QLCNIC_DUMP_WCRB:
215 qlcnic_write_dump_reg(addr, base, ctr->val1);
216 break;
217 case QLCNIC_DUMP_RWCRB:
218 qlcnic_read_dump_reg(addr, base, &data);
219 qlcnic_write_dump_reg(addr, base, data);
220 break;
221 case QLCNIC_DUMP_ANDCRB:
222 qlcnic_read_dump_reg(addr, base, &data);
223 qlcnic_write_dump_reg(addr, base,
224 data & ctr->val2);
225 break;
226 case QLCNIC_DUMP_ORCRB:
227 qlcnic_read_dump_reg(addr, base, &data);
228 qlcnic_write_dump_reg(addr, base,
229 data | ctr->val3);
230 break;
231 case QLCNIC_DUMP_POLLCRB:
232 while (timeout <= ctr->timeout) {
233 qlcnic_read_dump_reg(addr, base, &data);
234 if ((data & ctr->val2) == ctr->val1)
235 break;
236 msleep(1);
237 timeout++;
238 }
239 if (timeout > ctr->timeout) {
240 dev_info(&adapter->pdev->dev,
241 "Timed out, aborting poll CRB\n");
242 return -EINVAL;
243 }
244 break;
245 case QLCNIC_DUMP_RD_SAVE:
246 if (ctr->index_a)
247 addr = t_hdr->saved_state[ctr->index_a];
248 qlcnic_read_dump_reg(addr, base, &data);
249 t_hdr->saved_state[ctr->index_v] = data;
250 break;
251 case QLCNIC_DUMP_WRT_SAVED:
252 if (ctr->index_v)
253 data = t_hdr->saved_state[ctr->index_v];
254 else
255 data = ctr->val1;
256 if (ctr->index_a)
257 addr = t_hdr->saved_state[ctr->index_a];
258 qlcnic_write_dump_reg(addr, base, data);
259 break;
260 case QLCNIC_DUMP_MOD_SAVE_ST:
261 data = t_hdr->saved_state[ctr->index_v];
262 data <<= ctr->shl_val;
263 data >>= ctr->shr_val;
264 if (ctr->val2)
265 data &= ctr->val2;
266 data |= ctr->val3;
267 data += ctr->val1;
268 t_hdr->saved_state[ctr->index_v] = data;
269 break;
270 default:
271 dev_info(&adapter->pdev->dev,
272 "Unknown opcode\n");
273 break;
274 }
275 }
276 addr += ctr->stride;
277 }
278 return 0;
279}
280
281static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
282 struct qlcnic_dump_entry *entry, __le32 *buffer)
283{
284 int loop;
285 u32 val, data = 0;
286 struct __mux *mux = &entry->region.mux;
287 void __iomem *base = adapter->ahw->pci_base0;
288
289 val = mux->val;
290 for (loop = 0; loop < mux->no_ops; loop++) {
291 qlcnic_write_dump_reg(mux->addr, base, val);
292 qlcnic_read_dump_reg(mux->read_addr, base, &data);
293 *buffer++ = cpu_to_le32(val);
294 *buffer++ = cpu_to_le32(data);
295 val += mux->val_stride;
296 }
297 return 2 * mux->no_ops * sizeof(u32);
298}
299
300static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
301 struct qlcnic_dump_entry *entry, __le32 *buffer)
302{
303 int i, loop;
304 u32 cnt, addr, data, que_id = 0;
305 void __iomem *base = adapter->ahw->pci_base0;
306 struct __queue *que = &entry->region.que;
307
308 addr = que->read_addr;
309 cnt = que->read_addr_cnt;
310
311 for (loop = 0; loop < que->no_ops; loop++) {
312 qlcnic_write_dump_reg(que->sel_addr, base, que_id);
313 addr = que->read_addr;
314 for (i = 0; i < cnt; i++) {
315 qlcnic_read_dump_reg(addr, base, &data);
316 *buffer++ = cpu_to_le32(data);
317 addr += que->read_addr_stride;
318 }
319 que_id += que->stride;
320 }
321 return que->no_ops * cnt * sizeof(u32);
322}
323
324static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
325 struct qlcnic_dump_entry *entry, __le32 *buffer)
326{
327 int i;
328 u32 data;
329 void __iomem *addr;
330 struct __ocm *ocm = &entry->region.ocm;
331
332 addr = adapter->ahw->pci_base0 + ocm->read_addr;
333 for (i = 0; i < ocm->no_ops; i++) {
334 data = readl(addr);
335 *buffer++ = cpu_to_le32(data);
336 addr += ocm->read_addr_stride;
337 }
338 return ocm->no_ops * sizeof(u32);
339}
340
341static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
342 struct qlcnic_dump_entry *entry, __le32 *buffer)
343{
344 int i, count = 0;
345 u32 fl_addr, size, val, lck_val, addr;
346 struct __mem *rom = &entry->region.mem;
347 void __iomem *base = adapter->ahw->pci_base0;
348
349 fl_addr = rom->addr;
350 size = rom->size/4;
351lock_try:
352 lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
353 if (!lck_val && count < MAX_CTL_CHECK) {
354 msleep(10);
355 count++;
356 goto lock_try;
357 }
358 writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
359 for (i = 0; i < size; i++) {
360 addr = fl_addr & 0xFFFF0000;
361 qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
362 addr = LSW(fl_addr) + FLASH_ROM_DATA;
363 qlcnic_read_dump_reg(addr, base, &val);
364 fl_addr += 4;
365 *buffer++ = cpu_to_le32(val);
366 }
367 readl(base + QLCNIC_FLASH_SEM2_ULK);
368 return rom->size;
369}
370
371static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
372 struct qlcnic_dump_entry *entry, __le32 *buffer)
373{
374 int i;
375 u32 cnt, val, data, addr;
376 void __iomem *base = adapter->ahw->pci_base0;
377 struct __cache *l1 = &entry->region.cache;
378
379 val = l1->init_tag_val;
380
381 for (i = 0; i < l1->no_ops; i++) {
382 qlcnic_write_dump_reg(l1->addr, base, val);
383 qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
384 addr = l1->read_addr;
385 cnt = l1->read_addr_num;
386 while (cnt) {
387 qlcnic_read_dump_reg(addr, base, &data);
388 *buffer++ = cpu_to_le32(data);
389 addr += l1->read_addr_stride;
390 cnt--;
391 }
392 val += l1->stride;
393 }
394 return l1->no_ops * l1->read_addr_num * sizeof(u32);
395}
396
397static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
398 struct qlcnic_dump_entry *entry, __le32 *buffer)
399{
400 int i;
401 u32 cnt, val, data, addr;
402 u8 poll_mask, poll_to, time_out = 0;
403 void __iomem *base = adapter->ahw->pci_base0;
404 struct __cache *l2 = &entry->region.cache;
405
406 val = l2->init_tag_val;
407 poll_mask = LSB(MSW(l2->ctrl_val));
408 poll_to = MSB(MSW(l2->ctrl_val));
409
410 for (i = 0; i < l2->no_ops; i++) {
411 qlcnic_write_dump_reg(l2->addr, base, val);
412 if (LSW(l2->ctrl_val))
413 qlcnic_write_dump_reg(l2->ctrl_addr, base,
414 LSW(l2->ctrl_val));
415 if (!poll_mask)
416 goto skip_poll;
417 do {
418 qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
419 if (!(data & poll_mask))
420 break;
421 msleep(1);
422 time_out++;
423 } while (time_out <= poll_to);
424
425 if (time_out > poll_to) {
426 dev_err(&adapter->pdev->dev,
427 "Timeout exceeded in %s, aborting dump\n",
428 __func__);
429 return -EINVAL;
430 }
431skip_poll:
432 addr = l2->read_addr;
433 cnt = l2->read_addr_num;
434 while (cnt) {
435 qlcnic_read_dump_reg(addr, base, &data);
436 *buffer++ = cpu_to_le32(data);
437 addr += l2->read_addr_stride;
438 cnt--;
439 }
440 val += l2->stride;
441 }
442 return l2->no_ops * l2->read_addr_num * sizeof(u32);
443}
444
445static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
446 struct qlcnic_dump_entry *entry, __le32 *buffer)
447{
448 u32 addr, data, test, ret = 0;
449 int i, reg_read;
450 struct __mem *mem = &entry->region.mem;
451 void __iomem *base = adapter->ahw->pci_base0;
452
453 reg_read = mem->size;
454 addr = mem->addr;
455 /* check for data size of multiple of 16 and 16 byte alignment */
456 if ((addr & 0xf) || (reg_read%16)) {
457 dev_info(&adapter->pdev->dev,
458 "Unaligned memory addr:0x%x size:0x%x\n",
459 addr, reg_read);
460 return -EINVAL;
461 }
462
463 mutex_lock(&adapter->ahw->mem_lock);
464
465 while (reg_read != 0) {
466 qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
467 qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
468 qlcnic_write_dump_reg(MIU_TEST_CTR, base,
469 TA_CTL_ENABLE | TA_CTL_START);
470
471 for (i = 0; i < MAX_CTL_CHECK; i++) {
472 qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
473 if (!(test & TA_CTL_BUSY))
474 break;
475 }
476 if (i == MAX_CTL_CHECK) {
477 if (printk_ratelimit()) {
478 dev_err(&adapter->pdev->dev,
479 "failed to read through agent\n");
480 ret = -EINVAL;
481 goto out;
482 }
483 }
484 for (i = 0; i < 4; i++) {
485 qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
486 &data);
487 *buffer++ = cpu_to_le32(data);
488 }
489 addr += 16;
490 reg_read -= 16;
491 ret += 16;
492 }
493out:
494 mutex_unlock(&adapter->ahw->mem_lock);
495 return mem->size;
496}
497
/* Handler for template entries that carry no capture data (NOP, the
 * template header and the end marker): flag the entry as skipped and
 * contribute zero bytes to the dump.
 */
static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
		struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}
504
/* Opcode -> handler dispatch table for minidump template entries.
 * qlcnic_dump_fw() scans this table linearly and uses the first
 * matching opcode.  Several opcodes intentionally share a handler
 * (the four L1 tags, the four L2 tags, and both ROM reads).
 */
static const struct qlcnic_dump_operations fw_dump_ops[] = {
	{ QLCNIC_DUMP_NOP, qlcnic_dump_nop },
	{ QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
	{ QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
	{ QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
	{ QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
	{ QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
	{ QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
	{ QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
	{ QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
	{ QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
	{ QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
	{ QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
};
527
528/* Walk the template and collect dump for each entry in the dump template */
529static int
530qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
531 u32 size)
532{
533 int ret = 1;
534 if (size != entry->hdr.cap_size) {
535 dev_info(dev,
536 "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
537 entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
538 dev_info(dev, "Aborting further dump capture\n");
539 ret = 0;
540 }
541 return ret;
542}
543
544int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
545{
546 __le32 *buffer;
547 char mesg[64];
548 char *msg[] = {mesg, NULL};
549 int i, k, ops_cnt, ops_index, dump_size = 0;
550 u32 entry_offset, dump, no_entries, buf_offset = 0;
551 struct qlcnic_dump_entry *entry;
552 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
553 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
554
555 if (fw_dump->clr) {
556 dev_info(&adapter->pdev->dev,
557 "Previous dump not cleared, not capturing dump\n");
558 return -EIO;
559 }
560 /* Calculate the size for dump data area only */
561 for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
562 if (i & tmpl_hdr->drv_cap_mask)
563 dump_size += tmpl_hdr->cap_sizes[k];
564 if (!dump_size)
565 return -EIO;
566
567 fw_dump->data = vzalloc(dump_size);
568 if (!fw_dump->data) {
569 dev_info(&adapter->pdev->dev,
570 "Unable to allocate (%d KB) for fw dump\n",
571 dump_size / 1024);
572 return -ENOMEM;
573 }
574 buffer = fw_dump->data;
575 fw_dump->size = dump_size;
576 no_entries = tmpl_hdr->num_entries;
577 ops_cnt = ARRAY_SIZE(fw_dump_ops);
578 entry_offset = tmpl_hdr->offset;
579 tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
580 tmpl_hdr->sys_info[1] = adapter->fw_version;
581
582 for (i = 0; i < no_entries; i++) {
583 entry = (void *)tmpl_hdr + entry_offset;
584 if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
585 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
586 entry_offset += entry->hdr.offset;
587 continue;
588 }
589 /* Find the handler for this entry */
590 ops_index = 0;
591 while (ops_index < ops_cnt) {
592 if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
593 break;
594 ops_index++;
595 }
596 if (ops_index == ops_cnt) {
597 dev_info(&adapter->pdev->dev,
598 "Invalid entry type %d, exiting dump\n",
599 entry->hdr.type);
600 goto error;
601 }
602 /* Collect dump for this entry */
603 dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
604 if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
605 dump))
606 entry->hdr.flags |= QLCNIC_DUMP_SKIP;
607 buf_offset += entry->hdr.cap_size;
608 entry_offset += entry->hdr.offset;
609 buffer = fw_dump->data + buf_offset;
610 }
611 if (dump_size != buf_offset) {
612 dev_info(&adapter->pdev->dev,
613 "Captured(%d) and expected size(%d) do not match\n",
614 buf_offset, dump_size);
615 goto error;
616 } else {
617 fw_dump->clr = 1;
618 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
619 adapter->netdev->name);
620 dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
621 fw_dump->size);
622 /* Send a udev event to notify availability of FW dump */
623 kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
624 return 0;
625 }
626error:
627 vfree(fw_dump->data);
628 return -EINVAL;
629}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644
index 000000000000..341d37c867ff
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -0,0 +1,960 @@
1#include <linux/slab.h>
2#include <linux/vmalloc.h>
3#include <linux/interrupt.h>
4
5#include "qlcnic.h"
6
7#include <linux/swab.h>
8#include <linux/dma-mapping.h>
9#include <net/ip.h>
10#include <linux/ipv6.h>
11#include <linux/inetdevice.h>
12#include <linux/sysfs.h>
13#include <linux/aer.h>
14#include <linux/log2.h>
15
/* Virtual-function stub: bridged mode cannot be configured on a VF. */
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
	return -EOPNOTSUPP;
}
20
/* Virtual-function stub: the LED beacon cannot be driven from a VF. */
int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
	return -EOPNOTSUPP;
}
25
26static ssize_t qlcnic_store_bridged_mode(struct device *dev,
27 struct device_attribute *attr,
28 const char *buf, size_t len)
29{
30 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
31 unsigned long new;
32 int ret = -EINVAL;
33
34 if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
35 goto err_out;
36
37 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
38 goto err_out;
39
40 if (strict_strtoul(buf, 2, &new))
41 goto err_out;
42
43 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
44 ret = len;
45
46err_out:
47 return ret;
48}
49
50static ssize_t qlcnic_show_bridged_mode(struct device *dev,
51 struct device_attribute *attr,
52 char *buf)
53{
54 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
55 int bridged_mode = 0;
56
57 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
58 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
59
60 return sprintf(buf, "%d\n", bridged_mode);
61}
62
63static ssize_t qlcnic_store_diag_mode(struct device *dev,
64 struct device_attribute *attr,
65 const char *buf, size_t len)
66{
67 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
68 unsigned long new;
69
70 if (strict_strtoul(buf, 2, &new))
71 return -EINVAL;
72
73 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
74 adapter->flags ^= QLCNIC_DIAG_ENABLED;
75
76 return len;
77}
78
79static ssize_t qlcnic_show_diag_mode(struct device *dev,
80 struct device_attribute *attr, char *buf)
81{
82 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
83
84 return sprintf(buf, "%d\n",
85 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
86}
87
88static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
89 u8 *state, u8 *rate)
90{
91 *rate = LSB(beacon);
92 *state = MSB(beacon);
93
94 QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
95
96 if (!*state) {
97 *rate = __QLCNIC_MAX_LED_RATE;
98 return 0;
99 } else if (*state > __QLCNIC_MAX_LED_STATE) {
100 return -EINVAL;
101 }
102
103 if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
104 return -EINVAL;
105
106 return 0;
107}
108
109static ssize_t qlcnic_store_beacon(struct device *dev,
110 struct device_attribute *attr,
111 const char *buf, size_t len)
112{
113 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
114 int max_sds_rings = adapter->max_sds_rings;
115 u16 beacon;
116 u8 b_state, b_rate;
117 int err;
118
119 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
120 dev_warn(dev,
121 "LED test not supported in non privileged mode\n");
122 return -EOPNOTSUPP;
123 }
124
125 if (len != sizeof(u16))
126 return QL_STATUS_INVALID_PARAM;
127
128 memcpy(&beacon, buf, sizeof(u16));
129 err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
130 if (err)
131 return err;
132
133 if (adapter->ahw->beacon_state == b_state)
134 return len;
135
136 rtnl_lock();
137
138 if (!adapter->ahw->beacon_state)
139 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
140 rtnl_unlock();
141 return -EBUSY;
142 }
143
144 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
145 err = -EIO;
146 goto out;
147 }
148
149 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
150 err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
151 if (err)
152 goto out;
153 set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
154 }
155
156 err = qlcnic_config_led(adapter, b_state, b_rate);
157
158 if (!err) {
159 err = len;
160 adapter->ahw->beacon_state = b_state;
161 }
162
163 if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
164 qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
165
166 out:
167 if (!adapter->ahw->beacon_state)
168 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
169 rtnl_unlock();
170
171 return err;
172}
173
174static ssize_t qlcnic_show_beacon(struct device *dev,
175 struct device_attribute *attr, char *buf)
176{
177 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
178
179 return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
180}
181
182static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
183 loff_t offset, size_t size)
184{
185 size_t crb_size = 4;
186
187 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
188 return -EIO;
189
190 if (offset < QLCNIC_PCI_CRBSPACE) {
191 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
192 QLCNIC_PCI_CAMQM_END))
193 crb_size = 8;
194 else
195 return -EINVAL;
196 }
197
198 if ((size != crb_size) || (offset & (crb_size-1)))
199 return -EINVAL;
200
201 return 0;
202}
203
204static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
205 struct bin_attribute *attr, char *buf,
206 loff_t offset, size_t size)
207{
208 struct device *dev = container_of(kobj, struct device, kobj);
209 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
210 u32 data;
211 u64 qmdata;
212 int ret;
213
214 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
215 if (ret != 0)
216 return ret;
217
218 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
219 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
220 memcpy(buf, &qmdata, size);
221 } else {
222 data = QLCRD32(adapter, offset);
223 memcpy(buf, &data, size);
224 }
225 return size;
226}
227
228static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
229 struct bin_attribute *attr, char *buf,
230 loff_t offset, size_t size)
231{
232 struct device *dev = container_of(kobj, struct device, kobj);
233 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
234 u32 data;
235 u64 qmdata;
236 int ret;
237
238 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
239 if (ret != 0)
240 return ret;
241
242 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
243 memcpy(&qmdata, buf, size);
244 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
245 } else {
246 memcpy(&data, buf, size);
247 QLCWR32(adapter, offset, data);
248 }
249 return size;
250}
251
252static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
253 loff_t offset, size_t size)
254{
255 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
256 return -EIO;
257
258 if ((size != 8) || (offset & 0x7))
259 return -EIO;
260
261 return 0;
262}
263
264static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
265 struct bin_attribute *attr, char *buf,
266 loff_t offset, size_t size)
267{
268 struct device *dev = container_of(kobj, struct device, kobj);
269 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
270 u64 data;
271 int ret;
272
273 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
274 if (ret != 0)
275 return ret;
276
277 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
278 return -EIO;
279
280 memcpy(buf, &data, size);
281
282 return size;
283}
284
285static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
286 struct bin_attribute *attr, char *buf,
287 loff_t offset, size_t size)
288{
289 struct device *dev = container_of(kobj, struct device, kobj);
290 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
291 u64 data;
292 int ret;
293
294 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
295 if (ret != 0)
296 return ret;
297
298 memcpy(&data, buf, size);
299
300 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
301 return -EIO;
302
303 return size;
304}
305
306static int validate_pm_config(struct qlcnic_adapter *adapter,
307 struct qlcnic_pm_func_cfg *pm_cfg, int count)
308{
309 u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
310 int i;
311
312 for (i = 0; i < count; i++) {
313 src_pci_func = pm_cfg[i].pci_func;
314 dest_pci_func = pm_cfg[i].dest_npar;
315 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
316 dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
317 return QL_STATUS_INVALID_PARAM;
318
319 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
320 return QL_STATUS_INVALID_PARAM;
321
322 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
323 return QL_STATUS_INVALID_PARAM;
324
325 s_esw_id = adapter->npars[src_pci_func].phy_port;
326 d_esw_id = adapter->npars[dest_pci_func].phy_port;
327
328 if (s_esw_id != d_esw_id)
329 return QL_STATUS_INVALID_PARAM;
330 }
331 return 0;
332
333}
334
335static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
336 struct kobject *kobj,
337 struct bin_attribute *attr,
338 char *buf, loff_t offset,
339 size_t size)
340{
341 struct device *dev = container_of(kobj, struct device, kobj);
342 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
343 struct qlcnic_pm_func_cfg *pm_cfg;
344 u32 id, action, pci_func;
345 int count, rem, i, ret;
346
347 count = size / sizeof(struct qlcnic_pm_func_cfg);
348 rem = size % sizeof(struct qlcnic_pm_func_cfg);
349 if (rem)
350 return QL_STATUS_INVALID_PARAM;
351
352 pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
353
354 ret = validate_pm_config(adapter, pm_cfg, count);
355 if (ret)
356 return ret;
357 for (i = 0; i < count; i++) {
358 pci_func = pm_cfg[i].pci_func;
359 action = !!pm_cfg[i].action;
360 id = adapter->npars[pci_func].phy_port;
361 ret = qlcnic_config_port_mirroring(adapter, id, action,
362 pci_func);
363 if (ret)
364 return ret;
365 }
366
367 for (i = 0; i < count; i++) {
368 pci_func = pm_cfg[i].pci_func;
369 id = adapter->npars[pci_func].phy_port;
370 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
371 adapter->npars[pci_func].dest_npar = id;
372 }
373 return size;
374}
375
376static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
377 struct kobject *kobj,
378 struct bin_attribute *attr,
379 char *buf, loff_t offset,
380 size_t size)
381{
382 struct device *dev = container_of(kobj, struct device, kobj);
383 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
384 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
385 int i;
386
387 if (size != sizeof(pm_cfg))
388 return QL_STATUS_INVALID_PARAM;
389
390 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
391 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
392 continue;
393 pm_cfg[i].action = adapter->npars[i].enable_pm;
394 pm_cfg[i].dest_npar = 0;
395 pm_cfg[i].pci_func = i;
396 }
397 memcpy(buf, &pm_cfg, size);
398
399 return size;
400}
401
402static int validate_esw_config(struct qlcnic_adapter *adapter,
403 struct qlcnic_esw_func_cfg *esw_cfg, int count)
404{
405 u32 op_mode;
406 u8 pci_func;
407 int i;
408
409 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
410
411 for (i = 0; i < count; i++) {
412 pci_func = esw_cfg[i].pci_func;
413 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
414 return QL_STATUS_INVALID_PARAM;
415
416 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
417 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
418 return QL_STATUS_INVALID_PARAM;
419 }
420
421 switch (esw_cfg[i].op_mode) {
422 case QLCNIC_PORT_DEFAULTS:
423 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
424 QLCNIC_NON_PRIV_FUNC) {
425 if (esw_cfg[i].mac_anti_spoof != 0)
426 return QL_STATUS_INVALID_PARAM;
427 if (esw_cfg[i].mac_override != 1)
428 return QL_STATUS_INVALID_PARAM;
429 if (esw_cfg[i].promisc_mode != 1)
430 return QL_STATUS_INVALID_PARAM;
431 }
432 break;
433 case QLCNIC_ADD_VLAN:
434 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
435 return QL_STATUS_INVALID_PARAM;
436 if (!esw_cfg[i].op_type)
437 return QL_STATUS_INVALID_PARAM;
438 break;
439 case QLCNIC_DEL_VLAN:
440 if (!esw_cfg[i].op_type)
441 return QL_STATUS_INVALID_PARAM;
442 break;
443 default:
444 return QL_STATUS_INVALID_PARAM;
445 }
446 }
447 return 0;
448}
449
/*
 * sysfs store of eswitch configuration records.  After validation, a
 * management function pushes each record to the firmware switch; a
 * record addressing this function's own port is then refreshed from
 * hardware and applied locally (features or VLAN).  Finally, on a
 * management function, the npar cache is updated to mirror what was
 * committed.  Returns @size on success, or a validation/firmware
 * status code.
 */
static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
		struct kobject *kobj,
		struct bin_attribute *attr,
		char *buf, loff_t offset,
		size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	struct qlcnic_esw_func_cfg *esw_cfg;
	struct qlcnic_npar_info *npar;
	int count, rem, i, ret;
	u8 pci_func, op_mode = 0;

	/* payload must be a whole number of records */
	count = size / sizeof(struct qlcnic_esw_func_cfg);
	rem = size % sizeof(struct qlcnic_esw_func_cfg);
	if (rem)
		return QL_STATUS_INVALID_PARAM;

	esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
	ret = validate_esw_config(adapter, esw_cfg, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* only a management function programs the switch */
		if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
			if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
				return QL_STATUS_INVALID_PARAM;
		}

		if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
			continue;

		/* refresh this record from hardware, preserving the
		 * requested op_mode and function number
		 */
		op_mode = esw_cfg[i].op_mode;
		qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
		esw_cfg[i].op_mode = op_mode;
		esw_cfg[i].pci_func = adapter->ahw->pci_func;

		switch (esw_cfg[i].op_mode) {
		case QLCNIC_PORT_DEFAULTS:
			qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
			break;
		case QLCNIC_ADD_VLAN:
			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
			break;
		case QLCNIC_DEL_VLAN:
			esw_cfg[i].vlan_id = 0;
			qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
			break;
		}
	}

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		goto out;

	/* mirror the committed settings into the npar cache */
	for (i = 0; i < count; i++) {
		pci_func = esw_cfg[i].pci_func;
		npar = &adapter->npars[pci_func];
		switch (esw_cfg[i].op_mode) {
		case QLCNIC_PORT_DEFAULTS:
			npar->promisc_mode = esw_cfg[i].promisc_mode;
			npar->mac_override = esw_cfg[i].mac_override;
			npar->offload_flags = esw_cfg[i].offload_flags;
			npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
			npar->discard_tagged = esw_cfg[i].discard_tagged;
			break;
		case QLCNIC_ADD_VLAN:
			npar->pvid = esw_cfg[i].vlan_id;
			break;
		case QLCNIC_DEL_VLAN:
			npar->pvid = 0;
			break;
		}
	}
out:
	return size;
}
526
527static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
528 struct kobject *kobj,
529 struct bin_attribute *attr,
530 char *buf, loff_t offset,
531 size_t size)
532{
533 struct device *dev = container_of(kobj, struct device, kobj);
534 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
535 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
536 u8 i;
537
538 if (size != sizeof(esw_cfg))
539 return QL_STATUS_INVALID_PARAM;
540
541 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
542 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
543 continue;
544 esw_cfg[i].pci_func = i;
545 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
546 return QL_STATUS_INVALID_PARAM;
547 }
548 memcpy(buf, &esw_cfg, size);
549
550 return size;
551}
552
553static int validate_npar_config(struct qlcnic_adapter *adapter,
554 struct qlcnic_npar_func_cfg *np_cfg,
555 int count)
556{
557 u8 pci_func, i;
558
559 for (i = 0; i < count; i++) {
560 pci_func = np_cfg[i].pci_func;
561 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
562 return QL_STATUS_INVALID_PARAM;
563
564 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
565 return QL_STATUS_INVALID_PARAM;
566
567 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
568 !IS_VALID_BW(np_cfg[i].max_bw))
569 return QL_STATUS_INVALID_PARAM;
570 }
571 return 0;
572}
573
574static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
575 struct kobject *kobj,
576 struct bin_attribute *attr,
577 char *buf, loff_t offset,
578 size_t size)
579{
580 struct device *dev = container_of(kobj, struct device, kobj);
581 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
582 struct qlcnic_info nic_info;
583 struct qlcnic_npar_func_cfg *np_cfg;
584 int i, count, rem, ret;
585 u8 pci_func;
586
587 count = size / sizeof(struct qlcnic_npar_func_cfg);
588 rem = size % sizeof(struct qlcnic_npar_func_cfg);
589 if (rem)
590 return QL_STATUS_INVALID_PARAM;
591
592 np_cfg = (struct qlcnic_npar_func_cfg *)buf;
593 ret = validate_npar_config(adapter, np_cfg, count);
594 if (ret)
595 return ret;
596
597 for (i = 0; i < count ; i++) {
598 pci_func = np_cfg[i].pci_func;
599 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
600 if (ret)
601 return ret;
602 nic_info.pci_func = pci_func;
603 nic_info.min_tx_bw = np_cfg[i].min_bw;
604 nic_info.max_tx_bw = np_cfg[i].max_bw;
605 ret = qlcnic_set_nic_info(adapter, &nic_info);
606 if (ret)
607 return ret;
608 adapter->npars[i].min_bw = nic_info.min_tx_bw;
609 adapter->npars[i].max_bw = nic_info.max_tx_bw;
610 }
611
612 return size;
613
614}
615
616static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
617 struct kobject *kobj,
618 struct bin_attribute *attr,
619 char *buf, loff_t offset,
620 size_t size)
621{
622 struct device *dev = container_of(kobj, struct device, kobj);
623 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
624 struct qlcnic_info nic_info;
625 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
626 int i, ret;
627
628 if (size != sizeof(np_cfg))
629 return QL_STATUS_INVALID_PARAM;
630
631 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
632 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
633 continue;
634 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
635 if (ret)
636 return ret;
637
638 np_cfg[i].pci_func = i;
639 np_cfg[i].op_mode = (u8)nic_info.op_mode;
640 np_cfg[i].port_num = nic_info.phys_port;
641 np_cfg[i].fw_capab = nic_info.capabilities;
642 np_cfg[i].min_bw = nic_info.min_tx_bw;
643 np_cfg[i].max_bw = nic_info.max_tx_bw;
644 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
645 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
646 }
647 memcpy(buf, &np_cfg, size);
648 return size;
649}
650
651static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
652 struct kobject *kobj,
653 struct bin_attribute *attr,
654 char *buf, loff_t offset,
655 size_t size)
656{
657 struct device *dev = container_of(kobj, struct device, kobj);
658 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
659 struct qlcnic_esw_statistics port_stats;
660 int ret;
661
662 if (size != sizeof(struct qlcnic_esw_statistics))
663 return QL_STATUS_INVALID_PARAM;
664
665 if (offset >= QLCNIC_MAX_PCI_FUNC)
666 return QL_STATUS_INVALID_PARAM;
667
668 memset(&port_stats, 0, size);
669 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
670 &port_stats.rx);
671 if (ret)
672 return ret;
673
674 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
675 &port_stats.tx);
676 if (ret)
677 return ret;
678
679 memcpy(buf, &port_stats, size);
680 return size;
681}
682
683static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
684 struct kobject *kobj,
685 struct bin_attribute *attr,
686 char *buf, loff_t offset,
687 size_t size)
688{
689 struct device *dev = container_of(kobj, struct device, kobj);
690 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
691 struct qlcnic_esw_statistics esw_stats;
692 int ret;
693
694 if (size != sizeof(struct qlcnic_esw_statistics))
695 return QL_STATUS_INVALID_PARAM;
696
697 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
698 return QL_STATUS_INVALID_PARAM;
699
700 memset(&esw_stats, 0, size);
701 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
702 &esw_stats.rx);
703 if (ret)
704 return ret;
705
706 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
707 &esw_stats.tx);
708 if (ret)
709 return ret;
710
711 memcpy(buf, &esw_stats, size);
712 return size;
713}
714
715static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
716 struct kobject *kobj,
717 struct bin_attribute *attr,
718 char *buf, loff_t offset,
719 size_t size)
720{
721 struct device *dev = container_of(kobj, struct device, kobj);
722 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
723 int ret;
724
725 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
726 return QL_STATUS_INVALID_PARAM;
727
728 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
729 QLCNIC_QUERY_RX_COUNTER);
730 if (ret)
731 return ret;
732
733 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
734 QLCNIC_QUERY_TX_COUNTER);
735 if (ret)
736 return ret;
737
738 return size;
739}
740
741static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
742 struct kobject *kobj,
743 struct bin_attribute *attr,
744 char *buf, loff_t offset,
745 size_t size)
746{
747 struct device *dev = container_of(kobj, struct device, kobj);
748 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
749 int ret;
750
751 if (offset >= QLCNIC_MAX_PCI_FUNC)
752 return QL_STATUS_INVALID_PARAM;
753
754 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
755 QLCNIC_QUERY_RX_COUNTER);
756 if (ret)
757 return ret;
758
759 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
760 QLCNIC_QUERY_TX_COUNTER);
761 if (ret)
762 return ret;
763
764 return size;
765}
766
767static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
768 struct kobject *kobj,
769 struct bin_attribute *attr,
770 char *buf, loff_t offset,
771 size_t size)
772{
773 struct device *dev = container_of(kobj, struct device, kobj);
774 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
775 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
776 struct qlcnic_pci_info *pci_info;
777 int i, ret;
778
779 if (size != sizeof(pci_cfg))
780 return QL_STATUS_INVALID_PARAM;
781
782 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
783 if (!pci_info)
784 return -ENOMEM;
785
786 ret = qlcnic_get_pci_info(adapter, pci_info);
787 if (ret) {
788 kfree(pci_info);
789 return ret;
790 }
791
792 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
793 pci_cfg[i].pci_func = pci_info[i].id;
794 pci_cfg[i].func_type = pci_info[i].type;
795 pci_cfg[i].port_num = pci_info[i].default_port;
796 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
797 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
798 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
799 }
800 memcpy(buf, &pci_cfg, size);
801 kfree(pci_info);
802 return size;
803}
804
/*
 * sysfs attribute descriptors exported under the PCI device.  The
 * bin_attribute entries use .size = 0, so no fixed length is advertised
 * and each read/write handler validates the requested size itself.
 */

/* bridged_mode: only registered when firmware reports bridging support */
static struct device_attribute dev_attr_bridged_mode = {
	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_bridged_mode,
	.store = qlcnic_store_bridged_mode,
};

static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_diag_mode,
	.store = qlcnic_store_diag_mode,
};

static struct device_attribute dev_attr_beacon = {
	.attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_beacon,
	.store = qlcnic_store_beacon,
};

/* raw CRB register window access (privileged diagnostics) */
static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_crb,
	.write = qlcnic_sysfs_write_crb,
};

/* raw adapter memory access (privileged diagnostics) */
static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_mem,
	.write = qlcnic_sysfs_write_mem,
};

static struct bin_attribute bin_attr_npar_config = {
	.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_npar_config,
	.write = qlcnic_sysfs_write_npar_config,
};

/* pci_config is read-only: no write handler is provided */
static struct bin_attribute bin_attr_pci_config = {
	.attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_pci_config,
	.write = NULL,
};

/* reading returns counters; writing clears them */
static struct bin_attribute bin_attr_port_stats = {
	.attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_get_port_stats,
	.write = qlcnic_sysfs_clear_port_stats,
};

/* reading returns counters; writing clears them */
static struct bin_attribute bin_attr_esw_stats = {
	.attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_get_esw_stats,
	.write = qlcnic_sysfs_clear_esw_stats,
};

static struct bin_attribute bin_attr_esw_config = {
	.attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_esw_config,
	.write = qlcnic_sysfs_write_esw_config,
};

static struct bin_attribute bin_attr_pm_config = {
	.attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_pm_config,
	.write = qlcnic_sysfs_write_pm_config,
};
878
879void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
880{
881 struct device *dev = &adapter->pdev->dev;
882
883 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
884 if (device_create_file(dev, &dev_attr_bridged_mode))
885 dev_warn(dev,
886 "failed to create bridged_mode sysfs entry\n");
887}
888
889void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
890{
891 struct device *dev = &adapter->pdev->dev;
892
893 if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
894 device_remove_file(dev, &dev_attr_bridged_mode);
895}
896
897void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
898{
899 struct device *dev = &adapter->pdev->dev;
900 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
901
902 if (device_create_bin_file(dev, &bin_attr_port_stats))
903 dev_info(dev, "failed to create port stats sysfs entry");
904
905 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
906 return;
907 if (device_create_file(dev, &dev_attr_diag_mode))
908 dev_info(dev, "failed to create diag_mode sysfs entry\n");
909 if (device_create_bin_file(dev, &bin_attr_crb))
910 dev_info(dev, "failed to create crb sysfs entry\n");
911 if (device_create_bin_file(dev, &bin_attr_mem))
912 dev_info(dev, "failed to create mem sysfs entry\n");
913
914 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
915 return;
916
917 if (device_create_bin_file(dev, &bin_attr_pci_config))
918 dev_info(dev, "failed to create pci config sysfs entry");
919 if (device_create_file(dev, &dev_attr_beacon))
920 dev_info(dev, "failed to create beacon sysfs entry");
921
922 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
923 return;
924 if (device_create_bin_file(dev, &bin_attr_esw_config))
925 dev_info(dev, "failed to create esw config sysfs entry");
926 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
927 return;
928 if (device_create_bin_file(dev, &bin_attr_npar_config))
929 dev_info(dev, "failed to create npar config sysfs entry");
930 if (device_create_bin_file(dev, &bin_attr_pm_config))
931 dev_info(dev, "failed to create pm config sysfs entry");
932 if (device_create_bin_file(dev, &bin_attr_esw_stats))
933 dev_info(dev, "failed to create eswitch stats sysfs entry");
934}
935
936void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
937{
938 struct device *dev = &adapter->pdev->dev;
939 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
940
941 device_remove_bin_file(dev, &bin_attr_port_stats);
942
943 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
944 return;
945 device_remove_file(dev, &dev_attr_diag_mode);
946 device_remove_bin_file(dev, &bin_attr_crb);
947 device_remove_bin_file(dev, &bin_attr_mem);
948 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
949 return;
950 device_remove_bin_file(dev, &bin_attr_pci_config);
951 device_remove_file(dev, &dev_attr_beacon);
952 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
953 return;
954 device_remove_bin_file(dev, &bin_attr_esw_config);
955 if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
956 return;
957 device_remove_bin_file(dev, &bin_attr_npar_config);
958 device_remove_bin_file(dev, &bin_attr_pm_config);
959 device_remove_bin_file(dev, &bin_attr_esw_stats);
960}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 58185b604b72..10093f0c4c0f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -86,7 +86,7 @@ exit:
86} 86}
87 87
88/* Read out the SERDES registers */ 88/* Read out the SERDES registers */
89static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 * data) 89static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
90{ 90{
91 int status; 91 int status;
92 92
@@ -364,7 +364,7 @@ exit:
364/* Read the 400 xgmac control/statistics registers 364/* Read the 400 xgmac control/statistics registers
365 * skipping unused locations. 365 * skipping unused locations.
366 */ 366 */
367static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf, 367static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
368 unsigned int other_function) 368 unsigned int other_function)
369{ 369{
370 int status = 0; 370 int status = 0;
@@ -405,7 +405,7 @@ static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 * buf,
405 return status; 405 return status;
406} 406}
407 407
408static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf) 408static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
409{ 409{
410 int status = 0; 410 int status = 0;
411 int i; 411 int i;
@@ -423,7 +423,7 @@ static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
423 return status; 423 return status;
424} 424}
425 425
426static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf) 426static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
427{ 427{
428 int i; 428 int i;
429 429
@@ -434,7 +434,7 @@ static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
434 } 434 }
435} 435}
436 436
437static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf) 437static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
438{ 438{
439 int i, status; 439 int i, status;
440 u32 value[3]; 440 u32 value[3];
@@ -471,7 +471,7 @@ err:
471 return status; 471 return status;
472} 472}
473 473
474static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf) 474static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
475{ 475{
476 int status; 476 int status;
477 u32 value, i; 477 u32 value, i;
@@ -496,7 +496,7 @@ err:
496} 496}
497 497
498/* Read the MPI Processor shadow registers */ 498/* Read the MPI Processor shadow registers */
499static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 * buf) 499static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
500{ 500{
501 u32 i; 501 u32 i;
502 int status; 502 int status;
@@ -515,7 +515,7 @@ end:
515} 515}
516 516
517/* Read the MPI Processor core registers */ 517/* Read the MPI Processor core registers */
518static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 * buf, 518static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
519 u32 offset, u32 count) 519 u32 offset, u32 count)
520{ 520{
521 int i, status = 0; 521 int i, status = 0;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b262d6156816..f80cd975daed 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4491,8 +4491,8 @@ static void ql_release_all(struct pci_dev *pdev)
4491 pci_set_drvdata(pdev, NULL); 4491 pci_set_drvdata(pdev, NULL);
4492} 4492}
4493 4493
4494static int __devinit ql_init_device(struct pci_dev *pdev, 4494static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4495 struct net_device *ndev, int cards_found) 4495 int cards_found)
4496{ 4496{
4497 struct ql_adapter *qdev = netdev_priv(ndev); 4497 struct ql_adapter *qdev = netdev_priv(ndev);
4498 int err = 0; 4498 int err = 0;
@@ -4656,8 +4656,8 @@ static void ql_timer(unsigned long data)
4656 mod_timer(&qdev->timer, jiffies + (5*HZ)); 4656 mod_timer(&qdev->timer, jiffies + (5*HZ));
4657} 4657}
4658 4658
4659static int __devinit qlge_probe(struct pci_dev *pdev, 4659static int qlge_probe(struct pci_dev *pdev,
4660 const struct pci_device_id *pci_entry) 4660 const struct pci_device_id *pci_entry)
4661{ 4661{
4662 struct net_device *ndev = NULL; 4662 struct net_device *ndev = NULL;
4663 struct ql_adapter *qdev = NULL; 4663 struct ql_adapter *qdev = NULL;
@@ -4729,7 +4729,7 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4729 return ql_clean_inbound_rx_ring(rx_ring, budget); 4729 return ql_clean_inbound_rx_ring(rx_ring, budget);
4730} 4730}
4731 4731
4732static void __devexit qlge_remove(struct pci_dev *pdev) 4732static void qlge_remove(struct pci_dev *pdev)
4733{ 4733{
4734 struct net_device *ndev = pci_get_drvdata(pdev); 4734 struct net_device *ndev = pci_get_drvdata(pdev);
4735 struct ql_adapter *qdev = netdev_priv(ndev); 4735 struct ql_adapter *qdev = netdev_priv(ndev);
@@ -4921,7 +4921,7 @@ static struct pci_driver qlge_driver = {
4921 .name = DRV_NAME, 4921 .name = DRV_NAME,
4922 .id_table = qlge_pci_tbl, 4922 .id_table = qlge_pci_tbl,
4923 .probe = qlge_probe, 4923 .probe = qlge_probe,
4924 .remove = __devexit_p(qlge_remove), 4924 .remove = qlge_remove,
4925#ifdef CONFIG_PM 4925#ifdef CONFIG_PM
4926 .suspend = qlge_suspend, 4926 .suspend = qlge_suspend,
4927 .resume = qlge_resume, 4927 .resume = qlge_resume,
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 557a26545d75..63c13125db6c 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -206,7 +206,7 @@ struct r6040_private {
206 int old_duplex; 206 int old_duplex;
207}; 207};
208 208
209static char version[] __devinitdata = DRV_NAME 209static char version[] = DRV_NAME
210 ": RDC R6040 NAPI net driver," 210 ": RDC R6040 NAPI net driver,"
211 "version "DRV_VERSION " (" DRV_RELDATE ")"; 211 "version "DRV_VERSION " (" DRV_RELDATE ")";
212 212
@@ -1073,8 +1073,7 @@ static int r6040_mii_probe(struct net_device *dev)
1073 return 0; 1073 return 0;
1074} 1074}
1075 1075
1076static int __devinit r6040_init_one(struct pci_dev *pdev, 1076static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1077 const struct pci_device_id *ent)
1078{ 1077{
1079 struct net_device *dev; 1078 struct net_device *dev;
1080 struct r6040_private *lp; 1079 struct r6040_private *lp;
@@ -1246,7 +1245,7 @@ err_out:
1246 return err; 1245 return err;
1247} 1246}
1248 1247
1249static void __devexit r6040_remove_one(struct pci_dev *pdev) 1248static void r6040_remove_one(struct pci_dev *pdev)
1250{ 1249{
1251 struct net_device *dev = pci_get_drvdata(pdev); 1250 struct net_device *dev = pci_get_drvdata(pdev);
1252 struct r6040_private *lp = netdev_priv(dev); 1251 struct r6040_private *lp = netdev_priv(dev);
@@ -1274,7 +1273,7 @@ static struct pci_driver r6040_driver = {
1274 .name = DRV_NAME, 1273 .name = DRV_NAME,
1275 .id_table = r6040_pci_tbl, 1274 .id_table = r6040_pci_tbl,
1276 .probe = r6040_init_one, 1275 .probe = r6040_init_one,
1277 .remove = __devexit_p(r6040_remove_one), 1276 .remove = r6040_remove_one,
1278}; 1277};
1279 1278
1280module_pci_driver(r6040_driver); 1279module_pci_driver(r6040_driver);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 609125a249d9..cb6fc5a743ca 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -648,6 +648,7 @@ static void cp_tx (struct cp_private *cp)
648{ 648{
649 unsigned tx_head = cp->tx_head; 649 unsigned tx_head = cp->tx_head;
650 unsigned tx_tail = cp->tx_tail; 650 unsigned tx_tail = cp->tx_tail;
651 unsigned bytes_compl = 0, pkts_compl = 0;
651 652
652 while (tx_tail != tx_head) { 653 while (tx_tail != tx_head) {
653 struct cp_desc *txd = cp->tx_ring + tx_tail; 654 struct cp_desc *txd = cp->tx_ring + tx_tail;
@@ -666,6 +667,9 @@ static void cp_tx (struct cp_private *cp)
666 le32_to_cpu(txd->opts1) & 0xffff, 667 le32_to_cpu(txd->opts1) & 0xffff,
667 PCI_DMA_TODEVICE); 668 PCI_DMA_TODEVICE);
668 669
670 bytes_compl += skb->len;
671 pkts_compl++;
672
669 if (status & LastFrag) { 673 if (status & LastFrag) {
670 if (status & (TxError | TxFIFOUnder)) { 674 if (status & (TxError | TxFIFOUnder)) {
671 netif_dbg(cp, tx_err, cp->dev, 675 netif_dbg(cp, tx_err, cp->dev,
@@ -697,6 +701,7 @@ static void cp_tx (struct cp_private *cp)
697 701
698 cp->tx_tail = tx_tail; 702 cp->tx_tail = tx_tail;
699 703
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
700 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) 705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
701 netif_wake_queue(cp->dev); 706 netif_wake_queue(cp->dev);
702} 707}
@@ -843,6 +848,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
843 wmb(); 848 wmb();
844 } 849 }
845 cp->tx_head = entry; 850 cp->tx_head = entry;
851
852 netdev_sent_queue(dev, skb->len);
846 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", 853 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
847 entry, skb->len); 854 entry, skb->len);
848 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) 855 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
@@ -937,6 +944,8 @@ static void cp_stop_hw (struct cp_private *cp)
937 944
938 cp->rx_tail = 0; 945 cp->rx_tail = 0;
939 cp->tx_head = cp->tx_tail = 0; 946 cp->tx_head = cp->tx_tail = 0;
947
948 netdev_reset_queue(cp->dev);
940} 949}
941 950
942static void cp_reset_hw (struct cp_private *cp) 951static void cp_reset_hw (struct cp_private *cp)
@@ -957,8 +966,38 @@ static void cp_reset_hw (struct cp_private *cp)
957 966
958static inline void cp_start_hw (struct cp_private *cp) 967static inline void cp_start_hw (struct cp_private *cp)
959{ 968{
969 dma_addr_t ring_dma;
970
960 cpw16(CpCmd, cp->cpcmd); 971 cpw16(CpCmd, cp->cpcmd);
972
973 /*
974 * These (at least TxRingAddr) need to be configured after the
975 * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
976 * (C+ Command Register) recommends that these and more be configured
977 * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
978 * it's been observed that the TxRingAddr is actually reset to garbage
979 * when C+ mode Tx is enabled in CpCmd.
980 */
981 cpw32_f(HiTxRingAddr, 0);
982 cpw32_f(HiTxRingAddr + 4, 0);
983
984 ring_dma = cp->ring_dma;
985 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
986 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
987
988 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
989 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
990 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
991
992 /*
993 * Strictly speaking, the datasheet says this should be enabled
994 * *before* setting the descriptor addresses. But what, then, would
995 * prevent it from doing DMA to random unconfigured addresses?
996 * This variant appears to work fine.
997 */
961 cpw8(Cmd, RxOn | TxOn); 998 cpw8(Cmd, RxOn | TxOn);
999
1000 netdev_reset_queue(cp->dev);
962} 1001}
963 1002
964static void cp_enable_irq(struct cp_private *cp) 1003static void cp_enable_irq(struct cp_private *cp)
@@ -969,7 +1008,6 @@ static void cp_enable_irq(struct cp_private *cp)
969static void cp_init_hw (struct cp_private *cp) 1008static void cp_init_hw (struct cp_private *cp)
970{ 1009{
971 struct net_device *dev = cp->dev; 1010 struct net_device *dev = cp->dev;
972 dma_addr_t ring_dma;
973 1011
974 cp_reset_hw(cp); 1012 cp_reset_hw(cp);
975 1013
@@ -992,17 +1030,6 @@ static void cp_init_hw (struct cp_private *cp)
992 1030
993 cpw8(Config5, cpr8(Config5) & PMEStatus); 1031 cpw8(Config5, cpr8(Config5) & PMEStatus);
994 1032
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1033 cpw16(MultiIntr, 0);
1007 1034
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1035 cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1197,18 +1224,16 @@ static void cp_tx_timeout(struct net_device *dev)
1197 cp_clean_rings(cp); 1224 cp_clean_rings(cp);
1198 rc = cp_init_rings(cp); 1225 rc = cp_init_rings(cp);
1199 cp_start_hw(cp); 1226 cp_start_hw(cp);
1227 cp_enable_irq(cp);
1200 1228
1201 netif_wake_queue(dev); 1229 netif_wake_queue(dev);
1202 1230
1203 spin_unlock_irqrestore(&cp->lock, flags); 1231 spin_unlock_irqrestore(&cp->lock, flags);
1204} 1232}
1205 1233
1206#ifdef BROKEN
1207static int cp_change_mtu(struct net_device *dev, int new_mtu) 1234static int cp_change_mtu(struct net_device *dev, int new_mtu)
1208{ 1235{
1209 struct cp_private *cp = netdev_priv(dev); 1236 struct cp_private *cp = netdev_priv(dev);
1210 int rc;
1211 unsigned long flags;
1212 1237
1213 /* check for invalid MTU, according to hardware limits */ 1238 /* check for invalid MTU, according to hardware limits */
1214 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU) 1239 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
@@ -1221,22 +1246,12 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
1221 return 0; 1246 return 0;
1222 } 1247 }
1223 1248
1224 spin_lock_irqsave(&cp->lock, flags); 1249 /* network IS up, close it, reset MTU, and come up again. */
1225 1250 cp_close(dev);
1226 cp_stop_hw(cp); /* stop h/w and free rings */
1227 cp_clean_rings(cp);
1228
1229 dev->mtu = new_mtu; 1251 dev->mtu = new_mtu;
1230 cp_set_rxbufsize(cp); /* set new rx buf size */ 1252 cp_set_rxbufsize(cp);
1231 1253 return cp_open(dev);
1232 rc = cp_init_rings(cp); /* realloc and restart h/w */
1233 cp_start_hw(cp);
1234
1235 spin_unlock_irqrestore(&cp->lock, flags);
1236
1237 return rc;
1238} 1254}
1239#endif /* BROKEN */
1240 1255
1241static const char mii_2_8139_map[8] = { 1256static const char mii_2_8139_map[8] = {
1242 BasicModeCtrl, 1257 BasicModeCtrl,
@@ -1812,9 +1827,7 @@ static const struct net_device_ops cp_netdev_ops = {
1812 .ndo_start_xmit = cp_start_xmit, 1827 .ndo_start_xmit = cp_start_xmit,
1813 .ndo_tx_timeout = cp_tx_timeout, 1828 .ndo_tx_timeout = cp_tx_timeout,
1814 .ndo_set_features = cp_set_features, 1829 .ndo_set_features = cp_set_features,
1815#ifdef BROKEN
1816 .ndo_change_mtu = cp_change_mtu, 1830 .ndo_change_mtu = cp_change_mtu,
1817#endif
1818 1831
1819#ifdef CONFIG_NET_POLL_CONTROLLER 1832#ifdef CONFIG_NET_POLL_CONTROLLER
1820 .ndo_poll_controller = cp_poll_controller, 1833 .ndo_poll_controller = cp_poll_controller,
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 3ed7add23c12..5dc161630127 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -228,7 +228,7 @@ typedef enum {
228static const struct { 228static const struct {
229 const char *name; 229 const char *name;
230 u32 hw_flags; 230 u32 hw_flags;
231} board_info[] __devinitconst = { 231} board_info[] = {
232 { "RealTek RTL8139", RTL8139_CAPS }, 232 { "RealTek RTL8139", RTL8139_CAPS },
233 { "RealTek RTL8129", RTL8129_CAPS }, 233 { "RealTek RTL8129", RTL8129_CAPS },
234}; 234};
@@ -748,7 +748,7 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
748} 748}
749 749
750 750
751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) 751static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
752{ 752{
753 struct device *d = &pdev->dev; 753 struct device *d = &pdev->dev;
754 void __iomem *ioaddr; 754 void __iomem *ioaddr;
@@ -935,8 +935,8 @@ static const struct net_device_ops rtl8139_netdev_ops = {
935 .ndo_set_features = rtl8139_set_features, 935 .ndo_set_features = rtl8139_set_features,
936}; 936};
937 937
938static int __devinit rtl8139_init_one (struct pci_dev *pdev, 938static int rtl8139_init_one(struct pci_dev *pdev,
939 const struct pci_device_id *ent) 939 const struct pci_device_id *ent)
940{ 940{
941 struct net_device *dev = NULL; 941 struct net_device *dev = NULL;
942 struct rtl8139_private *tp; 942 struct rtl8139_private *tp;
@@ -1103,7 +1103,7 @@ err_out:
1103} 1103}
1104 1104
1105 1105
1106static void __devexit rtl8139_remove_one (struct pci_dev *pdev) 1106static void rtl8139_remove_one(struct pci_dev *pdev)
1107{ 1107{
1108 struct net_device *dev = pci_get_drvdata (pdev); 1108 struct net_device *dev = pci_get_drvdata (pdev);
1109 struct rtl8139_private *tp = netdev_priv(dev); 1109 struct rtl8139_private *tp = netdev_priv(dev);
@@ -1141,7 +1141,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
1141#define EE_READ_CMD (6) 1141#define EE_READ_CMD (6)
1142#define EE_ERASE_CMD (7) 1142#define EE_ERASE_CMD (7)
1143 1143
1144static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len) 1144static int read_eeprom(void __iomem *ioaddr, int location, int addr_len)
1145{ 1145{
1146 int i; 1146 int i;
1147 unsigned retval = 0; 1147 unsigned retval = 0;
@@ -2652,7 +2652,7 @@ static struct pci_driver rtl8139_pci_driver = {
2652 .name = DRV_NAME, 2652 .name = DRV_NAME,
2653 .id_table = rtl8139_pci_tbl, 2653 .id_table = rtl8139_pci_tbl,
2654 .probe = rtl8139_init_one, 2654 .probe = rtl8139_init_one,
2655 .remove = __devexit_p(rtl8139_remove_one), 2655 .remove = rtl8139_remove_one,
2656#ifdef CONFIG_PM 2656#ifdef CONFIG_PM
2657 .suspend = rtl8139_suspend, 2657 .suspend = rtl8139_suspend,
2658 .resume = rtl8139_resume, 2658 .resume = rtl8139_resume,
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index e02f04d7f3ad..9f2d416de750 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -175,8 +175,7 @@ struct net_local {
175 unsigned int tx_unit_busy:1; 175 unsigned int tx_unit_busy:1;
176 unsigned char re_tx, /* Number of packet retransmissions. */ 176 unsigned char re_tx, /* Number of packet retransmissions. */
177 addr_mode, /* Current Rx filter e.g. promiscuous, etc. */ 177 addr_mode, /* Current Rx filter e.g. promiscuous, etc. */
178 pac_cnt_in_tx_buf, 178 pac_cnt_in_tx_buf;
179 chip_type;
180}; 179};
181 180
182/* This code, written by wwc@super.org, resets the adapter every 181/* This code, written by wwc@super.org, resets the adapter every
@@ -339,7 +338,6 @@ static int __init atp_probe1(long ioaddr)
339 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX); 338 write_reg_high(ioaddr, CMR1, CMR1h_RESET | CMR1h_MUX);
340 339
341 lp = netdev_priv(dev); 340 lp = netdev_priv(dev);
342 lp->chip_type = RTL8002;
343 lp->addr_mode = CMR2h_Normal; 341 lp->addr_mode = CMR2h_Normal;
344 spin_lock_init(&lp->lock); 342 spin_lock_init(&lp->lock);
345 343
@@ -852,7 +850,7 @@ net_close(struct net_device *dev)
852 * Set or clear the multicast filter for this adapter. 850 * Set or clear the multicast filter for this adapter.
853 */ 851 */
854 852
855static void set_rx_mode_8002(struct net_device *dev) 853static void set_rx_mode(struct net_device *dev)
856{ 854{
857 struct net_local *lp = netdev_priv(dev); 855 struct net_local *lp = netdev_priv(dev);
858 long ioaddr = dev->base_addr; 856 long ioaddr = dev->base_addr;
@@ -864,58 +862,6 @@ static void set_rx_mode_8002(struct net_device *dev)
864 write_reg_high(ioaddr, CMR2, lp->addr_mode); 862 write_reg_high(ioaddr, CMR2, lp->addr_mode);
865} 863}
866 864
867static void set_rx_mode_8012(struct net_device *dev)
868{
869 struct net_local *lp = netdev_priv(dev);
870 long ioaddr = dev->base_addr;
871 unsigned char new_mode, mc_filter[8]; /* Multicast hash filter */
872 int i;
873
874 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
875 new_mode = CMR2h_PROMISC;
876 } else if ((netdev_mc_count(dev) > 1000) ||
877 (dev->flags & IFF_ALLMULTI)) {
878 /* Too many to filter perfectly -- accept all multicasts. */
879 memset(mc_filter, 0xff, sizeof(mc_filter));
880 new_mode = CMR2h_Normal;
881 } else {
882 struct netdev_hw_addr *ha;
883
884 memset(mc_filter, 0, sizeof(mc_filter));
885 netdev_for_each_mc_addr(ha, dev) {
886 int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
887 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
888 }
889 new_mode = CMR2h_Normal;
890 }
891 lp->addr_mode = new_mode;
892 write_reg(ioaddr, CMR2, CMR2_IRQOUT | 0x04); /* Switch to page 1. */
893 for (i = 0; i < 8; i++)
894 write_reg_byte(ioaddr, i, mc_filter[i]);
895 if (net_debug > 2 || 1) {
896 lp->addr_mode = 1;
897 printk(KERN_DEBUG "%s: Mode %d, setting multicast filter to",
898 dev->name, lp->addr_mode);
899 for (i = 0; i < 8; i++)
900 printk(" %2.2x", mc_filter[i]);
901 printk(".\n");
902 }
903
904 write_reg_high(ioaddr, CMR2, lp->addr_mode);
905 write_reg(ioaddr, CMR2, CMR2_IRQOUT); /* Switch back to page 0 */
906}
907
908static void set_rx_mode(struct net_device *dev)
909{
910 struct net_local *lp = netdev_priv(dev);
911
912 if (lp->chip_type == RTL8002)
913 return set_rx_mode_8002(dev);
914 else
915 return set_rx_mode_8012(dev);
916}
917
918
919static int __init atp_init_module(void) { 865static int __init atp_init_module(void) {
920 if (debug) /* Emit version even if no cards detected. */ 866 if (debug) /* Emit version even if no cards detected. */
921 printk(KERN_INFO "%s", version); 867 printk(KERN_INFO "%s", version);
diff --git a/drivers/net/ethernet/realtek/atp.h b/drivers/net/ethernet/realtek/atp.h
index 0edc642c2c2f..040b13739947 100644
--- a/drivers/net/ethernet/realtek/atp.h
+++ b/drivers/net/ethernet/realtek/atp.h
@@ -16,8 +16,6 @@ struct rx_header {
16#define PAR_STATUS 1 16#define PAR_STATUS 1
17#define PAR_CONTROL 2 17#define PAR_CONTROL 2
18 18
19enum chip_type { RTL8002, RTL8012 };
20
21#define Ctrl_LNibRead 0x08 /* LP_PSELECP */ 19#define Ctrl_LNibRead 0x08 /* LP_PSELECP */
22#define Ctrl_HNibRead 0 20#define Ctrl_HNibRead 0
23#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */ 21#define Ctrl_LNibWrite 0x08 /* LP_PSELECP */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33d4349..ed96f309bca8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -78,24 +78,18 @@ static const int multicast_filter_limit = 32;
78 78
79#define MAX_READ_REQUEST_SHIFT 12 79#define MAX_READ_REQUEST_SHIFT 12
80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ 80#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
82#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 81#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 82
84#define R8169_REGS_SIZE 256 83#define R8169_REGS_SIZE 256
85#define R8169_NAPI_WEIGHT 64 84#define R8169_NAPI_WEIGHT 64
86#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ 85#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
87#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */ 86#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
88#define RX_BUF_SIZE 1536 /* Rx Buffer size */
89#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) 87#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
90#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) 88#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
91 89
92#define RTL8169_TX_TIMEOUT (6*HZ) 90#define RTL8169_TX_TIMEOUT (6*HZ)
93#define RTL8169_PHY_TIMEOUT (10*HZ) 91#define RTL8169_PHY_TIMEOUT (10*HZ)
94 92
95#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
96#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
97#define RTL_EEPROM_SIG_ADDR 0x0000
98
99/* write/read MMIO register */ 93/* write/read MMIO register */
100#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) 94#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
101#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) 95#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -456,6 +450,7 @@ enum rtl8168_registers {
456#define PWM_EN (1 << 22) 450#define PWM_EN (1 << 22)
457#define RXDV_GATED_EN (1 << 19) 451#define RXDV_GATED_EN (1 << 19)
458#define EARLY_TALLY_EN (1 << 16) 452#define EARLY_TALLY_EN (1 << 16)
453#define FORCE_CLK (1 << 15) /* force clock request */
459}; 454};
460 455
461enum rtl_register_content { 456enum rtl_register_content {
@@ -519,6 +514,7 @@ enum rtl_register_content {
519 PMEnable = (1 << 0), /* Power Management Enable */ 514 PMEnable = (1 << 0), /* Power Management Enable */
520 515
521 /* Config2 register p. 25 */ 516 /* Config2 register p. 25 */
517 ClkReqEn = (1 << 7), /* Clock Request Enable */
522 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ 518 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
523 PCI_Clock_66MHz = 0x01, 519 PCI_Clock_66MHz = 0x01,
524 PCI_Clock_33MHz = 0x00, 520 PCI_Clock_33MHz = 0x00,
@@ -539,6 +535,7 @@ enum rtl_register_content {
539 Spi_en = (1 << 3), 535 Spi_en = (1 << 3),
540 LanWake = (1 << 1), /* LanWake enable/disable */ 536 LanWake = (1 << 1), /* LanWake enable/disable */
541 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ 537 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 ASPM_en = (1 << 0), /* ASPM enable */
542 539
543 /* TBICSR p.28 */ 540 /* TBICSR p.28 */
544 TBIReset = 0x80000000, 541 TBIReset = 0x80000000,
@@ -687,6 +684,7 @@ enum features {
687 RTL_FEATURE_WOL = (1 << 0), 684 RTL_FEATURE_WOL = (1 << 0),
688 RTL_FEATURE_MSI = (1 << 1), 685 RTL_FEATURE_MSI = (1 << 1),
689 RTL_FEATURE_GMII = (1 << 2), 686 RTL_FEATURE_GMII = (1 << 2),
687 RTL_FEATURE_FW_LOADED = (1 << 3),
690}; 688};
691 689
692struct rtl8169_counters { 690struct rtl8169_counters {
@@ -1816,8 +1814,7 @@ static int rtl8169_set_features(struct net_device *dev,
1816} 1814}
1817 1815
1818 1816
1819static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, 1817static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1820 struct sk_buff *skb)
1821{ 1818{
1822 return (vlan_tx_tag_present(skb)) ? 1819 return (vlan_tx_tag_present(skb)) ?
1823 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; 1820 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
@@ -2394,8 +2391,10 @@ static void rtl_apply_firmware(struct rtl8169_private *tp)
2394 struct rtl_fw *rtl_fw = tp->rtl_fw; 2391 struct rtl_fw *rtl_fw = tp->rtl_fw;
2395 2392
2396 /* TODO: release firmware once rtl_phy_write_fw signals failures. */ 2393 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2397 if (!IS_ERR_OR_NULL(rtl_fw)) 2394 if (!IS_ERR_OR_NULL(rtl_fw)) {
2398 rtl_phy_write_fw(tp, rtl_fw); 2395 rtl_phy_write_fw(tp, rtl_fw);
2396 tp->features |= RTL_FEATURE_FW_LOADED;
2397 }
2399} 2398}
2400 2399
2401static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) 2400static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
@@ -2406,6 +2405,31 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2406 rtl_apply_firmware(tp); 2405 rtl_apply_firmware(tp);
2407} 2406}
2408 2407
2408static void r810x_aldps_disable(struct rtl8169_private *tp)
2409{
2410 rtl_writephy(tp, 0x1f, 0x0000);
2411 rtl_writephy(tp, 0x18, 0x0310);
2412 msleep(100);
2413}
2414
2415static void r810x_aldps_enable(struct rtl8169_private *tp)
2416{
2417 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2418 return;
2419
2420 rtl_writephy(tp, 0x1f, 0x0000);
2421 rtl_writephy(tp, 0x18, 0x8310);
2422}
2423
2424static void r8168_aldps_enable_1(struct rtl8169_private *tp)
2425{
2426 if (!(tp->features & RTL_FEATURE_FW_LOADED))
2427 return;
2428
2429 rtl_writephy(tp, 0x1f, 0x0000);
2430 rtl_w1w0_phy(tp, 0x15, 0x1000, 0x0000);
2431}
2432
2409static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) 2433static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2410{ 2434{
2411 static const struct phy_reg phy_reg_init[] = { 2435 static const struct phy_reg phy_reg_init[] = {
@@ -3096,6 +3120,23 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3096 rtl_writephy(tp, 0x0d, 0x0000); 3120 rtl_writephy(tp, 0x0d, 0x0000);
3097} 3121}
3098 3122
3123static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3124{
3125 const u16 w[] = {
3126 addr[0] | (addr[1] << 8),
3127 addr[2] | (addr[3] << 8),
3128 addr[4] | (addr[5] << 8)
3129 };
3130 const struct exgmac_reg e[] = {
3131 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3132 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3133 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3134 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3135 };
3136
3137 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3138}
3139
3099static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) 3140static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3100{ 3141{
3101 static const struct phy_reg phy_reg_init[] = { 3142 static const struct phy_reg phy_reg_init[] = {
@@ -3178,6 +3219,11 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3178 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3219 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3179 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3220 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3180 rtl_writephy(tp, 0x1f, 0x0000); 3221 rtl_writephy(tp, 0x1f, 0x0000);
3222
3223 r8168_aldps_enable_1(tp);
3224
3225 /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3226 rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
3181} 3227}
3182 3228
3183static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) 3229static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
@@ -3250,6 +3296,8 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3250 rtl_writephy(tp, 0x05, 0x8b85); 3296 rtl_writephy(tp, 0x05, 0x8b85);
3251 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); 3297 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3252 rtl_writephy(tp, 0x1f, 0x0000); 3298 rtl_writephy(tp, 0x1f, 0x0000);
3299
3300 r8168_aldps_enable_1(tp);
3253} 3301}
3254 3302
3255static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) 3303static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3257,6 +3305,8 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3257 rtl_apply_firmware(tp); 3305 rtl_apply_firmware(tp);
3258 3306
3259 rtl8168f_hw_phy_config(tp); 3307 rtl8168f_hw_phy_config(tp);
3308
3309 r8168_aldps_enable_1(tp);
3260} 3310}
3261 3311
3262static void rtl8411_hw_phy_config(struct rtl8169_private *tp) 3312static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
@@ -3354,6 +3404,8 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3354 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); 3404 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3355 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); 3405 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3356 rtl_writephy(tp, 0x1f, 0x0000); 3406 rtl_writephy(tp, 0x1f, 0x0000);
3407
3408 r8168_aldps_enable_1(tp);
3357} 3409}
3358 3410
3359static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) 3411static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
@@ -3439,21 +3491,19 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3439 }; 3491 };
3440 3492
3441 /* Disable ALDPS before ram code */ 3493 /* Disable ALDPS before ram code */
3442 rtl_writephy(tp, 0x1f, 0x0000); 3494 r810x_aldps_disable(tp);
3443 rtl_writephy(tp, 0x18, 0x0310);
3444 msleep(100);
3445 3495
3446 rtl_apply_firmware(tp); 3496 rtl_apply_firmware(tp);
3447 3497
3448 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3498 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3499
3500 r810x_aldps_enable(tp);
3449} 3501}
3450 3502
3451static void rtl8402_hw_phy_config(struct rtl8169_private *tp) 3503static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3452{ 3504{
3453 /* Disable ALDPS before setting firmware */ 3505 /* Disable ALDPS before setting firmware */
3454 rtl_writephy(tp, 0x1f, 0x0000); 3506 r810x_aldps_disable(tp);
3455 rtl_writephy(tp, 0x18, 0x0310);
3456 msleep(20);
3457 3507
3458 rtl_apply_firmware(tp); 3508 rtl_apply_firmware(tp);
3459 3509
@@ -3463,6 +3513,8 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3463 rtl_writephy(tp, 0x10, 0x401f); 3513 rtl_writephy(tp, 0x10, 0x401f);
3464 rtl_writephy(tp, 0x19, 0x7030); 3514 rtl_writephy(tp, 0x19, 0x7030);
3465 rtl_writephy(tp, 0x1f, 0x0000); 3515 rtl_writephy(tp, 0x1f, 0x0000);
3516
3517 r810x_aldps_enable(tp);
3466} 3518}
3467 3519
3468static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) 3520static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
@@ -3475,9 +3527,7 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3475 }; 3527 };
3476 3528
3477 /* Disable ALDPS before ram code */ 3529 /* Disable ALDPS before ram code */
3478 rtl_writephy(tp, 0x1f, 0x0000); 3530 r810x_aldps_disable(tp);
3479 rtl_writephy(tp, 0x18, 0x0310);
3480 msleep(100);
3481 3531
3482 rtl_apply_firmware(tp); 3532 rtl_apply_firmware(tp);
3483 3533
@@ -3485,6 +3535,8 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3485 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3535 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3486 3536
3487 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 3537 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3538
3539 r810x_aldps_enable(tp);
3488} 3540}
3489 3541
3490static void rtl_hw_phy_config(struct net_device *dev) 3542static void rtl_hw_phy_config(struct net_device *dev)
@@ -3708,33 +3760,19 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3708static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) 3760static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3709{ 3761{
3710 void __iomem *ioaddr = tp->mmio_addr; 3762 void __iomem *ioaddr = tp->mmio_addr;
3711 u32 high;
3712 u32 low;
3713
3714 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
3715 high = addr[4] | (addr[5] << 8);
3716 3763
3717 rtl_lock_work(tp); 3764 rtl_lock_work(tp);
3718 3765
3719 RTL_W8(Cfg9346, Cfg9346_Unlock); 3766 RTL_W8(Cfg9346, Cfg9346_Unlock);
3720 3767
3721 RTL_W32(MAC4, high); 3768 RTL_W32(MAC4, addr[4] | addr[5] << 8);
3722 RTL_R32(MAC4); 3769 RTL_R32(MAC4);
3723 3770
3724 RTL_W32(MAC0, low); 3771 RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3725 RTL_R32(MAC0); 3772 RTL_R32(MAC0);
3726 3773
3727 if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 3774 if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3728 const struct exgmac_reg e[] = { 3775 rtl_rar_exgmac_set(tp, addr);
3729 { .addr = 0xe0, ERIAR_MASK_1111, .val = low },
3730 { .addr = 0xe4, ERIAR_MASK_1111, .val = high },
3731 { .addr = 0xf0, ERIAR_MASK_1111, .val = low << 16 },
3732 { .addr = 0xf4, ERIAR_MASK_1111, .val = high << 16 |
3733 low >> 16 },
3734 };
3735
3736 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
3737 }
3738 3776
3739 RTL_W8(Cfg9346, Cfg9346_Lock); 3777 RTL_W8(Cfg9346, Cfg9346_Lock);
3740 3778
@@ -3796,7 +3834,7 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3796 } 3834 }
3797} 3835}
3798 3836
3799static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) 3837static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3800{ 3838{
3801 struct mdio_ops *ops = &tp->mdio_ops; 3839 struct mdio_ops *ops = &tp->mdio_ops;
3802 3840
@@ -4048,7 +4086,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
4048 rtl_generic_op(tp, tp->pll_power_ops.up); 4086 rtl_generic_op(tp, tp->pll_power_ops.up);
4049} 4087}
4050 4088
4051static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) 4089static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4052{ 4090{
4053 struct pll_power_ops *ops = &tp->pll_power_ops; 4091 struct pll_power_ops *ops = &tp->pll_power_ops;
4054 4092
@@ -4242,7 +4280,7 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4242 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); 4280 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4243} 4281}
4244 4282
4245static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp) 4283static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4246{ 4284{
4247 struct jumbo_ops *ops = &tp->jumbo_ops; 4285 struct jumbo_ops *ops = &tp->jumbo_ops;
4248 4286
@@ -4683,7 +4721,7 @@ static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4683 RTL_R32(CSIDR) : ~0; 4721 RTL_R32(CSIDR) : ~0;
4684} 4722}
4685 4723
4686static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp) 4724static void rtl_init_csi_ops(struct rtl8169_private *tp)
4687{ 4725{
4688 struct csi_ops *ops = &tp->csi_ops; 4726 struct csi_ops *ops = &tp->csi_ops;
4689 4727
@@ -5015,8 +5053,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5015 5053
5016 RTL_W8(MaxTxPacketSize, EarlySize); 5054 RTL_W8(MaxTxPacketSize, EarlySize);
5017 5055
5018 rtl_disable_clock_request(pdev);
5019
5020 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5056 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5021 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5057 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5022 5058
@@ -5025,7 +5061,8 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5025 5061
5026 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5062 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5027 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 5063 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5028 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 5064 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5065 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5029} 5066}
5030 5067
5031static void rtl_hw_start_8168f(struct rtl8169_private *tp) 5068static void rtl_hw_start_8168f(struct rtl8169_private *tp)
@@ -5050,13 +5087,12 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5050 5087
5051 RTL_W8(MaxTxPacketSize, EarlySize); 5088 RTL_W8(MaxTxPacketSize, EarlySize);
5052 5089
5053 rtl_disable_clock_request(pdev);
5054
5055 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5090 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5056 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5091 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5057 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5092 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5058 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 5093 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN | FORCE_CLK);
5059 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 5094 RTL_W8(Config5, (RTL_R8(Config5) & ~Spi_en) | ASPM_en);
5095 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5060} 5096}
5061 5097
5062static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) 5098static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
@@ -5113,8 +5149,10 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5113 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); 5149 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5114 5150
5115 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5151 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5116 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); 5152 RTL_W32(MISC, (RTL_R32(MISC) | FORCE_CLK) & ~RXDV_GATED_EN);
5117 RTL_W8(MaxTxPacketSize, EarlySize); 5153 RTL_W8(MaxTxPacketSize, EarlySize);
5154 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5155 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5118 5156
5119 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5157 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5120 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5158 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
@@ -5330,6 +5368,9 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5330 5368
5331 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5369 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5332 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5370 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5371 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5372 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5373 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5333 5374
5334 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5375 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5335} 5376}
@@ -5355,6 +5396,9 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5355 5396
5356 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 5397 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5357 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 5398 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5399 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5400 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5401 RTL_W32(MISC, RTL_R32(MISC) | FORCE_CLK);
5358 5402
5359 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); 5403 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5360 5404
@@ -5376,7 +5420,10 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5376 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5420 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5377 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); 5421 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5378 5422
5379 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); 5423 RTL_W32(MISC,
5424 (RTL_R32(MISC) | DISABLE_LAN_EN | FORCE_CLK) & ~EARLY_TALLY_EN);
5425 RTL_W8(Config5, RTL_R8(Config5) | ASPM_en);
5426 RTL_W8(Config2, RTL_R8(Config2) | ClkReqEn);
5380 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5427 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5381 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5428 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5382} 5429}
@@ -5774,7 +5821,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5774 tp->tx_skb[entry].len = len; 5821 tp->tx_skb[entry].len = len;
5775 txd->addr = cpu_to_le64(mapping); 5822 txd->addr = cpu_to_le64(mapping);
5776 5823
5777 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb)); 5824 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5778 opts[0] = DescOwn; 5825 opts[0] = DescOwn;
5779 5826
5780 rtl8169_tso_csum(tp, skb, opts); 5827 rtl8169_tso_csum(tp, skb, opts);
@@ -6569,7 +6616,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
6569 pm_runtime_put_noidle(d); 6616 pm_runtime_put_noidle(d);
6570} 6617}
6571 6618
6572static void __devexit rtl_remove_one(struct pci_dev *pdev) 6619static void rtl_remove_one(struct pci_dev *pdev)
6573{ 6620{
6574 struct net_device *dev = pci_get_drvdata(pdev); 6621 struct net_device *dev = pci_get_drvdata(pdev);
6575 struct rtl8169_private *tp = netdev_priv(dev); 6622 struct rtl8169_private *tp = netdev_priv(dev);
@@ -6689,7 +6736,7 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6689 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; 6736 return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
6690} 6737}
6691 6738
6692static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp) 6739static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6693{ 6740{
6694 void __iomem *ioaddr = tp->mmio_addr; 6741 void __iomem *ioaddr = tp->mmio_addr;
6695 u32 data; 6742 u32 data;
@@ -6723,7 +6770,7 @@ static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
6723 return; 6770 return;
6724} 6771}
6725 6772
6726static void __devinit rtl_hw_initialize(struct rtl8169_private *tp) 6773static void rtl_hw_initialize(struct rtl8169_private *tp)
6727{ 6774{
6728 switch (tp->mac_version) { 6775 switch (tp->mac_version) {
6729 case RTL_GIGA_MAC_VER_40: 6776 case RTL_GIGA_MAC_VER_40:
@@ -6736,7 +6783,7 @@ static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
6736 } 6783 }
6737} 6784}
6738 6785
6739static int __devinit 6786static int
6740rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6787rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6741{ 6788{
6742 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; 6789 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -6987,20 +7034,9 @@ static struct pci_driver rtl8169_pci_driver = {
6987 .name = MODULENAME, 7034 .name = MODULENAME,
6988 .id_table = rtl8169_pci_tbl, 7035 .id_table = rtl8169_pci_tbl,
6989 .probe = rtl_init_one, 7036 .probe = rtl_init_one,
6990 .remove = __devexit_p(rtl_remove_one), 7037 .remove = rtl_remove_one,
6991 .shutdown = rtl_shutdown, 7038 .shutdown = rtl_shutdown,
6992 .driver.pm = RTL8169_PM_OPS, 7039 .driver.pm = RTL8169_PM_OPS,
6993}; 7040};
6994 7041
6995static int __init rtl8169_init_module(void) 7042module_pci_driver(rtl8169_pci_driver);
6996{
6997 return pci_register_driver(&rtl8169_pci_driver);
6998}
6999
7000static void __exit rtl8169_cleanup_module(void)
7001{
7002 pci_unregister_driver(&rtl8169_pci_driver);
7003}
7004
7005module_init(rtl8169_init_module);
7006module_exit(rtl8169_cleanup_module);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c8bfea0524dd..3d705862bd7d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2286,7 +2286,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2286 for (i = 0; i < PHY_MAX_ADDR; i++) 2286 for (i = 0; i < PHY_MAX_ADDR; i++)
2287 mdp->mii_bus->irq[i] = PHY_POLL; 2287 mdp->mii_bus->irq[i] = PHY_POLL;
2288 2288
2289 /* regist mdio bus */ 2289 /* register mdio bus */
2290 ret = mdiobus_register(mdp->mii_bus); 2290 ret = mdiobus_register(mdp->mii_bus);
2291 if (ret) 2291 if (ret)
2292 goto out_free_irq; 2292 goto out_free_irq;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 2ed3ab4b3c2d..72fc57dd084d 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -954,7 +954,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev)
954 return st; 954 return st;
955} 955}
956 956
957static int __devinit s6gmac_probe(struct platform_device *pdev) 957static int s6gmac_probe(struct platform_device *pdev)
958{ 958{
959 struct net_device *dev; 959 struct net_device *dev;
960 struct s6gmac *pd; 960 struct s6gmac *pd;
@@ -1030,7 +1030,7 @@ errirq:
1030 return res; 1030 return res;
1031} 1031}
1032 1032
1033static int __devexit s6gmac_remove(struct platform_device *pdev) 1033static int s6gmac_remove(struct platform_device *pdev)
1034{ 1034{
1035 struct net_device *dev = platform_get_drvdata(pdev); 1035 struct net_device *dev = platform_get_drvdata(pdev);
1036 if (dev) { 1036 if (dev) {
@@ -1046,7 +1046,7 @@ static int __devexit s6gmac_remove(struct platform_device *pdev)
1046 1046
1047static struct platform_driver s6gmac_driver = { 1047static struct platform_driver s6gmac_driver = {
1048 .probe = s6gmac_probe, 1048 .probe = s6gmac_probe,
1049 .remove = __devexit_p(s6gmac_remove), 1049 .remove = s6gmac_remove,
1050 .driver = { 1050 .driver = {
1051 .name = "s6gmac", 1051 .name = "s6gmac",
1052 .owner = THIS_MODULE, 1052 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 6a40dd03a32f..3aca57853ed4 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -67,7 +67,7 @@
67#include <asm/ecard.h> 67#include <asm/ecard.h>
68#include <asm/io.h> 68#include <asm/io.h>
69 69
70static char version[] __devinitdata = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n"; 70static char version[] = "ether3 ethernet driver (c) 1995-2000 R.M.King v1.17\n";
71 71
72#include "ether3.h" 72#include "ether3.h"
73 73
@@ -194,7 +194,7 @@ static inline void ether3_ledon(struct net_device *dev)
194 * Read the ethernet address string from the on board rom. 194 * Read the ethernet address string from the on board rom.
195 * This is an ascii string!!! 195 * This is an ascii string!!!
196 */ 196 */
197static int __devinit 197static int
198ether3_addr(char *addr, struct expansion_card *ec) 198ether3_addr(char *addr, struct expansion_card *ec)
199{ 199{
200 struct in_chunk_dir cd; 200 struct in_chunk_dir cd;
@@ -219,7 +219,7 @@ ether3_addr(char *addr, struct expansion_card *ec)
219 219
220/* --------------------------------------------------------------------------- */ 220/* --------------------------------------------------------------------------- */
221 221
222static int __devinit 222static int
223ether3_ramtest(struct net_device *dev, unsigned char byte) 223ether3_ramtest(struct net_device *dev, unsigned char byte)
224{ 224{
225 unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL); 225 unsigned char *buffer = kmalloc(RX_END, GFP_KERNEL);
@@ -268,7 +268,7 @@ ether3_ramtest(struct net_device *dev, unsigned char byte)
268 268
269/* ------------------------------------------------------------------------------- */ 269/* ------------------------------------------------------------------------------- */
270 270
271static int __devinit ether3_init_2(struct net_device *dev) 271static int ether3_init_2(struct net_device *dev)
272{ 272{
273 int i; 273 int i;
274 274
@@ -399,12 +399,6 @@ ether3_probe_bus_16(struct net_device *dev, int val)
399static int 399static int
400ether3_open(struct net_device *dev) 400ether3_open(struct net_device *dev)
401{ 401{
402 if (!is_valid_ether_addr(dev->dev_addr)) {
403 printk(KERN_WARNING "%s: invalid ethernet MAC address\n",
404 dev->name);
405 return -EINVAL;
406 }
407
408 if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev)) 402 if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
409 return -EAGAIN; 403 return -EAGAIN;
410 404
@@ -748,7 +742,7 @@ static void ether3_tx(struct net_device *dev)
748 } 742 }
749} 743}
750 744
751static void __devinit ether3_banner(void) 745static void ether3_banner(void)
752{ 746{
753 static unsigned version_printed = 0; 747 static unsigned version_printed = 0;
754 748
@@ -767,7 +761,7 @@ static const struct net_device_ops ether3_netdev_ops = {
767 .ndo_set_mac_address = eth_mac_addr, 761 .ndo_set_mac_address = eth_mac_addr,
768}; 762};
769 763
770static int __devinit 764static int
771ether3_probe(struct expansion_card *ec, const struct ecard_id *id) 765ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
772{ 766{
773 const struct ether3_data *data = id->data; 767 const struct ether3_data *data = id->data;
@@ -864,7 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
864 return ret; 858 return ret;
865} 859}
866 860
867static void __devexit ether3_remove(struct expansion_card *ec) 861static void ether3_remove(struct expansion_card *ec)
868{ 862{
869 struct net_device *dev = ecard_get_drvdata(ec); 863 struct net_device *dev = ecard_get_drvdata(ec);
870 864
@@ -894,7 +888,7 @@ static const struct ecard_id ether3_ids[] = {
894 888
895static struct ecard_driver ether3_driver = { 889static struct ecard_driver ether3_driver = {
896 .probe = ether3_probe, 890 .probe = ether3_probe,
897 .remove = __devexit_p(ether3_remove), 891 .remove = ether3_remove,
898 .id_table = ether3_ids, 892 .id_table = ether3_ids,
899 .drv = { 893 .drv = {
900 .name = "ether3", 894 .name = "ether3",
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 4d15bf413bdc..0fde9ca28269 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
721 .ndo_validate_addr = eth_validate_addr, 721 .ndo_validate_addr = eth_validate_addr,
722}; 722};
723 723
724static int __devinit sgiseeq_probe(struct platform_device *pdev) 724static int sgiseeq_probe(struct platform_device *pdev)
725{ 725{
726 struct sgiseeq_platform_data *pd = pdev->dev.platform_data; 726 struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
727 struct hpc3_regs *hpcregs = pd->hpc; 727 struct hpc3_regs *hpcregs = pd->hpc;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 25906c1d1b15..435b4f1e3488 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,10 +1,11 @@
1config SFC 1config SFC
2 tristate "Solarflare SFC4000/SFC9000-family support" 2 tristate "Solarflare SFC4000/SFC9000-family support"
3 depends on PCI && INET 3 depends on PCI
4 select MDIO 4 select MDIO
5 select CRC32 5 select CRC32
6 select I2C 6 select I2C
7 select I2C_ALGOBIT 7 select I2C_ALGOBIT
8 select PTP_1588_CLOCK
8 ---help--- 9 ---help---
9 This driver supports 10-gigabit Ethernet cards based on 10 This driver supports 10-gigabit Ethernet cards based on
10 the Solarflare SFC4000 and SFC9000-family controllers. 11 the Solarflare SFC4000 and SFC9000-family controllers.
@@ -34,10 +35,3 @@ config SFC_SRIOV
34 This enables support for the SFC9000 I/O Virtualization 35 This enables support for the SFC9000 I/O Virtualization
35 features, allowing accelerated network performance in 36 features, allowing accelerated network performance in
36 virtualized environments. 37 virtualized environments.
37config SFC_PTP
38 bool "Solarflare SFC9000-family PTP support"
39 depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
40 default y
41 ---help---
42 This enables support for the Precision Time Protocol (PTP)
43 on SFC9000-family NICs
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index e11f2ecf69d9..945bf06e69ef 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -2,9 +2,8 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
2 falcon_xmac.o mcdi_mac.o \ 2 falcon_xmac.o mcdi_mac.o \
3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \ 3 selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
4 tenxpress.o txc43128_phy.o falcon_boards.o \ 4 tenxpress.o txc43128_phy.o falcon_boards.o \
5 mcdi.o mcdi_phy.o mcdi_mon.o 5 mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
6sfc-$(CONFIG_SFC_MTD) += mtd.o 6sfc-$(CONFIG_SFC_MTD) += mtd.o
7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o 7sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
8sfc-$(CONFIG_SFC_PTP) += ptp.o
9 8
10obj-$(CONFIG_SFC) += sfc.o 9obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 4f86d0cd516a..bf57b3cb16ab 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -106,8 +106,8 @@ static struct workqueue_struct *reset_workqueue;
106 * 106 *
107 * This is only used in MSI-X interrupt mode 107 * This is only used in MSI-X interrupt mode
108 */ 108 */
109static unsigned int separate_tx_channels; 109static bool separate_tx_channels;
110module_param(separate_tx_channels, uint, 0444); 110module_param(separate_tx_channels, bool, 0444);
111MODULE_PARM_DESC(separate_tx_channels, 111MODULE_PARM_DESC(separate_tx_channels,
112 "Use separate channels for TX and RX"); 112 "Use separate channels for TX and RX");
113 113
@@ -160,8 +160,8 @@ static unsigned int rss_cpus;
160module_param(rss_cpus, uint, 0444); 160module_param(rss_cpus, uint, 0444);
161MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); 161MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
162 162
163static int phy_flash_cfg; 163static bool phy_flash_cfg;
164module_param(phy_flash_cfg, int, 0644); 164module_param(phy_flash_cfg, bool, 0644);
165MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); 165MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
166 166
167static unsigned irq_adapt_low_thresh = 8000; 167static unsigned irq_adapt_low_thresh = 8000;
@@ -2279,7 +2279,7 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
2279 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", 2279 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2280 RESET_TYPE(method)); 2280 RESET_TYPE(method));
2281 2281
2282 netif_device_detach(efx->net_dev); 2282 efx_device_detach_sync(efx);
2283 efx_reset_down(efx, method); 2283 efx_reset_down(efx, method);
2284 2284
2285 rc = efx->type->reset(efx, method); 2285 rc = efx->type->reset(efx, method);
@@ -2669,8 +2669,8 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2669 * transmission; this is left to the first time one of the network 2669 * transmission; this is left to the first time one of the network
2670 * interfaces is brought up (i.e. efx_net_open). 2670 * interfaces is brought up (i.e. efx_net_open).
2671 */ 2671 */
2672static int __devinit efx_pci_probe(struct pci_dev *pci_dev, 2672static int efx_pci_probe(struct pci_dev *pci_dev,
2673 const struct pci_device_id *entry) 2673 const struct pci_device_id *entry)
2674{ 2674{
2675 struct net_device *net_dev; 2675 struct net_device *net_dev;
2676 struct efx_nic *efx; 2676 struct efx_nic *efx;
@@ -2758,7 +2758,7 @@ static int efx_pm_freeze(struct device *dev)
2758 if (efx->state != STATE_DISABLED) { 2758 if (efx->state != STATE_DISABLED) {
2759 efx->state = STATE_UNINIT; 2759 efx->state = STATE_UNINIT;
2760 2760
2761 netif_device_detach(efx->net_dev); 2761 efx_device_detach_sync(efx);
2762 2762
2763 efx_stop_all(efx); 2763 efx_stop_all(efx);
2764 efx_stop_interrupts(efx, false); 2764 efx_stop_interrupts(efx, false);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index f11170bc48bf..50247dfe8f57 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -163,4 +163,17 @@ extern void efx_link_status_changed(struct efx_nic *efx);
163extern void efx_link_set_advertising(struct efx_nic *efx, u32); 163extern void efx_link_set_advertising(struct efx_nic *efx, u32);
164extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8); 164extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
165 165
166static inline void efx_device_detach_sync(struct efx_nic *efx)
167{
168 struct net_device *dev = efx->net_dev;
169
170 /* Lock/freeze all TX queues so that we can be sure the
171 * TX scheduler is stopped when we're done and before
172 * netif_device_present() becomes false.
173 */
174 netif_tx_lock(dev);
175 netif_device_detach(dev);
176 netif_tx_unlock(dev);
177}
178
166#endif /* EFX_EFX_H */ 179#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 90f078eff8e6..8e61cd06f66a 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -816,6 +816,9 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
816/* MAC address mask including only MC flag */ 816/* MAC address mask including only MC flag */
817static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 817static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
818 818
819#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
820#define PORT_FULL_MASK ((__force __be16)~0)
821
819static int efx_ethtool_get_class_rule(struct efx_nic *efx, 822static int efx_ethtool_get_class_rule(struct efx_nic *efx,
820 struct ethtool_rx_flow_spec *rule) 823 struct ethtool_rx_flow_spec *rule)
821{ 824{
@@ -865,12 +868,12 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
865 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, 868 &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
866 &ip_entry->ip4src, &ip_entry->psrc); 869 &ip_entry->ip4src, &ip_entry->psrc);
867 EFX_WARN_ON_PARANOID(rc); 870 EFX_WARN_ON_PARANOID(rc);
868 ip_mask->ip4src = ~0; 871 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
869 ip_mask->psrc = ~0; 872 ip_mask->psrc = PORT_FULL_MASK;
870 } 873 }
871 rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW; 874 rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
872 ip_mask->ip4dst = ~0; 875 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
873 ip_mask->pdst = ~0; 876 ip_mask->pdst = PORT_FULL_MASK;
874 return rc; 877 return rc;
875} 878}
876 879
@@ -971,7 +974,7 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
971 974
972 /* Check for unsupported extensions */ 975 /* Check for unsupported extensions */
973 if ((rule->flow_type & FLOW_EXT) && 976 if ((rule->flow_type & FLOW_EXT) &&
974 (rule->m_ext.vlan_etype | rule->m_ext.data[0] | 977 (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
975 rule->m_ext.data[1])) 978 rule->m_ext.data[1]))
976 return -EINVAL; 979 return -EINVAL;
977 980
@@ -986,16 +989,16 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
986 IPPROTO_TCP : IPPROTO_UDP); 989 IPPROTO_TCP : IPPROTO_UDP);
987 990
988 /* Must match all of destination, */ 991 /* Must match all of destination, */
989 if ((__force u32)~ip_mask->ip4dst | 992 if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
990 (__force u16)~ip_mask->pdst) 993 ip_mask->pdst == PORT_FULL_MASK))
991 return -EINVAL; 994 return -EINVAL;
992 /* all or none of source, */ 995 /* all or none of source, */
993 if ((ip_mask->ip4src | ip_mask->psrc) && 996 if ((ip_mask->ip4src || ip_mask->psrc) &&
994 ((__force u32)~ip_mask->ip4src | 997 !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
995 (__force u16)~ip_mask->psrc)) 998 ip_mask->psrc == PORT_FULL_MASK))
996 return -EINVAL; 999 return -EINVAL;
997 /* and nothing else */ 1000 /* and nothing else */
998 if (ip_mask->tos | rule->m_ext.vlan_tci) 1001 if (ip_mask->tos || rule->m_ext.vlan_tci)
999 return -EINVAL; 1002 return -EINVAL;
1000 1003
1001 if (ip_mask->ip4src) 1004 if (ip_mask->ip4src)
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 12b573a8e82b..49bcd196e10d 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1792,6 +1792,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
1792 .remove_port = falcon_remove_port, 1792 .remove_port = falcon_remove_port,
1793 .handle_global_event = falcon_handle_global_event, 1793 .handle_global_event = falcon_handle_global_event,
1794 .prepare_flush = falcon_prepare_flush, 1794 .prepare_flush = falcon_prepare_flush,
1795 .finish_flush = efx_port_dummy_op_void,
1795 .update_stats = falcon_update_nic_stats, 1796 .update_stats = falcon_update_nic_stats,
1796 .start_stats = falcon_start_nic_stats, 1797 .start_stats = falcon_start_nic_stats,
1797 .stop_stats = falcon_stop_nic_stats, 1798 .stop_stats = falcon_stop_nic_stats,
@@ -1834,6 +1835,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
1834 .remove_port = falcon_remove_port, 1835 .remove_port = falcon_remove_port,
1835 .handle_global_event = falcon_handle_global_event, 1836 .handle_global_event = falcon_handle_global_event,
1836 .prepare_flush = falcon_prepare_flush, 1837 .prepare_flush = falcon_prepare_flush,
1838 .finish_flush = efx_port_dummy_op_void,
1837 .update_stats = falcon_update_nic_stats, 1839 .update_stats = falcon_update_nic_stats,
1838 .start_stats = falcon_start_nic_stats, 1840 .start_stats = falcon_start_nic_stats,
1839 .stop_stats = falcon_stop_nic_stats, 1841 .stop_stats = falcon_stop_nic_stats,
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 751d1ec112cc..96759aee1c6c 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -22,22 +22,21 @@
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy:
24 * 24 *
25 * Most CSRs are 128-bit (oword) and therefore cannot be read or 25 * Many CSRs are very wide and cannot be read or written atomically.
26 * written atomically. Access from the host is buffered by the Bus 26 * Writes from the host are buffered by the Bus Interface Unit (BIU)
27 * Interface Unit (BIU). Whenever the host reads from the lowest 27 * up to 128 bits. Whenever the host writes part of such a register,
28 * address of such a register, or from the address of a different such 28 * the BIU collects the written value and does not write to the
29 * register, the BIU latches the register's value. Subsequent reads 29 * underlying register until all 4 dwords have been written. A
30 * from higher addresses of the same register will read the latched 30 * similar buffering scheme applies to host access to the NIC's 64-bit
31 * value. Whenever the host writes part of such a register, the BIU 31 * SRAM.
32 * collects the written value and does not write to the underlying
33 * register until all 4 dwords have been written. A similar buffering
34 * scheme applies to host access to the NIC's 64-bit SRAM.
35 * 32 *
36 * Access to different CSRs and 64-bit SRAM words must be serialised, 33 * Writes to different CSRs and 64-bit SRAM words must be serialised,
37 * since interleaved access can result in lost writes or lost 34 * since interleaved access can result in lost writes. We use
38 * information from read-to-clear fields. We use efx_nic::biu_lock 35 * efx_nic::biu_lock for this.
39 * for this. (We could use separate locks for read and write, but 36 *
40 * this is not normally a performance bottleneck.) 37 * We also serialise reads from 128-bit CSRs and SRAM with the same
38 * spinlock. This may not be necessary, but it doesn't really matter
39 * as there are no such reads on the fast path.
41 * 40 *
42 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are 41 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
43 * 128-bit but are special-cased in the BIU to avoid the need for 42 * 128-bit but are special-cased in the BIU to avoid the need for
@@ -204,20 +203,6 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); 203 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
205} 204}
206 205
207/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
209 unsigned int reg, unsigned int index)
210{
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212}
213
214/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
215static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
216 unsigned int reg, unsigned int index)
217{
218 efx_readd(efx, value, reg + index * sizeof(efx_dword_t));
219}
220
221/* Page-mapped register block size */ 206/* Page-mapped register block size */
222#define EFX_PAGE_BLOCK_SIZE 0x2000 207#define EFX_PAGE_BLOCK_SIZE 0x2000
223 208
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index aea43cbd0520..0095ce95150b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -22,7 +22,7 @@
22 ************************************************************************** 22 **************************************************************************
23 */ 23 */
24 24
25#define MCDI_RPC_TIMEOUT 10 /*seconds */ 25#define MCDI_RPC_TIMEOUT (10 * HZ)
26 26
27#define MCDI_PDU(efx) \ 27#define MCDI_PDU(efx) \
28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST) 28 (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
120static int efx_mcdi_poll(struct efx_nic *efx) 120static int efx_mcdi_poll(struct efx_nic *efx)
121{ 121{
122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx); 122 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
123 unsigned int time, finish; 123 unsigned long time, finish;
124 unsigned int respseq, respcmd, error; 124 unsigned int respseq, respcmd, error;
125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); 125 unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
126 unsigned int rc, spins; 126 unsigned int rc, spins;
@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
136 * and poll once a jiffy (approximately) 136 * and poll once a jiffy (approximately)
137 */ 137 */
138 spins = TICK_USEC; 138 spins = TICK_USEC;
139 finish = get_seconds() + MCDI_RPC_TIMEOUT; 139 finish = jiffies + MCDI_RPC_TIMEOUT;
140 140
141 while (1) { 141 while (1) {
142 if (spins != 0) { 142 if (spins != 0) {
@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
146 schedule_timeout_uninterruptible(1); 146 schedule_timeout_uninterruptible(1);
147 } 147 }
148 148
149 time = get_seconds(); 149 time = jiffies;
150 150
151 rmb(); 151 rmb();
152 efx_readd(efx, &reg, pdu); 152 efx_readd(efx, &reg, pdu);
@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
158 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) 158 EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
159 break; 159 break;
160 160
161 if (time >= finish) 161 if (time_after(time, finish))
162 return -ETIMEDOUT; 162 return -ETIMEDOUT;
163 } 163 }
164 164
@@ -207,7 +207,9 @@ out:
207 return 0; 207 return 0;
208} 208}
209 209
210/* Test and clear MC-rebooted flag for this port/function */ 210/* Test and clear MC-rebooted flag for this port/function; reset
211 * software state as necessary.
212 */
211int efx_mcdi_poll_reboot(struct efx_nic *efx) 213int efx_mcdi_poll_reboot(struct efx_nic *efx)
212{ 214{
213 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); 215 unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
@@ -223,6 +225,11 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
223 if (value == 0) 225 if (value == 0)
224 return 0; 226 return 0;
225 227
228 /* MAC statistics have been cleared on the NIC; clear our copy
229 * so that efx_update_diff_stat() can continue to work.
230 */
231 memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
232
226 EFX_ZERO_DWORD(reg); 233 EFX_ZERO_DWORD(reg);
227 efx_writed(efx, &reg, addr); 234 efx_writed(efx, &reg, addr);
228 235
@@ -250,7 +257,7 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
250 if (wait_event_timeout( 257 if (wait_event_timeout(
251 mcdi->wq, 258 mcdi->wq,
252 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, 259 atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
253 msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) 260 MCDI_RPC_TIMEOUT) == 0)
254 return -ETIMEDOUT; 261 return -ETIMEDOUT;
255 262
256 /* Check if efx_mcdi_set_mode() switched us back to polled completions. 263 /* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -1216,7 +1223,7 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
1216 1223
1217 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid, 1224 rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
1218 count * sizeof(*qid), NULL, 0, NULL); 1225 count * sizeof(*qid), NULL, 0, NULL);
1219 WARN_ON(rc > 0); 1226 WARN_ON(rc < 0);
1220 1227
1221 kfree(qid); 1228 kfree(qid);
1222 1229
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 576a31091165..2d756c1d7142 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -200,6 +200,7 @@ struct efx_tx_queue {
200 /* Members shared between paths and sometimes updated */ 200 /* Members shared between paths and sometimes updated */
201 unsigned int empty_read_count ____cacheline_aligned_in_smp; 201 unsigned int empty_read_count ____cacheline_aligned_in_smp;
202#define EFX_EMPTY_COUNT_VALID 0x80000000 202#define EFX_EMPTY_COUNT_VALID 0x80000000
203 atomic_t flush_outstanding;
203}; 204};
204 205
205/** 206/**
@@ -868,9 +869,7 @@ struct efx_nic {
868 struct work_struct peer_work; 869 struct work_struct peer_work;
869#endif 870#endif
870 871
871#ifdef CONFIG_SFC_PTP
872 struct efx_ptp_data *ptp_data; 872 struct efx_ptp_data *ptp_data;
873#endif
874 873
875 /* The following fields may be written more often */ 874 /* The following fields may be written more often */
876 875
@@ -909,6 +908,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
909 * @remove_port: Free resources allocated by probe_port() 908 * @remove_port: Free resources allocated by probe_port()
910 * @handle_global_event: Handle a "global" event (may be %NULL) 909 * @handle_global_event: Handle a "global" event (may be %NULL)
911 * @prepare_flush: Prepare the hardware for flushing the DMA queues 910 * @prepare_flush: Prepare the hardware for flushing the DMA queues
911 * @finish_flush: Clean up after flushing the DMA queues
912 * @update_stats: Update statistics not provided by event handling 912 * @update_stats: Update statistics not provided by event handling
913 * @start_stats: Start the regular fetching of statistics 913 * @start_stats: Start the regular fetching of statistics
914 * @stop_stats: Stop the regular fetching of statistics 914 * @stop_stats: Stop the regular fetching of statistics
@@ -956,6 +956,7 @@ struct efx_nic_type {
956 void (*remove_port)(struct efx_nic *efx); 956 void (*remove_port)(struct efx_nic *efx);
957 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); 957 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
958 void (*prepare_flush)(struct efx_nic *efx); 958 void (*prepare_flush)(struct efx_nic *efx);
959 void (*finish_flush)(struct efx_nic *efx);
959 void (*update_stats)(struct efx_nic *efx); 960 void (*update_stats)(struct efx_nic *efx);
960 void (*start_stats)(struct efx_nic *efx); 961 void (*start_stats)(struct efx_nic *efx);
961 void (*stop_stats)(struct efx_nic *efx); 962 void (*stop_stats)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index aab7cacb2e34..0ad790cc473c 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -73,6 +73,8 @@
73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ 73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
74 (_tx_queue)->queue) 74 (_tx_queue)->queue)
75 75
76static void efx_magic_event(struct efx_channel *channel, u32 magic);
77
76/************************************************************************** 78/**************************************************************************
77 * 79 *
78 * Solarstorm hardware access 80 * Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
255 buffer->entries = len / EFX_BUF_SIZE; 257 buffer->entries = len / EFX_BUF_SIZE;
256 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 258 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
257 259
258 /* All zeros is a potentially valid event so memset to 0xff */
259 memset(buffer->addr, 0xff, len);
260
261 /* Select new buffer ID */ 260 /* Select new buffer ID */
262 buffer->index = efx->next_buffer_table; 261 buffer->index = efx->next_buffer_table;
263 efx->next_buffer_table += buffer->entries; 262 efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
494 struct efx_nic *efx = tx_queue->efx; 493 struct efx_nic *efx = tx_queue->efx;
495 efx_oword_t tx_flush_descq; 494 efx_oword_t tx_flush_descq;
496 495
496 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
497 atomic_set(&tx_queue->flush_outstanding, 1);
498
497 EFX_POPULATE_OWORD_2(tx_flush_descq, 499 EFX_POPULATE_OWORD_2(tx_flush_descq,
498 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 500 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
499 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 501 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
669 && atomic_read(&efx->rxq_flush_pending) > 0)); 671 && atomic_read(&efx->rxq_flush_pending) > 0));
670} 672}
671 673
674static bool efx_check_tx_flush_complete(struct efx_nic *efx)
675{
676 bool i = true;
677 efx_oword_t txd_ptr_tbl;
678 struct efx_channel *channel;
679 struct efx_tx_queue *tx_queue;
680
681 efx_for_each_channel(channel, efx) {
682 efx_for_each_channel_tx_queue(tx_queue, channel) {
683 efx_reado_table(efx, &txd_ptr_tbl,
684 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
685 if (EFX_OWORD_FIELD(txd_ptr_tbl,
686 FRF_AZ_TX_DESCQ_FLUSH) ||
687 EFX_OWORD_FIELD(txd_ptr_tbl,
688 FRF_AZ_TX_DESCQ_EN)) {
689 netif_dbg(efx, hw, efx->net_dev,
690 "flush did not complete on TXQ %d\n",
691 tx_queue->queue);
692 i = false;
693 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
694 1, 0)) {
695 /* The flush is complete, but we didn't
696 * receive a flush completion event
697 */
698 netif_dbg(efx, hw, efx->net_dev,
699 "flush complete on TXQ %d, so drain "
700 "the queue\n", tx_queue->queue);
701 /* Don't need to increment drain_pending as it
702 * has already been incremented for the queues
703 * which did not drain
704 */
705 efx_magic_event(channel,
706 EFX_CHANNEL_MAGIC_TX_DRAIN(
707 tx_queue));
708 }
709 }
710 }
711
712 return i;
713}
714
672/* Flush all the transmit queues, and continue flushing receive queues until 715/* Flush all the transmit queues, and continue flushing receive queues until
673 * they're all flushed. Wait for the DRAIN events to be recieved so that there 716 * they're all flushed. Wait for the DRAIN events to be recieved so that there
674 * are no more RX and TX events left on any channel. */ 717 * are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
680 struct efx_tx_queue *tx_queue; 723 struct efx_tx_queue *tx_queue;
681 int rc = 0; 724 int rc = 0;
682 725
683 efx->fc_disable++;
684 efx->type->prepare_flush(efx); 726 efx->type->prepare_flush(efx);
685 727
686 efx_for_each_channel(channel, efx) { 728 efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
730 timeout); 772 timeout);
731 } 773 }
732 774
733 if (atomic_read(&efx->drain_pending)) { 775 if (atomic_read(&efx->drain_pending) &&
776 !efx_check_tx_flush_complete(efx)) {
734 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 777 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
735 "(rx %d+%d)\n", atomic_read(&efx->drain_pending), 778 "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
736 atomic_read(&efx->rxq_flush_outstanding), 779 atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
742 atomic_set(&efx->rxq_flush_outstanding, 0); 785 atomic_set(&efx->rxq_flush_outstanding, 0);
743 } 786 }
744 787
745 efx->fc_disable--; 788 efx->type->finish_flush(efx);
746 789
747 return rc; 790 return rc;
748} 791}
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
766 809
767 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 810 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
768 channel->eventq_read_ptr & channel->eventq_mask); 811 channel->eventq_read_ptr & channel->eventq_mask);
769 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, 812
770 channel->channel); 813 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
814 * of 4 bytes, but it is really 16 bytes just like later revisions.
815 */
816 efx_writed(efx, &reg,
817 efx->type->evq_rptr_tbl_base +
818 FR_BZ_EVQ_RPTR_STEP * channel->channel);
771} 819}
772 820
773/* Use HW to insert a SW defined event */ 821/* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1017 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { 1065 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1018 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, 1066 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1019 qid % EFX_TXQ_TYPES); 1067 qid % EFX_TXQ_TYPES);
1020 1068 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1021 efx_magic_event(tx_queue->channel, 1069 efx_magic_event(tx_queue->channel,
1022 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 1070 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1071 }
1023 } 1072 }
1024} 1073}
1025 1074
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1565 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1614 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1566 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1615 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1567 efx->rx_indir_table[i]); 1616 efx->rx_indir_table[i]);
1568 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); 1617 efx_writed(efx, &dword,
1618 FR_BZ_RX_INDIRECTION_TBL +
1619 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1569 } 1620 }
1570} 1621}
1571 1622
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
2029 2080
2030 for (i = 0; i < table->rows; i++) { 2081 for (i = 0; i < table->rows; i++) {
2031 switch (table->step) { 2082 switch (table->step) {
2032 case 4: /* 32-bit register or SRAM */ 2083 case 4: /* 32-bit SRAM */
2033 efx_readd_table(efx, buf, table->offset, i); 2084 efx_readd(efx, buf, table->offset + 4 * i);
2034 break; 2085 break;
2035 case 8: /* 64-bit SRAM */ 2086 case 8: /* 64-bit SRAM */
2036 efx_sram_readq(efx, 2087 efx_sram_readq(efx,
2037 efx->membase + table->offset, 2088 efx->membase + table->offset,
2038 buf, i); 2089 buf, i);
2039 break; 2090 break;
2040 case 16: /* 128-bit register */ 2091 case 16: /* 128-bit-readable register */
2041 efx_reado_table(efx, buf, table->offset, i); 2092 efx_reado_table(efx, buf, table->offset, i);
2042 break; 2093 break;
2043 case 32: /* 128-bit register, interleaved */ 2094 case 32: /* 128-bit register, interleaved */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 438cef11f727..1b0003323498 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -252,7 +252,6 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
252 bool spoofchk); 252 bool spoofchk);
253 253
254struct ethtool_ts_info; 254struct ethtool_ts_info;
255#ifdef CONFIG_SFC_PTP
256extern void efx_ptp_probe(struct efx_nic *efx); 255extern void efx_ptp_probe(struct efx_nic *efx);
257extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); 256extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
258extern int efx_ptp_get_ts_info(struct net_device *net_dev, 257extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@ extern int efx_ptp_get_ts_info(struct net_device *net_dev,
260extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 259extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
261extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); 260extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
262extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); 261extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
263#else
264static inline void efx_ptp_probe(struct efx_nic *efx) {}
265static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
266{
267 return -EOPNOTSUPP;
268}
269static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
270 struct ethtool_ts_info *ts_info)
271{
272 ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
273 SOF_TIMESTAMPING_RX_SOFTWARE);
274 ts_info->phc_index = -1;
275
276 return 0;
277}
278static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
279{
280 return false;
281}
282static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
283{
284 return NETDEV_TX_OK;
285}
286static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
287#endif
288 262
289extern const struct efx_nic_type falcon_a1_nic_type; 263extern const struct efx_nic_type falcon_a1_nic_type;
290extern const struct efx_nic_type falcon_b0_nic_type; 264extern const struct efx_nic_type falcon_b0_nic_type;
@@ -370,6 +344,8 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
370 344
371/* Global Resources */ 345/* Global Resources */
372extern int efx_nic_flush_queues(struct efx_nic *efx); 346extern int efx_nic_flush_queues(struct efx_nic *efx);
347extern void siena_prepare_flush(struct efx_nic *efx);
348extern void siena_finish_flush(struct efx_nic *efx);
373extern void falcon_start_nic_stats(struct efx_nic *efx); 349extern void falcon_start_nic_stats(struct efx_nic *efx);
374extern void falcon_stop_nic_stats(struct efx_nic *efx); 350extern void falcon_stop_nic_stats(struct efx_nic *efx);
375extern void falcon_setup_xaui(struct efx_nic *efx); 351extern void falcon_setup_xaui(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 9e0ad1b75c33..d780a0d096b4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -187,7 +187,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
187 struct efx_nic *efx = rx_queue->efx; 187 struct efx_nic *efx = rx_queue->efx;
188 struct efx_rx_buffer *rx_buf; 188 struct efx_rx_buffer *rx_buf;
189 struct page *page; 189 struct page *page;
190 void *page_addr;
191 struct efx_rx_page_state *state; 190 struct efx_rx_page_state *state;
192 dma_addr_t dma_addr; 191 dma_addr_t dma_addr;
193 unsigned index, count; 192 unsigned index, count;
@@ -207,12 +206,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
207 __free_pages(page, efx->rx_buffer_order); 206 __free_pages(page, efx->rx_buffer_order);
208 return -EIO; 207 return -EIO;
209 } 208 }
210 page_addr = page_address(page); 209 state = page_address(page);
211 state = page_addr;
212 state->refcnt = 0; 210 state->refcnt = 0;
213 state->dma_addr = dma_addr; 211 state->dma_addr = dma_addr;
214 212
215 page_addr += sizeof(struct efx_rx_page_state);
216 dma_addr += sizeof(struct efx_rx_page_state); 213 dma_addr += sizeof(struct efx_rx_page_state);
217 214
218 split: 215 split:
@@ -230,7 +227,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
230 /* Use the second half of the page */ 227 /* Use the second half of the page */
231 get_page(page); 228 get_page(page);
232 dma_addr += (PAGE_SIZE >> 1); 229 dma_addr += (PAGE_SIZE >> 1);
233 page_addr += (PAGE_SIZE >> 1);
234 ++count; 230 ++count;
235 goto split; 231 goto split;
236 } 232 }
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index ce72ae4f399f..2069f51b2aa9 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -373,7 +373,7 @@ static void efx_iterate_state(struct efx_nic *efx)
373 /* saddr set later and used as incrementing count */ 373 /* saddr set later and used as incrementing count */
374 payload->ip.daddr = htonl(INADDR_LOOPBACK); 374 payload->ip.daddr = htonl(INADDR_LOOPBACK);
375 payload->ip.ihl = 5; 375 payload->ip.ihl = 5;
376 payload->ip.check = htons(0xdead); 376 payload->ip.check = (__force __sum16) htons(0xdead);
377 payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); 377 payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
378 payload->ip.version = IPVERSION; 378 payload->ip.version = IPVERSION;
379 payload->ip.protocol = IPPROTO_UDP; 379 payload->ip.protocol = IPPROTO_UDP;
@@ -722,7 +722,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
722 /* Detach the device so the kernel doesn't transmit during the 722 /* Detach the device so the kernel doesn't transmit during the
723 * loopback test and the watchdog timeout doesn't fire. 723 * loopback test and the watchdog timeout doesn't fire.
724 */ 724 */
725 netif_device_detach(efx->net_dev); 725 efx_device_detach_sync(efx);
726 726
727 if (efx->type->test_chip) { 727 if (efx->type->test_chip) {
728 rc_reset = efx->type->test_chip(efx, tests); 728 rc_reset = efx->type->test_chip(efx, tests);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 84b41bf08a38..ba40f67e4f05 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -127,6 +127,18 @@ static void siena_remove_port(struct efx_nic *efx)
127 efx_nic_free_buffer(efx, &efx->stats_buffer); 127 efx_nic_free_buffer(efx, &efx->stats_buffer);
128} 128}
129 129
130void siena_prepare_flush(struct efx_nic *efx)
131{
132 if (efx->fc_disable++ == 0)
133 efx_mcdi_set_mac(efx);
134}
135
136void siena_finish_flush(struct efx_nic *efx)
137{
138 if (--efx->fc_disable == 0)
139 efx_mcdi_set_mac(efx);
140}
141
130static const struct efx_nic_register_test siena_register_tests[] = { 142static const struct efx_nic_register_test siena_register_tests[] = {
131 { FR_AZ_ADR_REGION, 143 { FR_AZ_ADR_REGION,
132 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, 144 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
@@ -158,7 +170,7 @@ static const struct efx_nic_register_test siena_register_tests[] = {
158 170
159static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 171static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
160{ 172{
161 enum reset_type reset_method = reset_method; 173 enum reset_type reset_method = RESET_TYPE_ALL;
162 int rc, rc2; 174 int rc, rc2;
163 175
164 efx_reset_down(efx, reset_method); 176 efx_reset_down(efx, reset_method);
@@ -659,7 +671,8 @@ const struct efx_nic_type siena_a0_nic_type = {
659 .reset = siena_reset_hw, 671 .reset = siena_reset_hw,
660 .probe_port = siena_probe_port, 672 .probe_port = siena_probe_port,
661 .remove_port = siena_remove_port, 673 .remove_port = siena_remove_port,
662 .prepare_flush = efx_port_dummy_op_void, 674 .prepare_flush = siena_prepare_flush,
675 .finish_flush = siena_finish_flush,
663 .update_stats = siena_update_nic_stats, 676 .update_stats = siena_update_nic_stats,
664 .start_stats = siena_start_nic_stats, 677 .start_stats = siena_start_nic_stats,
665 .stop_stats = siena_stop_nic_stats, 678 .stop_stats = siena_stop_nic_stats,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index d49b53dc2a50..90f8d1604f5f 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -695,8 +695,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
695 return VFDI_RC_ENOMEM; 695 return VFDI_RC_ENOMEM;
696 696
697 rtnl_lock(); 697 rtnl_lock();
698 if (efx->fc_disable++ == 0) 698 siena_prepare_flush(efx);
699 efx_mcdi_set_mac(efx);
700 rtnl_unlock(); 699 rtnl_unlock();
701 700
702 /* Flush all the initialized queues */ 701 /* Flush all the initialized queues */
@@ -733,8 +732,7 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
733 } 732 }
734 733
735 rtnl_lock(); 734 rtnl_lock();
736 if (--efx->fc_disable == 0) 735 siena_finish_flush(efx);
737 efx_mcdi_set_mac(efx);
738 rtnl_unlock(); 736 rtnl_unlock();
739 737
740 /* Irrespective of success/failure, fini the queues */ 738 /* Irrespective of success/failure, fini the queues */
@@ -995,7 +993,7 @@ static void efx_sriov_reset_vf(struct efx_vf *vf, struct efx_buffer *buffer)
995 FRF_AZ_EVQ_BUF_BASE_ID, buftbl); 993 FRF_AZ_EVQ_BUF_BASE_ID, buftbl);
996 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq); 994 efx_writeo_table(efx, &reg, FR_BZ_EVQ_PTR_TBL, abs_evq);
997 EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0); 995 EFX_POPULATE_DWORD_1(ptr, FRF_AZ_EVQ_RPTR, 0);
998 efx_writed_table(efx, &ptr, FR_BZ_EVQ_RPTR, abs_evq); 996 efx_writed(efx, &ptr, FR_BZ_EVQ_RPTR + FR_BZ_EVQ_RPTR_STEP * abs_evq);
999 997
1000 mutex_unlock(&vf->status_lock); 998 mutex_unlock(&vf->status_lock);
1001} 999}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 3e5519a0acc7..dc171b4961e4 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1143,7 +1143,7 @@ static int ioc3_is_menet(struct pci_dev *pdev)
1143 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been 1143 * Can't use UPF_IOREMAP as the whole of IOC3 resources have already been
1144 * registered. 1144 * registered.
1145 */ 1145 */
1146static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart) 1146static void ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
1147{ 1147{
1148#define COSMISC_CONSTANT 6 1148#define COSMISC_CONSTANT 6
1149 1149
@@ -1169,7 +1169,7 @@ static void __devinit ioc3_8250_register(struct ioc3_uartregs __iomem *uart)
1169 serial8250_register_8250_port(&port); 1169 serial8250_register_8250_port(&port);
1170} 1170}
1171 1171
1172static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) 1172static void ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3)
1173{ 1173{
1174 /* 1174 /*
1175 * We need to recognice and treat the fourth MENET serial as it 1175 * We need to recognice and treat the fourth MENET serial as it
@@ -1229,8 +1229,7 @@ static const struct net_device_ops ioc3_netdev_ops = {
1229 .ndo_change_mtu = eth_change_mtu, 1229 .ndo_change_mtu = eth_change_mtu,
1230}; 1230};
1231 1231
1232static int __devinit ioc3_probe(struct pci_dev *pdev, 1232static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1233 const struct pci_device_id *ent)
1234{ 1233{
1235 unsigned int sw_physid1, sw_physid2; 1234 unsigned int sw_physid1, sw_physid2;
1236 struct net_device *dev = NULL; 1235 struct net_device *dev = NULL;
@@ -1368,7 +1367,7 @@ out:
1368 return err; 1367 return err;
1369} 1368}
1370 1369
1371static void __devexit ioc3_remove_one (struct pci_dev *pdev) 1370static void ioc3_remove_one(struct pci_dev *pdev)
1372{ 1371{
1373 struct net_device *dev = pci_get_drvdata(pdev); 1372 struct net_device *dev = pci_get_drvdata(pdev);
1374 struct ioc3_private *ip = netdev_priv(dev); 1373 struct ioc3_private *ip = netdev_priv(dev);
@@ -1396,7 +1395,7 @@ static struct pci_driver ioc3_driver = {
1396 .name = "ioc3-eth", 1395 .name = "ioc3-eth",
1397 .id_table = ioc3_pci_tbl, 1396 .id_table = ioc3_pci_tbl,
1398 .probe = ioc3_probe, 1397 .probe = ioc3_probe,
1399 .remove = __devexit_p(ioc3_remove_one), 1398 .remove = ioc3_remove_one,
1400}; 1399};
1401 1400
1402static int __init ioc3_init_module(void) 1401static int __init ioc3_init_module(void)
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 53efe7c7b1c0..79ad9c94a21b 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -825,7 +825,7 @@ static const struct net_device_ops meth_netdev_ops = {
825/* 825/*
826 * The init function. 826 * The init function.
827 */ 827 */
828static int __devinit meth_probe(struct platform_device *pdev) 828static int meth_probe(struct platform_device *pdev)
829{ 829{
830 struct net_device *dev; 830 struct net_device *dev;
831 struct meth_private *priv; 831 struct meth_private *priv;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 32e55664df6e..b2315324cc6d 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1395,8 +1395,7 @@ static const struct net_device_ops sc92031_netdev_ops = {
1395#endif 1395#endif
1396}; 1396};
1397 1397
1398static int __devinit sc92031_probe(struct pci_dev *pdev, 1398static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1399 const struct pci_device_id *id)
1400{ 1399{
1401 int err; 1400 int err;
1402 void __iomem* port_base; 1401 void __iomem* port_base;
@@ -1489,7 +1488,7 @@ out_enable_device:
1489 return err; 1488 return err;
1490} 1489}
1491 1490
1492static void __devexit sc92031_remove(struct pci_dev *pdev) 1491static void sc92031_remove(struct pci_dev *pdev)
1493{ 1492{
1494 struct net_device *dev = pci_get_drvdata(pdev); 1493 struct net_device *dev = pci_get_drvdata(pdev);
1495 struct sc92031_priv *priv = netdev_priv(dev); 1494 struct sc92031_priv *priv = netdev_priv(dev);
@@ -1574,7 +1573,7 @@ static struct pci_driver sc92031_pci_driver = {
1574 .name = SC92031_NAME, 1573 .name = SC92031_NAME,
1575 .id_table = sc92031_pci_device_id_table, 1574 .id_table = sc92031_pci_device_id_table,
1576 .probe = sc92031_probe, 1575 .probe = sc92031_probe,
1577 .remove = __devexit_p(sc92031_remove), 1576 .remove = sc92031_remove,
1578 .suspend = sc92031_suspend, 1577 .suspend = sc92031_suspend,
1579 .resume = sc92031_resume, 1578 .resume = sc92031_resume,
1580}; 1579};
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index d8166012b7d4..9a9c379420d1 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -415,7 +415,7 @@ static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
415 return mdio_read(ioaddr, phy_id, reg); 415 return mdio_read(ioaddr, phy_id, reg);
416} 416}
417 417
418static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg) 418static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
419{ 419{
420 u16 data = 0xffff; 420 u16 data = 0xffff;
421 unsigned int i; 421 unsigned int i;
@@ -1379,7 +1379,7 @@ static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
1379 * Identify and set current phy if found one, 1379 * Identify and set current phy if found one,
1380 * return error if it failed to found. 1380 * return error if it failed to found.
1381 */ 1381 */
1382static int __devinit sis190_mii_probe(struct net_device *dev) 1382static int sis190_mii_probe(struct net_device *dev)
1383{ 1383{
1384 struct sis190_private *tp = netdev_priv(dev); 1384 struct sis190_private *tp = netdev_priv(dev);
1385 struct mii_if_info *mii_if = &tp->mii_if; 1385 struct mii_if_info *mii_if = &tp->mii_if;
@@ -1451,7 +1451,7 @@ static void sis190_release_board(struct pci_dev *pdev)
1451 free_netdev(dev); 1451 free_netdev(dev);
1452} 1452}
1453 1453
1454static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) 1454static struct net_device *sis190_init_board(struct pci_dev *pdev)
1455{ 1455{
1456 struct sis190_private *tp; 1456 struct sis190_private *tp;
1457 struct net_device *dev; 1457 struct net_device *dev;
@@ -1573,8 +1573,8 @@ static void sis190_set_rgmii(struct sis190_private *tp, u8 reg)
1573 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0; 1573 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0;
1574} 1574}
1575 1575
1576static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, 1576static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1577 struct net_device *dev) 1577 struct net_device *dev)
1578{ 1578{
1579 struct sis190_private *tp = netdev_priv(dev); 1579 struct sis190_private *tp = netdev_priv(dev);
1580 void __iomem *ioaddr = tp->mmio_addr; 1580 void __iomem *ioaddr = tp->mmio_addr;
@@ -1615,10 +1615,10 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
1615 * APC CMOS RAM is accessed through ISA bridge. 1615 * APC CMOS RAM is accessed through ISA bridge.
1616 * MAC address is read into @net_dev->dev_addr. 1616 * MAC address is read into @net_dev->dev_addr.
1617 */ 1617 */
1618static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, 1618static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
1619 struct net_device *dev) 1619 struct net_device *dev)
1620{ 1620{
1621 static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 }; 1621 static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
1622 struct sis190_private *tp = netdev_priv(dev); 1622 struct sis190_private *tp = netdev_priv(dev);
1623 struct pci_dev *isa_bridge; 1623 struct pci_dev *isa_bridge;
1624 u8 reg, tmp8; 1624 u8 reg, tmp8;
@@ -1693,8 +1693,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
1693 SIS_PCI_COMMIT(); 1693 SIS_PCI_COMMIT();
1694} 1694}
1695 1695
1696static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1696static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
1697 struct net_device *dev)
1698{ 1697{
1699 int rc; 1698 int rc;
1700 1699
@@ -1845,8 +1844,8 @@ static const struct net_device_ops sis190_netdev_ops = {
1845#endif 1844#endif
1846}; 1845};
1847 1846
1848static int __devinit sis190_init_one(struct pci_dev *pdev, 1847static int sis190_init_one(struct pci_dev *pdev,
1849 const struct pci_device_id *ent) 1848 const struct pci_device_id *ent)
1850{ 1849{
1851 static int printed_version = 0; 1850 static int printed_version = 0;
1852 struct sis190_private *tp; 1851 struct sis190_private *tp;
@@ -1916,7 +1915,7 @@ err_release_board:
1916 goto out; 1915 goto out;
1917} 1916}
1918 1917
1919static void __devexit sis190_remove_one(struct pci_dev *pdev) 1918static void sis190_remove_one(struct pci_dev *pdev)
1920{ 1919{
1921 struct net_device *dev = pci_get_drvdata(pdev); 1920 struct net_device *dev = pci_get_drvdata(pdev);
1922 struct sis190_private *tp = netdev_priv(dev); 1921 struct sis190_private *tp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ static struct pci_driver sis190_pci_driver = {
1932 .name = DRV_NAME, 1931 .name = DRV_NAME,
1933 .id_table = sis190_pci_tbl, 1932 .id_table = sis190_pci_tbl,
1934 .probe = sis190_init_one, 1933 .probe = sis190_init_one,
1935 .remove = __devexit_p(sis190_remove_one), 1934 .remove = sis190_remove_one,
1936}; 1935};
1937 1936
1938static int __init sis190_init_module(void) 1937static int __init sis190_init_module(void)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index edf5edb13140..5bffd9749a58 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -81,7 +81,7 @@
81#define SIS900_MODULE_NAME "sis900" 81#define SIS900_MODULE_NAME "sis900"
82#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006" 82#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
83 83
84static const char version[] __devinitconst = 84static const char version[] =
85 KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n"; 85 KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
86 86
87static int max_interrupt_work = 40; 87static int max_interrupt_work = 40;
@@ -251,7 +251,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
251 * @net_dev->perm_addr. 251 * @net_dev->perm_addr.
252 */ 252 */
253 253
254static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 254static int sis900_get_mac_addr(struct pci_dev *pci_dev,
255 struct net_device *net_dev)
255{ 256{
256 struct sis900_private *sis_priv = netdev_priv(net_dev); 257 struct sis900_private *sis_priv = netdev_priv(net_dev);
257 void __iomem *ioaddr = sis_priv->ioaddr; 258 void __iomem *ioaddr = sis_priv->ioaddr;
@@ -287,8 +288,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
287 * @net_dev->perm_addr. 288 * @net_dev->perm_addr.
288 */ 289 */
289 290
290static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev, 291static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
291 struct net_device *net_dev) 292 struct net_device *net_dev)
292{ 293{
293 struct pci_dev *isa_bridge = NULL; 294 struct pci_dev *isa_bridge = NULL;
294 u8 reg; 295 u8 reg;
@@ -330,8 +331,8 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
330 * @net_dev->dev_addr and @net_dev->perm_addr. 331 * @net_dev->dev_addr and @net_dev->perm_addr.
331 */ 332 */
332 333
333static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 334static int sis635_get_mac_addr(struct pci_dev *pci_dev,
334 struct net_device *net_dev) 335 struct net_device *net_dev)
335{ 336{
336 struct sis900_private *sis_priv = netdev_priv(net_dev); 337 struct sis900_private *sis_priv = netdev_priv(net_dev);
337 void __iomem *ioaddr = sis_priv->ioaddr; 338 void __iomem *ioaddr = sis_priv->ioaddr;
@@ -377,8 +378,8 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
377 * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr. 378 * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
378 */ 379 */
379 380
380static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 381static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
381 struct net_device *net_dev) 382 struct net_device *net_dev)
382{ 383{
383 struct sis900_private *sis_priv = netdev_priv(net_dev); 384 struct sis900_private *sis_priv = netdev_priv(net_dev);
384 void __iomem *ioaddr = sis_priv->ioaddr; 385 void __iomem *ioaddr = sis_priv->ioaddr;
@@ -433,8 +434,8 @@ static const struct net_device_ops sis900_netdev_ops = {
433 * ie: sis900_open(), sis900_start_xmit(), sis900_close(), etc. 434 * ie: sis900_open(), sis900_start_xmit(), sis900_close(), etc.
434 */ 435 */
435 436
436static int __devinit sis900_probe(struct pci_dev *pci_dev, 437static int sis900_probe(struct pci_dev *pci_dev,
437 const struct pci_device_id *pci_id) 438 const struct pci_device_id *pci_id)
438{ 439{
439 struct sis900_private *sis_priv; 440 struct sis900_private *sis_priv;
440 struct net_device *net_dev; 441 struct net_device *net_dev;
@@ -605,7 +606,7 @@ err_out_cleardev:
605 * return error if it failed to found. 606 * return error if it failed to found.
606 */ 607 */
607 608
608static int __devinit sis900_mii_probe(struct net_device * net_dev) 609static int sis900_mii_probe(struct net_device *net_dev)
609{ 610{
610 struct sis900_private *sis_priv = netdev_priv(net_dev); 611 struct sis900_private *sis_priv = netdev_priv(net_dev);
611 const char *dev_name = pci_name(sis_priv->pci_dev); 612 const char *dev_name = pci_name(sis_priv->pci_dev);
@@ -824,7 +825,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
824 * Note that location is in word (16 bits) unit 825 * Note that location is in word (16 bits) unit
825 */ 826 */
826 827
827static u16 __devinit read_eeprom(void __iomem *ioaddr, int location) 828static u16 read_eeprom(void __iomem *ioaddr, int location)
828{ 829{
829 u32 read_cmd = location | EEread; 830 u32 read_cmd = location | EEread;
830 int i; 831 int i;
@@ -2410,7 +2411,7 @@ static void sis900_reset(struct net_device *net_dev)
2410 * remove and release SiS900 net device 2411 * remove and release SiS900 net device
2411 */ 2412 */
2412 2413
2413static void __devexit sis900_remove(struct pci_dev *pci_dev) 2414static void sis900_remove(struct pci_dev *pci_dev)
2414{ 2415{
2415 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2416 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2416 struct sis900_private *sis_priv = netdev_priv(net_dev); 2417 struct sis900_private *sis_priv = netdev_priv(net_dev);
@@ -2496,7 +2497,7 @@ static struct pci_driver sis900_pci_driver = {
2496 .name = SIS900_MODULE_NAME, 2497 .name = SIS900_MODULE_NAME,
2497 .id_table = sis900_pci_tbl, 2498 .id_table = sis900_pci_tbl,
2498 .probe = sis900_probe, 2499 .probe = sis900_probe,
2499 .remove = __devexit_p(sis900_remove), 2500 .remove = sis900_remove,
2500#ifdef CONFIG_PM 2501#ifdef CONFIG_PM
2501 .suspend = sis900_suspend, 2502 .suspend = sis900_suspend,
2502 .resume = sis900_resume, 2503 .resume = sis900_resume,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index d01e59c348ad..03b256af7ed5 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -90,9 +90,9 @@ static int rx_copybreak;
90#include <asm/byteorder.h> 90#include <asm/byteorder.h>
91 91
92/* These identify the driver base version and may not be removed. */ 92/* These identify the driver base version and may not be removed. */
93static char version[] __devinitdata = 93static char version[] =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n"; 94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata = 95static char version2[] =
96" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n"; 96" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
97 97
98MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); 98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
@@ -318,8 +318,7 @@ static const struct net_device_ops epic_netdev_ops = {
318 .ndo_validate_addr = eth_validate_addr, 318 .ndo_validate_addr = eth_validate_addr,
319}; 319};
320 320
321static int __devinit epic_init_one(struct pci_dev *pdev, 321static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
322 const struct pci_device_id *ent)
323{ 322{
324 static int card_idx = -1; 323 static int card_idx = -1;
325 void __iomem *ioaddr; 324 void __iomem *ioaddr;
@@ -569,7 +568,7 @@ static inline void epic_napi_irq_on(struct net_device *dev,
569 ew32(INTMASK, ep->irq_mask | EpicNapiEvent); 568 ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
570} 569}
571 570
572static int __devinit read_eeprom(struct epic_private *ep, int location) 571static int read_eeprom(struct epic_private *ep, int location)
573{ 572{
574 void __iomem *ioaddr = ep->ioaddr; 573 void __iomem *ioaddr = ep->ioaddr;
575 int i; 574 int i;
@@ -1524,7 +1523,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1524} 1523}
1525 1524
1526 1525
1527static void __devexit epic_remove_one(struct pci_dev *pdev) 1526static void epic_remove_one(struct pci_dev *pdev)
1528{ 1527{
1529 struct net_device *dev = pci_get_drvdata(pdev); 1528 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct epic_private *ep = netdev_priv(dev); 1529 struct epic_private *ep = netdev_priv(dev);
@@ -1577,7 +1576,7 @@ static struct pci_driver epic_driver = {
1577 .name = DRV_NAME, 1576 .name = DRV_NAME,
1578 .id_table = epic_pci_tbl, 1577 .id_table = epic_pci_tbl,
1579 .probe = epic_init_one, 1578 .probe = epic_init_one,
1580 .remove = __devexit_p(epic_remove_one), 1579 .remove = epic_remove_one,
1581#ifdef CONFIG_PM 1580#ifdef CONFIG_PM
1582 .suspend = epic_suspend, 1581 .suspend = epic_suspend,
1583 .resume = epic_resume, 1582 .resume = epic_resume,
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8d15f7a74b45..59a6f88da867 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1400,16 +1400,6 @@ smc911x_open(struct net_device *dev)
1400 1400
1401 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1401 DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
1402 1402
1403 /*
1404 * Check that the address is valid. If its not, refuse
1405 * to bring the device up. The user must specify an
1406 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1407 */
1408 if (!is_valid_ether_addr(dev->dev_addr)) {
1409 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1410 return -EINVAL;
1411 }
1412
1413 /* reset the hardware */ 1403 /* reset the hardware */
1414 smc911x_reset(dev); 1404 smc911x_reset(dev);
1415 1405
@@ -1722,7 +1712,7 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
1722 * This routine has a simple purpose -- make the SMC chip generate an 1712 * This routine has a simple purpose -- make the SMC chip generate an
1723 * interrupt, so an auto-detect routine can detect it, and find the IRQ, 1713 * interrupt, so an auto-detect routine can detect it, and find the IRQ,
1724 */ 1714 */
1725static int __devinit smc911x_findirq(struct net_device *dev) 1715static int smc911x_findirq(struct net_device *dev)
1726{ 1716{
1727 struct smc911x_local *lp = netdev_priv(dev); 1717 struct smc911x_local *lp = netdev_priv(dev);
1728 int timeout = 20; 1718 int timeout = 20;
@@ -1800,7 +1790,7 @@ static const struct net_device_ops smc911x_netdev_ops = {
1800 * o actually GRAB the irq. 1790 * o actually GRAB the irq.
1801 * o GRAB the region 1791 * o GRAB the region
1802 */ 1792 */
1803static int __devinit smc911x_probe(struct net_device *dev) 1793static int smc911x_probe(struct net_device *dev)
1804{ 1794{
1805 struct smc911x_local *lp = netdev_priv(dev); 1795 struct smc911x_local *lp = netdev_priv(dev);
1806 int i, retval; 1796 int i, retval;
@@ -2040,7 +2030,7 @@ err_out:
2040 * 0 --> there is a device 2030 * 0 --> there is a device
2041 * anything else, error 2031 * anything else, error
2042 */ 2032 */
2043static int __devinit smc911x_drv_probe(struct platform_device *pdev) 2033static int smc911x_drv_probe(struct platform_device *pdev)
2044{ 2034{
2045 struct net_device *ndev; 2035 struct net_device *ndev;
2046 struct resource *res; 2036 struct resource *res;
@@ -2115,7 +2105,7 @@ out:
2115 return ret; 2105 return ret;
2116} 2106}
2117 2107
2118static int __devexit smc911x_drv_remove(struct platform_device *pdev) 2108static int smc911x_drv_remove(struct platform_device *pdev)
2119{ 2109{
2120 struct net_device *ndev = platform_get_drvdata(pdev); 2110 struct net_device *ndev = platform_get_drvdata(pdev);
2121 struct smc911x_local *lp = netdev_priv(ndev); 2111 struct smc911x_local *lp = netdev_priv(ndev);
@@ -2186,7 +2176,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
2186 2176
2187static struct platform_driver smc911x_driver = { 2177static struct platform_driver smc911x_driver = {
2188 .probe = smc911x_drv_probe, 2178 .probe = smc911x_drv_probe,
2189 .remove = __devexit_p(smc911x_drv_remove), 2179 .remove = smc911x_drv_remove,
2190 .suspend = smc911x_drv_suspend, 2180 .suspend = smc911x_drv_suspend,
2191 .resume = smc911x_drv_resume, 2181 .resume = smc911x_drv_resume,
2192 .driver = { 2182 .driver = {
diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h
index 3269292efecc..d51261ba4642 100644
--- a/drivers/net/ethernet/smsc/smc911x.h
+++ b/drivers/net/ethernet/smsc/smc911x.h
@@ -159,12 +159,12 @@ static inline void SMC_insl(struct smc911x_local *lp, int reg,
159 void __iomem *ioaddr = lp->base + reg; 159 void __iomem *ioaddr = lp->base + reg;
160 160
161 if (lp->cfg.flags & SMC911X_USE_32BIT) { 161 if (lp->cfg.flags & SMC911X_USE_32BIT) {
162 readsl(ioaddr, addr, count); 162 ioread32_rep(ioaddr, addr, count);
163 return; 163 return;
164 } 164 }
165 165
166 if (lp->cfg.flags & SMC911X_USE_16BIT) { 166 if (lp->cfg.flags & SMC911X_USE_16BIT) {
167 readsw(ioaddr, addr, count * 2); 167 ioread16_rep(ioaddr, addr, count * 2);
168 return; 168 return;
169 } 169 }
170 170
@@ -177,12 +177,12 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
177 void __iomem *ioaddr = lp->base + reg; 177 void __iomem *ioaddr = lp->base + reg;
178 178
179 if (lp->cfg.flags & SMC911X_USE_32BIT) { 179 if (lp->cfg.flags & SMC911X_USE_32BIT) {
180 writesl(ioaddr, addr, count); 180 iowrite32_rep(ioaddr, addr, count);
181 return; 181 return;
182 } 182 }
183 183
184 if (lp->cfg.flags & SMC911X_USE_16BIT) { 184 if (lp->cfg.flags & SMC911X_USE_16BIT) {
185 writesw(ioaddr, addr, count * 2); 185 iowrite16_rep(ioaddr, addr, count * 2);
186 return; 186 return;
187 } 187 }
188 188
@@ -196,14 +196,14 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
196 writew(v & 0xFFFF, (lp)->base + (r)); \ 196 writew(v & 0xFFFF, (lp)->base + (r)); \
197 writew(v >> 16, (lp)->base + (r) + 2); \ 197 writew(v >> 16, (lp)->base + (r) + 2); \
198 } while (0) 198 } while (0)
199#define SMC_insl(lp, r, p, l) readsw((short*)((lp)->base + (r)), p, l*2) 199#define SMC_insl(lp, r, p, l) ioread16_rep((short*)((lp)->base + (r)), p, l*2)
200#define SMC_outsl(lp, r, p, l) writesw((short*)((lp)->base + (r)), p, l*2) 200#define SMC_outsl(lp, r, p, l) iowrite16_rep((short*)((lp)->base + (r)), p, l*2)
201 201
202#elif SMC_USE_32BIT 202#elif SMC_USE_32BIT
203#define SMC_inl(lp, r) readl((lp)->base + (r)) 203#define SMC_inl(lp, r) readl((lp)->base + (r))
204#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r)) 204#define SMC_outl(v, lp, r) writel(v, (lp)->base + (r))
205#define SMC_insl(lp, r, p, l) readsl((int*)((lp)->base + (r)), p, l) 205#define SMC_insl(lp, r, p, l) ioread32_rep((int*)((lp)->base + (r)), p, l)
206#define SMC_outsl(lp, r, p, l) writesl((int*)((lp)->base + (r)), p, l) 206#define SMC_outsl(lp, r, p, l) iowrite32_rep((int*)((lp)->base + (r)), p, l)
207 207
208#endif /* SMC_USE_16BIT */ 208#endif /* SMC_USE_16BIT */
209#endif /* SMC_DYNAMIC_BUS_CONFIG */ 209#endif /* SMC_DYNAMIC_BUS_CONFIG */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 318adc935a53..022b45bc14ff 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1474,16 +1474,6 @@ smc_open(struct net_device *dev)
1474 1474
1475 DBG(2, "%s: %s\n", dev->name, __func__); 1475 DBG(2, "%s: %s\n", dev->name, __func__);
1476 1476
1477 /*
1478 * Check that the address is valid. If its not, refuse
1479 * to bring the device up. The user must specify an
1480 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1481 */
1482 if (!is_valid_ether_addr(dev->dev_addr)) {
1483 PRINTK("%s: no valid ethernet hw addr\n", __func__);
1484 return -EINVAL;
1485 }
1486
1487 /* Setup the default Register Modes */ 1477 /* Setup the default Register Modes */
1488 lp->tcr_cur_mode = TCR_DEFAULT; 1478 lp->tcr_cur_mode = TCR_DEFAULT;
1489 lp->rcr_cur_mode = RCR_DEFAULT; 1479 lp->rcr_cur_mode = RCR_DEFAULT;
@@ -1789,7 +1779,7 @@ static const struct net_device_ops smc_netdev_ops = {
1789 * I just deleted auto_irq.c, since it was never built... 1779 * I just deleted auto_irq.c, since it was never built...
1790 * --jgarzik 1780 * --jgarzik
1791 */ 1781 */
1792static int __devinit smc_findirq(struct smc_local *lp) 1782static int smc_findirq(struct smc_local *lp)
1793{ 1783{
1794 void __iomem *ioaddr = lp->base; 1784 void __iomem *ioaddr = lp->base;
1795 int timeout = 20; 1785 int timeout = 20;
@@ -1863,8 +1853,8 @@ static int __devinit smc_findirq(struct smc_local *lp)
1863 * o actually GRAB the irq. 1853 * o actually GRAB the irq.
1864 * o GRAB the region 1854 * o GRAB the region
1865 */ 1855 */
1866static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr, 1856static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
1867 unsigned long irq_flags) 1857 unsigned long irq_flags)
1868{ 1858{
1869 struct smc_local *lp = netdev_priv(dev); 1859 struct smc_local *lp = netdev_priv(dev);
1870 static int version_printed = 0; 1860 static int version_printed = 0;
@@ -2211,7 +2201,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2211 * 0 --> there is a device 2201 * 0 --> there is a device
2212 * anything else, error 2202 * anything else, error
2213 */ 2203 */
2214static int __devinit smc_drv_probe(struct platform_device *pdev) 2204static int smc_drv_probe(struct platform_device *pdev)
2215{ 2205{
2216 struct smc91x_platdata *pd = pdev->dev.platform_data; 2206 struct smc91x_platdata *pd = pdev->dev.platform_data;
2217 struct smc_local *lp; 2207 struct smc_local *lp;
@@ -2324,7 +2314,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
2324 return ret; 2314 return ret;
2325} 2315}
2326 2316
2327static int __devexit smc_drv_remove(struct platform_device *pdev) 2317static int smc_drv_remove(struct platform_device *pdev)
2328{ 2318{
2329 struct net_device *ndev = platform_get_drvdata(pdev); 2319 struct net_device *ndev = platform_get_drvdata(pdev);
2330 struct smc_local *lp = netdev_priv(ndev); 2320 struct smc_local *lp = netdev_priv(ndev);
@@ -2407,7 +2397,7 @@ static struct dev_pm_ops smc_drv_pm_ops = {
2407 2397
2408static struct platform_driver smc_driver = { 2398static struct platform_driver smc_driver = {
2409 .probe = smc_drv_probe, 2399 .probe = smc_drv_probe,
2410 .remove = __devexit_p(smc_drv_remove), 2400 .remove = smc_drv_remove,
2411 .driver = { 2401 .driver = {
2412 .name = CARDNAME, 2402 .name = CARDNAME,
2413 .owner = THIS_MODULE, 2403 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 5f53fbbf67be..370e13dde115 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -286,16 +286,16 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
286 286
287#define SMC_IO_SHIFT (lp->io_shift) 287#define SMC_IO_SHIFT (lp->io_shift)
288 288
289#define SMC_inb(a, r) readb((a) + (r)) 289#define SMC_inb(a, r) ioread8((a) + (r))
290#define SMC_inw(a, r) readw((a) + (r)) 290#define SMC_inw(a, r) ioread16((a) + (r))
291#define SMC_inl(a, r) readl((a) + (r)) 291#define SMC_inl(a, r) ioread32((a) + (r))
292#define SMC_outb(v, a, r) writeb(v, (a) + (r)) 292#define SMC_outb(v, a, r) iowrite8(v, (a) + (r))
293#define SMC_outw(v, a, r) writew(v, (a) + (r)) 293#define SMC_outw(v, a, r) iowrite16(v, (a) + (r))
294#define SMC_outl(v, a, r) writel(v, (a) + (r)) 294#define SMC_outl(v, a, r) iowrite32(v, (a) + (r))
295#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 295#define SMC_insw(a, r, p, l) ioread16_rep((a) + (r), p, l)
296#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 296#define SMC_outsw(a, r, p, l) iowrite16_rep((a) + (r), p, l)
297#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 297#define SMC_insl(a, r, p, l) ioread32_rep((a) + (r), p, l)
298#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 298#define SMC_outsl(a, r, p, l) iowrite32_rep((a) + (r), p, l)
299 299
300#define RPC_LSA_DEFAULT RPC_LED_100_10 300#define RPC_LSA_DEFAULT RPC_LED_100_10
301#define RPC_LSB_DEFAULT RPC_LED_TX_RX 301#define RPC_LSB_DEFAULT RPC_LED_TX_RX
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c53c0f4e2ce3..4616bf27d515 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -253,7 +253,7 @@ smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf,
253 } 253 }
254 254
255 if (pdata->config.flags & SMSC911X_USE_32BIT) { 255 if (pdata->config.flags & SMSC911X_USE_32BIT) {
256 writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); 256 iowrite32_rep(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount);
257 goto out; 257 goto out;
258 } 258 }
259 259
@@ -285,7 +285,7 @@ smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
285 } 285 }
286 286
287 if (pdata->config.flags & SMSC911X_USE_32BIT) { 287 if (pdata->config.flags & SMSC911X_USE_32BIT) {
288 writesl(pdata->ioaddr + __smsc_shift(pdata, 288 iowrite32_rep(pdata->ioaddr + __smsc_shift(pdata,
289 TX_DATA_FIFO), buf, wordcount); 289 TX_DATA_FIFO), buf, wordcount);
290 goto out; 290 goto out;
291 } 291 }
@@ -319,7 +319,7 @@ smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
319 } 319 }
320 320
321 if (pdata->config.flags & SMSC911X_USE_32BIT) { 321 if (pdata->config.flags & SMSC911X_USE_32BIT) {
322 readsl(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount); 322 ioread32_rep(pdata->ioaddr + RX_DATA_FIFO, buf, wordcount);
323 goto out; 323 goto out;
324 } 324 }
325 325
@@ -351,7 +351,7 @@ smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
351 } 351 }
352 352
353 if (pdata->config.flags & SMSC911X_USE_32BIT) { 353 if (pdata->config.flags & SMSC911X_USE_32BIT) {
354 readsl(pdata->ioaddr + __smsc_shift(pdata, 354 ioread32_rep(pdata->ioaddr + __smsc_shift(pdata,
355 RX_DATA_FIFO), buf, wordcount); 355 RX_DATA_FIFO), buf, wordcount);
356 goto out; 356 goto out;
357 } 357 }
@@ -1031,8 +1031,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
1031 return 0; 1031 return 0;
1032} 1032}
1033 1033
1034static int __devinit smsc911x_mii_init(struct platform_device *pdev, 1034static int smsc911x_mii_init(struct platform_device *pdev,
1035 struct net_device *dev) 1035 struct net_device *dev)
1036{ 1036{
1037 struct smsc911x_data *pdata = netdev_priv(dev); 1037 struct smsc911x_data *pdata = netdev_priv(dev);
1038 int err = -ENXIO, i; 1038 int err = -ENXIO, i;
@@ -1463,11 +1463,6 @@ static int smsc911x_open(struct net_device *dev)
1463 return -EAGAIN; 1463 return -EAGAIN;
1464 } 1464 }
1465 1465
1466 if (!is_valid_ether_addr(dev->dev_addr)) {
1467 SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
1468 return -EADDRNOTAVAIL;
1469 }
1470
1471 /* Reset the LAN911x */ 1466 /* Reset the LAN911x */
1472 if (smsc911x_soft_reset(pdata)) { 1467 if (smsc911x_soft_reset(pdata)) {
1473 SMSC_WARN(pdata, hw, "soft reset failed"); 1468 SMSC_WARN(pdata, hw, "soft reset failed");
@@ -2092,7 +2087,7 @@ static const struct net_device_ops smsc911x_netdev_ops = {
2092}; 2087};
2093 2088
2094/* copies the current mac address from hardware to dev->dev_addr */ 2089/* copies the current mac address from hardware to dev->dev_addr */
2095static void __devinit smsc911x_read_mac_address(struct net_device *dev) 2090static void smsc911x_read_mac_address(struct net_device *dev)
2096{ 2091{
2097 struct smsc911x_data *pdata = netdev_priv(dev); 2092 struct smsc911x_data *pdata = netdev_priv(dev);
2098 u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH); 2093 u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
@@ -2107,7 +2102,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
2107} 2102}
2108 2103
2109/* Initializing private device structures, only called from probe */ 2104/* Initializing private device structures, only called from probe */
2110static int __devinit smsc911x_init(struct net_device *dev) 2105static int smsc911x_init(struct net_device *dev)
2111{ 2106{
2112 struct smsc911x_data *pdata = netdev_priv(dev); 2107 struct smsc911x_data *pdata = netdev_priv(dev);
2113 unsigned int byte_test, mask; 2108 unsigned int byte_test, mask;
@@ -2244,7 +2239,7 @@ static int __devinit smsc911x_init(struct net_device *dev)
2244 return 0; 2239 return 0;
2245} 2240}
2246 2241
2247static int __devexit smsc911x_drv_remove(struct platform_device *pdev) 2242static int smsc911x_drv_remove(struct platform_device *pdev)
2248{ 2243{
2249 struct net_device *dev; 2244 struct net_device *dev;
2250 struct smsc911x_data *pdata; 2245 struct smsc911x_data *pdata;
@@ -2301,9 +2296,8 @@ static const struct smsc911x_ops shifted_smsc911x_ops = {
2301}; 2296};
2302 2297
2303#ifdef CONFIG_OF 2298#ifdef CONFIG_OF
2304static int __devinit smsc911x_probe_config_dt( 2299static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
2305 struct smsc911x_platform_config *config, 2300 struct device_node *np)
2306 struct device_node *np)
2307{ 2301{
2308 const char *mac; 2302 const char *mac;
2309 u32 width = 0; 2303 u32 width = 0;
@@ -2351,7 +2345,7 @@ static inline int smsc911x_probe_config_dt(
2351} 2345}
2352#endif /* CONFIG_OF */ 2346#endif /* CONFIG_OF */
2353 2347
2354static int __devinit smsc911x_drv_probe(struct platform_device *pdev) 2348static int smsc911x_drv_probe(struct platform_device *pdev)
2355{ 2349{
2356 struct device_node *np = pdev->dev.of_node; 2350 struct device_node *np = pdev->dev.of_node;
2357 struct net_device *dev; 2351 struct net_device *dev;
@@ -2589,7 +2583,7 @@ MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
2589 2583
2590static struct platform_driver smsc911x_driver = { 2584static struct platform_driver smsc911x_driver = {
2591 .probe = smsc911x_drv_probe, 2585 .probe = smsc911x_drv_probe,
2592 .remove = __devexit_p(smsc911x_drv_remove), 2586 .remove = smsc911x_drv_remove,
2593 .driver = { 2587 .driver = {
2594 .name = SMSC_CHIPNAME, 2588 .name = SMSC_CHIPNAME,
2595 .owner = THIS_MODULE, 2589 .owner = THIS_MODULE,
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 1fcd914ec39b..3c586585e1b3 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1577,7 +1577,7 @@ static const struct net_device_ops smsc9420_netdev_ops = {
1577#endif /* CONFIG_NET_POLL_CONTROLLER */ 1577#endif /* CONFIG_NET_POLL_CONTROLLER */
1578}; 1578};
1579 1579
1580static int __devinit 1580static int
1581smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1581smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1582{ 1582{
1583 struct net_device *dev; 1583 struct net_device *dev;
@@ -1702,7 +1702,7 @@ out_0:
1702 return -ENODEV; 1702 return -ENODEV;
1703} 1703}
1704 1704
1705static void __devexit smsc9420_remove(struct pci_dev *pdev) 1705static void smsc9420_remove(struct pci_dev *pdev)
1706{ 1706{
1707 struct net_device *dev; 1707 struct net_device *dev;
1708 struct smsc9420_pdata *pd; 1708 struct smsc9420_pdata *pd;
@@ -1736,7 +1736,7 @@ static struct pci_driver smsc9420_driver = {
1736 .name = DRV_NAME, 1736 .name = DRV_NAME,
1737 .id_table = smsc9420_id_table, 1737 .id_table = smsc9420_id_table,
1738 .probe = smsc9420_probe, 1738 .probe = smsc9420_probe,
1739 .remove = __devexit_p(smsc9420_remove), 1739 .remove = smsc9420_remove,
1740#ifdef CONFIG_PM 1740#ifdef CONFIG_PM
1741 .suspend = smsc9420_suspend, 1741 .suspend = smsc9420_suspend,
1742 .resume = smsc9420_resume, 1742 .resume = smsc9420_resume,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9f448279e12a..1164930a40a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -54,31 +54,6 @@ config STMMAC_DA
54 By default, the DMA arbitration scheme is based on Round-robin 54 By default, the DMA arbitration scheme is based on Round-robin
55 (rx:tx priority is 1:1). 55 (rx:tx priority is 1:1).
56 56
57config STMMAC_TIMER
58 bool "STMMAC Timer optimisation"
59 default n
60 depends on RTC_HCTOSYS_DEVICE
61 ---help---
62 Use an external timer for mitigating the number of network
63 interrupts. Currently, for SH architectures, it is possible
64 to use the TMU channel 2 and the SH-RTC device.
65
66choice
67 prompt "Select Timer device"
68 depends on STMMAC_TIMER
69
70config STMMAC_TMU_TIMER
71 bool "TMU channel 2"
72 depends on CPU_SH4
73 ---help---
74
75config STMMAC_RTC_TIMER
76 bool "Real time clock"
77 depends on RTC_CLASS
78 ---help---
79
80endchoice
81
82choice 57choice
83 prompt "Select the DMA TX/RX descriptor operating modes" 58 prompt "Select the DMA TX/RX descriptor operating modes"
84 depends on STMMAC_ETH 59 depends on STMMAC_ETH
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index bc965ac9e025..c8e8ea60ac19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,5 +1,4 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o 2stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
4stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o 3stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
5stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 4stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 719be3912aa9..186d14806122 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -48,6 +48,10 @@
48#define CHIP_DBG(fmt, args...) do { } while (0) 48#define CHIP_DBG(fmt, args...) do { } while (0)
49#endif 49#endif
50 50
51/* Synopsys Core versions */
52#define DWMAC_CORE_3_40 0x34
53#define DWMAC_CORE_3_50 0x35
54
51#undef FRAME_FILTER_DEBUG 55#undef FRAME_FILTER_DEBUG
52/* #define FRAME_FILTER_DEBUG */ 56/* #define FRAME_FILTER_DEBUG */
53 57
@@ -81,7 +85,7 @@ struct stmmac_extra_stats {
81 unsigned long rx_missed_cntr; 85 unsigned long rx_missed_cntr;
82 unsigned long rx_overflow_cntr; 86 unsigned long rx_overflow_cntr;
83 unsigned long rx_vlan; 87 unsigned long rx_vlan;
84 /* Tx/Rx IRQ errors */ 88 /* Tx/Rx IRQ error info */
85 unsigned long tx_undeflow_irq; 89 unsigned long tx_undeflow_irq;
86 unsigned long tx_process_stopped_irq; 90 unsigned long tx_process_stopped_irq;
87 unsigned long tx_jabber_irq; 91 unsigned long tx_jabber_irq;
@@ -91,18 +95,23 @@ struct stmmac_extra_stats {
91 unsigned long rx_watchdog_irq; 95 unsigned long rx_watchdog_irq;
92 unsigned long tx_early_irq; 96 unsigned long tx_early_irq;
93 unsigned long fatal_bus_error_irq; 97 unsigned long fatal_bus_error_irq;
94 /* Extra info */ 98 /* Tx/Rx IRQ Events */
99 unsigned long rx_early_irq;
95 unsigned long threshold; 100 unsigned long threshold;
96 unsigned long tx_pkt_n; 101 unsigned long tx_pkt_n;
97 unsigned long rx_pkt_n; 102 unsigned long rx_pkt_n;
98 unsigned long poll_n;
99 unsigned long sched_timer_n;
100 unsigned long normal_irq_n; 103 unsigned long normal_irq_n;
104 unsigned long rx_normal_irq_n;
105 unsigned long napi_poll;
106 unsigned long tx_normal_irq_n;
107 unsigned long tx_clean;
108 unsigned long tx_reset_ic_bit;
109 unsigned long irq_receive_pmt_irq_n;
110 /* MMC info */
101 unsigned long mmc_tx_irq_n; 111 unsigned long mmc_tx_irq_n;
102 unsigned long mmc_rx_irq_n; 112 unsigned long mmc_rx_irq_n;
103 unsigned long mmc_rx_csum_offload_irq_n; 113 unsigned long mmc_rx_csum_offload_irq_n;
104 /* EEE */ 114 /* EEE */
105 unsigned long irq_receive_pmt_irq_n;
106 unsigned long irq_tx_path_in_lpi_mode_n; 115 unsigned long irq_tx_path_in_lpi_mode_n;
107 unsigned long irq_tx_path_exit_lpi_mode_n; 116 unsigned long irq_tx_path_exit_lpi_mode_n;
108 unsigned long irq_rx_path_in_lpi_mode_n; 117 unsigned long irq_rx_path_in_lpi_mode_n;
@@ -162,6 +171,15 @@ struct stmmac_extra_stats {
162#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ 171#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
163#define DEFAULT_DMA_PBL 8 172#define DEFAULT_DMA_PBL 8
164 173
174/* Max/Min RI Watchdog Timer count value */
175#define MAX_DMA_RIWT 0xff
176#define MIN_DMA_RIWT 0x20
177/* Tx coalesce parameters */
178#define STMMAC_COAL_TX_TIMER 40000
179#define STMMAC_MAX_COAL_TX_TICK 100000
180#define STMMAC_TX_MAX_FRAMES 256
181#define STMMAC_TX_FRAMES 64
182
165enum rx_frame_status { /* IPC status */ 183enum rx_frame_status { /* IPC status */
166 good_frame = 0, 184 good_frame = 0,
167 discard_frame = 1, 185 discard_frame = 1,
@@ -169,10 +187,11 @@ enum rx_frame_status { /* IPC status */
169 llc_snap = 4, 187 llc_snap = 4,
170}; 188};
171 189
172enum tx_dma_irq_status { 190enum dma_irq_status {
173 tx_hard_error = 1, 191 tx_hard_error = 0x1,
174 tx_hard_error_bump_tc = 2, 192 tx_hard_error_bump_tc = 0x2,
175 handle_tx_rx = 3, 193 handle_rx = 0x4,
194 handle_tx = 0x8,
176}; 195};
177 196
178enum core_specific_irq_mask { 197enum core_specific_irq_mask {
@@ -296,6 +315,8 @@ struct stmmac_dma_ops {
296 struct stmmac_extra_stats *x); 315 struct stmmac_extra_stats *x);
297 /* If supported then get the optional core features */ 316 /* If supported then get the optional core features */
298 unsigned int (*get_hw_feature) (void __iomem *ioaddr); 317 unsigned int (*get_hw_feature) (void __iomem *ioaddr);
318 /* Program the HW RX Watchdog */
319 void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
299}; 320};
300 321
301struct stmmac_ops { 322struct stmmac_ops {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 0e4cacedc1f0..7ad56afd6324 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -230,8 +230,5 @@ enum rtc_control {
230#define GMAC_MMC_TX_INTR 0x108 230#define GMAC_MMC_TX_INTR 0x108
231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
232 232
233/* Synopsys Core versions */
234#define DWMAC_CORE_3_40 0x34
235
236extern const struct stmmac_dma_ops dwmac1000_dma_ops; 233extern const struct stmmac_dma_ops dwmac1000_dma_ops;
237#endif /* __DWMAC1000_H__ */ 234#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 033500090f55..bf83c03bfd06 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -174,6 +174,11 @@ static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
174 return readl(ioaddr + DMA_HW_FEATURE); 174 return readl(ioaddr + DMA_HW_FEATURE);
175} 175}
176 176
177static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
178{
179 writel(riwt, ioaddr + DMA_RX_WATCHDOG);
180}
181
177const struct stmmac_dma_ops dwmac1000_dma_ops = { 182const struct stmmac_dma_ops dwmac1000_dma_ops = {
178 .init = dwmac1000_dma_init, 183 .init = dwmac1000_dma_init,
179 .dump_regs = dwmac1000_dump_dma_regs, 184 .dump_regs = dwmac1000_dump_dma_regs,
@@ -187,4 +192,5 @@ const struct stmmac_dma_ops dwmac1000_dma_ops = {
187 .stop_rx = dwmac_dma_stop_rx, 192 .stop_rx = dwmac_dma_stop_rx,
188 .dma_interrupt = dwmac_dma_interrupt, 193 .dma_interrupt = dwmac_dma_interrupt,
189 .get_hw_feature = dwmac1000_get_hw_feature, 194 .get_hw_feature = dwmac1000_get_hw_feature,
195 .rx_watchdog = dwmac1000_rx_watchdog,
190}; 196};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e49c9a0fd6ff..ab4896ecac1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -35,7 +35,10 @@
35#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ 35#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
36#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ 36#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
37#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ 37#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
38#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */ 38/* Rx watchdog register */
39#define DMA_RX_WATCHDOG 0x00001024
40/* AXI Bus Mode */
41#define DMA_AXI_BUS_MODE 0x00001028
39#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ 42#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
40#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ 43#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
41#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ 44#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
@@ -77,8 +80,6 @@
77#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ 80#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
78#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ 81#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
79#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */ 82#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
80#define DMA_STATUS_GMI 0x08000000
81#define DMA_STATUS_GLI 0x04000000
82#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ 83#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
83#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ 84#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
84#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ 85#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 4e0e18a44fcc..491d7e930603 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -204,16 +204,28 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
204 } 204 }
205 } 205 }
206 /* TX/RX NORMAL interrupts */ 206 /* TX/RX NORMAL interrupts */
207 if (intr_status & DMA_STATUS_NIS) { 207 if (likely(intr_status & DMA_STATUS_NIS)) {
208 x->normal_irq_n++; 208 x->normal_irq_n++;
209 if (likely((intr_status & DMA_STATUS_RI) || 209 if (likely(intr_status & DMA_STATUS_RI)) {
210 (intr_status & (DMA_STATUS_TI)))) 210 u32 value = readl(ioaddr + DMA_INTR_ENA);
211 ret = handle_tx_rx; 211 /* to schedule NAPI on real RIE event. */
212 if (likely(value & DMA_INTR_ENA_RIE)) {
213 x->rx_normal_irq_n++;
214 ret |= handle_rx;
215 }
216 }
217 if (likely(intr_status & DMA_STATUS_TI)) {
218 x->tx_normal_irq_n++;
219 ret |= handle_tx;
220 }
221 if (unlikely(intr_status & DMA_STATUS_ERI))
222 x->rx_early_irq++;
212 } 223 }
213 /* Optional hardware blocks, interrupts should be disabled */ 224 /* Optional hardware blocks, interrupts should be disabled */
214 if (unlikely(intr_status & 225 if (unlikely(intr_status &
215 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) 226 (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
216 pr_info("%s: unexpected status %08x\n", __func__, intr_status); 227 pr_info("%s: unexpected status %08x\n", __func__, intr_status);
228
217 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ 229 /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
218 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); 230 writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
219 231
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 7d51a65ab099..023a4fb4efa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,16 +24,13 @@
24#define __STMMAC_H__ 24#define __STMMAC_H__
25 25
26#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
27#define DRV_MODULE_VERSION "March_2012" 27#define DRV_MODULE_VERSION "Nov_2012"
28 28
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/stmmac.h> 30#include <linux/stmmac.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include "common.h" 33#include "common.h"
34#ifdef CONFIG_STMMAC_TIMER
35#include "stmmac_timer.h"
36#endif
37 34
38struct stmmac_priv { 35struct stmmac_priv {
39 /* Frequently used values are kept adjacent for cache effect */ 36 /* Frequently used values are kept adjacent for cache effect */
@@ -77,9 +74,6 @@ struct stmmac_priv {
77 spinlock_t tx_lock; 74 spinlock_t tx_lock;
78 int wolopts; 75 int wolopts;
79 int wol_irq; 76 int wol_irq;
80#ifdef CONFIG_STMMAC_TIMER
81 struct stmmac_timer *tm;
82#endif
83 struct plat_stmmacenet_data *plat; 77 struct plat_stmmacenet_data *plat;
84 struct stmmac_counters mmc; 78 struct stmmac_counters mmc;
85 struct dma_features dma_cap; 79 struct dma_features dma_cap;
@@ -93,6 +87,12 @@ struct stmmac_priv {
93 int eee_enabled; 87 int eee_enabled;
94 int eee_active; 88 int eee_active;
95 int tx_lpi_timer; 89 int tx_lpi_timer;
90 struct timer_list txtimer;
91 u32 tx_count_frames;
92 u32 tx_coal_frames;
93 u32 tx_coal_timer;
94 int use_riwt;
95 u32 rx_riwt;
96}; 96};
97 97
98extern int phyaddr; 98extern int phyaddr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 76fd61aa005f..1372ce210b58 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -76,7 +76,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
76 STMMAC_STAT(rx_missed_cntr), 76 STMMAC_STAT(rx_missed_cntr),
77 STMMAC_STAT(rx_overflow_cntr), 77 STMMAC_STAT(rx_overflow_cntr),
78 STMMAC_STAT(rx_vlan), 78 STMMAC_STAT(rx_vlan),
79 /* Tx/Rx IRQ errors */ 79 /* Tx/Rx IRQ error info */
80 STMMAC_STAT(tx_undeflow_irq), 80 STMMAC_STAT(tx_undeflow_irq),
81 STMMAC_STAT(tx_process_stopped_irq), 81 STMMAC_STAT(tx_process_stopped_irq),
82 STMMAC_STAT(tx_jabber_irq), 82 STMMAC_STAT(tx_jabber_irq),
@@ -86,18 +86,23 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
86 STMMAC_STAT(rx_watchdog_irq), 86 STMMAC_STAT(rx_watchdog_irq),
87 STMMAC_STAT(tx_early_irq), 87 STMMAC_STAT(tx_early_irq),
88 STMMAC_STAT(fatal_bus_error_irq), 88 STMMAC_STAT(fatal_bus_error_irq),
89 /* Extra info */ 89 /* Tx/Rx IRQ Events */
90 STMMAC_STAT(rx_early_irq),
90 STMMAC_STAT(threshold), 91 STMMAC_STAT(threshold),
91 STMMAC_STAT(tx_pkt_n), 92 STMMAC_STAT(tx_pkt_n),
92 STMMAC_STAT(rx_pkt_n), 93 STMMAC_STAT(rx_pkt_n),
93 STMMAC_STAT(poll_n),
94 STMMAC_STAT(sched_timer_n),
95 STMMAC_STAT(normal_irq_n),
96 STMMAC_STAT(normal_irq_n), 94 STMMAC_STAT(normal_irq_n),
95 STMMAC_STAT(rx_normal_irq_n),
96 STMMAC_STAT(napi_poll),
97 STMMAC_STAT(tx_normal_irq_n),
98 STMMAC_STAT(tx_clean),
99 STMMAC_STAT(tx_reset_ic_bit),
100 STMMAC_STAT(irq_receive_pmt_irq_n),
101 /* MMC info */
97 STMMAC_STAT(mmc_tx_irq_n), 102 STMMAC_STAT(mmc_tx_irq_n),
98 STMMAC_STAT(mmc_rx_irq_n), 103 STMMAC_STAT(mmc_rx_irq_n),
99 STMMAC_STAT(mmc_rx_csum_offload_irq_n), 104 STMMAC_STAT(mmc_rx_csum_offload_irq_n),
100 STMMAC_STAT(irq_receive_pmt_irq_n), 105 /* EEE */
101 STMMAC_STAT(irq_tx_path_in_lpi_mode_n), 106 STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
102 STMMAC_STAT(irq_tx_path_exit_lpi_mode_n), 107 STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
103 STMMAC_STAT(irq_rx_path_in_lpi_mode_n), 108 STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
@@ -519,6 +524,87 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
519 return phy_ethtool_set_eee(priv->phydev, edata); 524 return phy_ethtool_set_eee(priv->phydev, edata);
520} 525}
521 526
527static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
528{
529 unsigned long clk = clk_get_rate(priv->stmmac_clk);
530
531 if (!clk)
532 return 0;
533
534 return (usec * (clk / 1000000)) / 256;
535}
536
537static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
538{
539 unsigned long clk = clk_get_rate(priv->stmmac_clk);
540
541 if (!clk)
542 return 0;
543
544 return (riwt * 256) / (clk / 1000000);
545}
546
547static int stmmac_get_coalesce(struct net_device *dev,
548 struct ethtool_coalesce *ec)
549{
550 struct stmmac_priv *priv = netdev_priv(dev);
551
552 ec->tx_coalesce_usecs = priv->tx_coal_timer;
553 ec->tx_max_coalesced_frames = priv->tx_coal_frames;
554
555 if (priv->use_riwt)
556 ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
557
558 return 0;
559}
560
561static int stmmac_set_coalesce(struct net_device *dev,
562 struct ethtool_coalesce *ec)
563{
564 struct stmmac_priv *priv = netdev_priv(dev);
565 unsigned int rx_riwt;
566
567 /* Check not supported parameters */
568 if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
569 (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
570 (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
571 (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
572 (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
573 (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
574 (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
575 (ec->rx_max_coalesced_frames_high) ||
576 (ec->tx_max_coalesced_frames_irq) ||
577 (ec->stats_block_coalesce_usecs) ||
578 (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
579 return -EOPNOTSUPP;
580
581 if (ec->rx_coalesce_usecs == 0)
582 return -EINVAL;
583
584 if ((ec->tx_coalesce_usecs == 0) &&
585 (ec->tx_max_coalesced_frames == 0))
586 return -EINVAL;
587
588 if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
589 (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
590 return -EINVAL;
591
592 rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
593
594 if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
595 return -EINVAL;
596 else if (!priv->use_riwt)
597 return -EOPNOTSUPP;
598
599 /* Only copy relevant parameters, ignore all others. */
600 priv->tx_coal_frames = ec->tx_max_coalesced_frames;
601 priv->tx_coal_timer = ec->tx_coalesce_usecs;
602 priv->rx_riwt = rx_riwt;
603 priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
604
605 return 0;
606}
607
522static const struct ethtool_ops stmmac_ethtool_ops = { 608static const struct ethtool_ops stmmac_ethtool_ops = {
523 .begin = stmmac_check_if_running, 609 .begin = stmmac_check_if_running,
524 .get_drvinfo = stmmac_ethtool_getdrvinfo, 610 .get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -539,6 +625,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
539 .set_eee = stmmac_ethtool_op_set_eee, 625 .set_eee = stmmac_ethtool_op_set_eee,
540 .get_sset_count = stmmac_get_sset_count, 626 .get_sset_count = stmmac_get_sset_count,
541 .get_ts_info = ethtool_op_get_ts_info, 627 .get_ts_info = ethtool_op_get_ts_info,
628 .get_coalesce = stmmac_get_coalesce,
629 .set_coalesce = stmmac_set_coalesce,
542}; 630};
543 631
544void stmmac_set_ethtool_ops(struct net_device *netdev) 632void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c6cdbc4eb05e..542edbcd92c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -115,16 +115,6 @@ static int tc = TC_DEFAULT;
115module_param(tc, int, S_IRUGO | S_IWUSR); 115module_param(tc, int, S_IRUGO | S_IWUSR);
116MODULE_PARM_DESC(tc, "DMA threshold control value"); 116MODULE_PARM_DESC(tc, "DMA threshold control value");
117 117
118/* Pay attention to tune this parameter; take care of both
119 * hardware capability and network stabitily/performance impact.
120 * Many tests showed that ~4ms latency seems to be good enough. */
121#ifdef CONFIG_STMMAC_TIMER
122#define DEFAULT_PERIODIC_RATE 256
123static int tmrate = DEFAULT_PERIODIC_RATE;
124module_param(tmrate, int, S_IRUGO | S_IWUSR);
125MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)");
126#endif
127
128#define DMA_BUFFER_SIZE BUF_SIZE_2KiB 118#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
129static int buf_sz = DMA_BUFFER_SIZE; 119static int buf_sz = DMA_BUFFER_SIZE;
130module_param(buf_sz, int, S_IRUGO | S_IWUSR); 120module_param(buf_sz, int, S_IRUGO | S_IWUSR);
@@ -147,6 +137,8 @@ static int stmmac_init_fs(struct net_device *dev);
147static void stmmac_exit_fs(void); 137static void stmmac_exit_fs(void);
148#endif 138#endif
149 139
140#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
141
150/** 142/**
151 * stmmac_verify_args - verify the driver parameters. 143 * stmmac_verify_args - verify the driver parameters.
152 * Description: it verifies if some wrong parameter is passed to the driver. 144 * Description: it verifies if some wrong parameter is passed to the driver.
@@ -536,12 +528,6 @@ static void init_dma_desc_rings(struct net_device *dev)
536 else 528 else
537 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 529 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
538 530
539#ifdef CONFIG_STMMAC_TIMER
540 /* Disable interrupts on completion for the reception if timer is on */
541 if (likely(priv->tm->enable))
542 dis_ic = 1;
543#endif
544
545 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", 531 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
546 txsize, rxsize, bfsize); 532 txsize, rxsize, bfsize);
547 533
@@ -617,6 +603,8 @@ static void init_dma_desc_rings(struct net_device *dev)
617 priv->dirty_tx = 0; 603 priv->dirty_tx = 0;
618 priv->cur_tx = 0; 604 priv->cur_tx = 0;
619 605
606 if (priv->use_riwt)
607 dis_ic = 1;
620 /* Clear the Rx/Tx descriptors */ 608 /* Clear the Rx/Tx descriptors */
621 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic); 609 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
622 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize); 610 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
@@ -704,16 +692,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
704} 692}
705 693
706/** 694/**
707 * stmmac_tx: 695 * stmmac_tx_clean:
708 * @priv: private driver structure 696 * @priv: private data pointer
709 * Description: it reclaims resources after transmission completes. 697 * Description: it reclaims resources after transmission completes.
710 */ 698 */
711static void stmmac_tx(struct stmmac_priv *priv) 699static void stmmac_tx_clean(struct stmmac_priv *priv)
712{ 700{
713 unsigned int txsize = priv->dma_tx_size; 701 unsigned int txsize = priv->dma_tx_size;
714 702
715 spin_lock(&priv->tx_lock); 703 spin_lock(&priv->tx_lock);
716 704
705 priv->xstats.tx_clean++;
706
717 while (priv->dirty_tx != priv->cur_tx) { 707 while (priv->dirty_tx != priv->cur_tx) {
718 int last; 708 int last;
719 unsigned int entry = priv->dirty_tx % txsize; 709 unsigned int entry = priv->dirty_tx % txsize;
@@ -773,69 +763,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
773 spin_unlock(&priv->tx_lock); 763 spin_unlock(&priv->tx_lock);
774} 764}
775 765
776static inline void stmmac_enable_irq(struct stmmac_priv *priv) 766static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
777{
778#ifdef CONFIG_STMMAC_TIMER
779 if (likely(priv->tm->enable))
780 priv->tm->timer_start(tmrate);
781 else
782#endif
783 priv->hw->dma->enable_dma_irq(priv->ioaddr);
784}
785
786static inline void stmmac_disable_irq(struct stmmac_priv *priv)
787{
788#ifdef CONFIG_STMMAC_TIMER
789 if (likely(priv->tm->enable))
790 priv->tm->timer_stop();
791 else
792#endif
793 priv->hw->dma->disable_dma_irq(priv->ioaddr);
794}
795
796static int stmmac_has_work(struct stmmac_priv *priv)
797{
798 unsigned int has_work = 0;
799 int rxret, tx_work = 0;
800
801 rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
802 (priv->cur_rx % priv->dma_rx_size));
803
804 if (priv->dirty_tx != priv->cur_tx)
805 tx_work = 1;
806
807 if (likely(!rxret || tx_work))
808 has_work = 1;
809
810 return has_work;
811}
812
813static inline void _stmmac_schedule(struct stmmac_priv *priv)
814{ 767{
815 if (likely(stmmac_has_work(priv))) { 768 priv->hw->dma->enable_dma_irq(priv->ioaddr);
816 stmmac_disable_irq(priv);
817 napi_schedule(&priv->napi);
818 }
819} 769}
820 770
821#ifdef CONFIG_STMMAC_TIMER 771static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
822void stmmac_schedule(struct net_device *dev)
823{ 772{
824 struct stmmac_priv *priv = netdev_priv(dev); 773 priv->hw->dma->disable_dma_irq(priv->ioaddr);
825
826 priv->xstats.sched_timer_n++;
827
828 _stmmac_schedule(priv);
829} 774}
830 775
831static void stmmac_no_timer_started(unsigned int x)
832{;
833};
834
835static void stmmac_no_timer_stopped(void)
836{;
837};
838#endif
839 776
840/** 777/**
841 * stmmac_tx_err: 778 * stmmac_tx_err:
@@ -858,16 +795,18 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
858 netif_wake_queue(priv->dev); 795 netif_wake_queue(priv->dev);
859} 796}
860 797
861
862static void stmmac_dma_interrupt(struct stmmac_priv *priv) 798static void stmmac_dma_interrupt(struct stmmac_priv *priv)
863{ 799{
864 int status; 800 int status;
865 801
866 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats); 802 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
867 if (likely(status == handle_tx_rx)) 803 if (likely((status & handle_rx)) || (status & handle_tx)) {
868 _stmmac_schedule(priv); 804 if (likely(napi_schedule_prep(&priv->napi))) {
869 805 stmmac_disable_dma_irq(priv);
870 else if (unlikely(status == tx_hard_error_bump_tc)) { 806 __napi_schedule(&priv->napi);
807 }
808 }
809 if (unlikely(status & tx_hard_error_bump_tc)) {
871 /* Try to bump up the dma threshold on this failure */ 810 /* Try to bump up the dma threshold on this failure */
872 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) { 811 if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
873 tc += 64; 812 tc += 64;
@@ -983,7 +922,6 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
983 /* Alternate (enhanced) DESC mode*/ 922 /* Alternate (enhanced) DESC mode*/
984 priv->dma_cap.enh_desc = 923 priv->dma_cap.enh_desc =
985 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; 924 (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
986
987 } 925 }
988 926
989 return hw_cap; 927 return hw_cap;
@@ -1025,6 +963,38 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1025} 963}
1026 964
1027/** 965/**
966 * stmmac_tx_timer:
967 * @data: data pointer
968 * Description:
969 * This is the timer handler to directly invoke the stmmac_tx_clean.
970 */
971static void stmmac_tx_timer(unsigned long data)
972{
973 struct stmmac_priv *priv = (struct stmmac_priv *)data;
974
975 stmmac_tx_clean(priv);
976}
977
978/**
979 * stmmac_tx_timer:
980 * @priv: private data structure
981 * Description:
982 * This inits the transmit coalesce parameters: i.e. timer rate,
983 * timer handler and default threshold used for enabling the
984 * interrupt on completion bit.
985 */
986static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
987{
988 priv->tx_coal_frames = STMMAC_TX_FRAMES;
989 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
990 init_timer(&priv->txtimer);
991 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
992 priv->txtimer.data = (unsigned long)priv;
993 priv->txtimer.function = stmmac_tx_timer;
994 add_timer(&priv->txtimer);
995}
996
997/**
1028 * stmmac_open - open entry point of the driver 998 * stmmac_open - open entry point of the driver
1029 * @dev : pointer to the device structure. 999 * @dev : pointer to the device structure.
1030 * Description: 1000 * Description:
@@ -1038,23 +1008,6 @@ static int stmmac_open(struct net_device *dev)
1038 struct stmmac_priv *priv = netdev_priv(dev); 1008 struct stmmac_priv *priv = netdev_priv(dev);
1039 int ret; 1009 int ret;
1040 1010
1041#ifdef CONFIG_STMMAC_TIMER
1042 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
1043 if (unlikely(priv->tm == NULL))
1044 return -ENOMEM;
1045
1046 priv->tm->freq = tmrate;
1047
1048 /* Test if the external timer can be actually used.
1049 * In case of failure continue without timer. */
1050 if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
1051 pr_warning("stmmaceth: cannot attach the external timer.\n");
1052 priv->tm->freq = 0;
1053 priv->tm->timer_start = stmmac_no_timer_started;
1054 priv->tm->timer_stop = stmmac_no_timer_stopped;
1055 } else
1056 priv->tm->enable = 1;
1057#endif
1058 clk_prepare_enable(priv->stmmac_clk); 1011 clk_prepare_enable(priv->stmmac_clk);
1059 1012
1060 stmmac_check_ether_addr(priv); 1013 stmmac_check_ether_addr(priv);
@@ -1141,10 +1094,6 @@ static int stmmac_open(struct net_device *dev)
1141 priv->hw->dma->start_tx(priv->ioaddr); 1094 priv->hw->dma->start_tx(priv->ioaddr);
1142 priv->hw->dma->start_rx(priv->ioaddr); 1095 priv->hw->dma->start_rx(priv->ioaddr);
1143 1096
1144#ifdef CONFIG_STMMAC_TIMER
1145 priv->tm->timer_start(tmrate);
1146#endif
1147
1148 /* Dump DMA/MAC registers */ 1097 /* Dump DMA/MAC registers */
1149 if (netif_msg_hw(priv)) { 1098 if (netif_msg_hw(priv)) {
1150 priv->hw->mac->dump_regs(priv->ioaddr); 1099 priv->hw->mac->dump_regs(priv->ioaddr);
@@ -1157,6 +1106,13 @@ static int stmmac_open(struct net_device *dev)
1157 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1106 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
1158 priv->eee_enabled = stmmac_eee_init(priv); 1107 priv->eee_enabled = stmmac_eee_init(priv);
1159 1108
1109 stmmac_init_tx_coalesce(priv);
1110
1111 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1112 priv->rx_riwt = MAX_DMA_RIWT;
1113 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1114 }
1115
1160 napi_enable(&priv->napi); 1116 napi_enable(&priv->napi);
1161 netif_start_queue(dev); 1117 netif_start_queue(dev);
1162 1118
@@ -1170,9 +1126,6 @@ open_error_wolirq:
1170 free_irq(dev->irq, dev); 1126 free_irq(dev->irq, dev);
1171 1127
1172open_error: 1128open_error:
1173#ifdef CONFIG_STMMAC_TIMER
1174 kfree(priv->tm);
1175#endif
1176 if (priv->phydev) 1129 if (priv->phydev)
1177 phy_disconnect(priv->phydev); 1130 phy_disconnect(priv->phydev);
1178 1131
@@ -1203,14 +1156,10 @@ static int stmmac_release(struct net_device *dev)
1203 1156
1204 netif_stop_queue(dev); 1157 netif_stop_queue(dev);
1205 1158
1206#ifdef CONFIG_STMMAC_TIMER
1207 /* Stop and release the timer */
1208 stmmac_close_ext_timer();
1209 if (priv->tm != NULL)
1210 kfree(priv->tm);
1211#endif
1212 napi_disable(&priv->napi); 1159 napi_disable(&priv->napi);
1213 1160
1161 del_timer_sync(&priv->txtimer);
1162
1214 /* Free the IRQ lines */ 1163 /* Free the IRQ lines */
1215 free_irq(dev->irq, dev); 1164 free_irq(dev->irq, dev);
1216 if (priv->wol_irq != dev->irq) 1165 if (priv->wol_irq != dev->irq)
@@ -1273,11 +1222,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1273 1222
1274#ifdef STMMAC_XMIT_DEBUG 1223#ifdef STMMAC_XMIT_DEBUG
1275 if ((skb->len > ETH_FRAME_LEN) || nfrags) 1224 if ((skb->len > ETH_FRAME_LEN) || nfrags)
1276 pr_info("stmmac xmit:\n" 1225 pr_debug("stmmac xmit: [entry %d]\n"
1277 "\tskb addr %p - len: %d - nopaged_len: %d\n" 1226 "\tskb addr %p - len: %d - nopaged_len: %d\n"
1278 "\tn_frags: %d - ip_summed: %d - %s gso\n", 1227 "\tn_frags: %d - ip_summed: %d - %s gso\n"
1279 skb, skb->len, nopaged_len, nfrags, skb->ip_summed, 1228 "\ttx_count_frames %d\n", entry,
1280 !skb_is_gso(skb) ? "isn't" : "is"); 1229 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
1230 !skb_is_gso(skb) ? "isn't" : "is",
1231 priv->tx_count_frames);
1281#endif 1232#endif
1282 1233
1283 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 1234 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -1287,9 +1238,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1287 1238
1288#ifdef STMMAC_XMIT_DEBUG 1239#ifdef STMMAC_XMIT_DEBUG
1289 if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) 1240 if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
1290 pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n" 1241 pr_debug("\tskb len: %d, nopaged_len: %d,\n"
1291 "\t\tn_frags: %d, ip_summed: %d\n", 1242 "\t\tn_frags: %d, ip_summed: %d\n",
1292 skb->len, nopaged_len, nfrags, skb->ip_summed); 1243 skb->len, nopaged_len, nfrags, skb->ip_summed);
1293#endif 1244#endif
1294 priv->tx_skbuff[entry] = skb; 1245 priv->tx_skbuff[entry] = skb;
1295 1246
@@ -1320,16 +1271,24 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1320 wmb(); 1271 wmb();
1321 } 1272 }
1322 1273
1323 /* Interrupt on completition only for the latest segment */ 1274 /* Finalize the latest segment. */
1324 priv->hw->desc->close_tx_desc(desc); 1275 priv->hw->desc->close_tx_desc(desc);
1325 1276
1326#ifdef CONFIG_STMMAC_TIMER
1327 /* Clean IC while using timer */
1328 if (likely(priv->tm->enable))
1329 priv->hw->desc->clear_tx_ic(desc);
1330#endif
1331
1332 wmb(); 1277 wmb();
1278 /* According to the coalesce parameter the IC bit for the latest
1279 * segment could be reset and the timer re-started to invoke the
1280 * stmmac_tx function. This approach takes care about the fragments.
1281 */
1282 priv->tx_count_frames += nfrags + 1;
1283 if (priv->tx_coal_frames > priv->tx_count_frames) {
1284 priv->hw->desc->clear_tx_ic(desc);
1285 priv->xstats.tx_reset_ic_bit++;
1286 TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
1287 priv->tx_count_frames);
1288 mod_timer(&priv->txtimer,
1289 STMMAC_COAL_TIMER(priv->tx_coal_timer));
1290 } else
1291 priv->tx_count_frames = 0;
1333 1292
1334 /* To avoid raise condition */ 1293 /* To avoid raise condition */
1335 priv->hw->desc->set_tx_owner(first); 1294 priv->hw->desc->set_tx_owner(first);
@@ -1471,14 +1430,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1471#endif 1430#endif
1472 skb->protocol = eth_type_trans(skb, priv->dev); 1431 skb->protocol = eth_type_trans(skb, priv->dev);
1473 1432
1474 if (unlikely(!priv->plat->rx_coe)) { 1433 if (unlikely(!priv->plat->rx_coe))
1475 /* No RX COE for old mac10/100 devices */
1476 skb_checksum_none_assert(skb); 1434 skb_checksum_none_assert(skb);
1477 netif_receive_skb(skb); 1435 else
1478 } else {
1479 skb->ip_summed = CHECKSUM_UNNECESSARY; 1436 skb->ip_summed = CHECKSUM_UNNECESSARY;
1480 napi_gro_receive(&priv->napi, skb); 1437
1481 } 1438 napi_gro_receive(&priv->napi, skb);
1482 1439
1483 priv->dev->stats.rx_packets++; 1440 priv->dev->stats.rx_packets++;
1484 priv->dev->stats.rx_bytes += frame_len; 1441 priv->dev->stats.rx_bytes += frame_len;
@@ -1500,21 +1457,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1500 * @budget : maximum number of packets that the current CPU can receive from 1457 * @budget : maximum number of packets that the current CPU can receive from
1501 * all interfaces. 1458 * all interfaces.
1502 * Description : 1459 * Description :
1503 * This function implements the the reception process. 1460 * To look at the incoming frames and clear the tx resources.
1504 * Also it runs the TX completion thread
1505 */ 1461 */
1506static int stmmac_poll(struct napi_struct *napi, int budget) 1462static int stmmac_poll(struct napi_struct *napi, int budget)
1507{ 1463{
1508 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); 1464 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
1509 int work_done = 0; 1465 int work_done = 0;
1510 1466
1511 priv->xstats.poll_n++; 1467 priv->xstats.napi_poll++;
1512 stmmac_tx(priv); 1468 stmmac_tx_clean(priv);
1513 work_done = stmmac_rx(priv, budget);
1514 1469
1470 work_done = stmmac_rx(priv, budget);
1515 if (work_done < budget) { 1471 if (work_done < budget) {
1516 napi_complete(napi); 1472 napi_complete(napi);
1517 stmmac_enable_irq(priv); 1473 stmmac_enable_dma_irq(priv);
1518 } 1474 }
1519 return work_done; 1475 return work_done;
1520} 1476}
@@ -1523,7 +1479,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
1523 * stmmac_tx_timeout 1479 * stmmac_tx_timeout
1524 * @dev : Pointer to net device structure 1480 * @dev : Pointer to net device structure
1525 * Description: this function is called when a packet transmission fails to 1481 * Description: this function is called when a packet transmission fails to
1526 * complete within a reasonable tmrate. The driver will mark the error in the 1482 * complete within a reasonable time. The driver will mark the error in the
1527 * netdev structure and arrange for the device to be reset to a sane state 1483 * netdev structure and arrange for the device to be reset to a sane state
1528 * in order to transmit a new packet. 1484 * in order to transmit a new packet.
1529 */ 1485 */
@@ -2050,6 +2006,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2050 if (flow_ctrl) 2006 if (flow_ctrl)
2051 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ 2007 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
2052 2008
2009 /* Rx Watchdog is available in the COREs newer than the 3.40.
2010 * In some case, for example on bugged HW this feature
2011 * has to be disable and this can be done by passing the
2012 * riwt_off field from the platform.
2013 */
2014 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
2015 priv->use_riwt = 1;
2016 pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
2017 }
2018
2053 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64); 2019 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
2054 2020
2055 spin_lock_init(&priv->lock); 2021 spin_lock_init(&priv->lock);
@@ -2141,11 +2107,9 @@ int stmmac_suspend(struct net_device *ndev)
2141 netif_device_detach(ndev); 2107 netif_device_detach(ndev);
2142 netif_stop_queue(ndev); 2108 netif_stop_queue(ndev);
2143 2109
2144#ifdef CONFIG_STMMAC_TIMER 2110 if (priv->use_riwt)
2145 priv->tm->timer_stop();
2146 if (likely(priv->tm->enable))
2147 dis_ic = 1; 2111 dis_ic = 1;
2148#endif 2112
2149 napi_disable(&priv->napi); 2113 napi_disable(&priv->napi);
2150 2114
2151 /* Stop TX/RX DMA */ 2115 /* Stop TX/RX DMA */
@@ -2196,10 +2160,6 @@ int stmmac_resume(struct net_device *ndev)
2196 priv->hw->dma->start_tx(priv->ioaddr); 2160 priv->hw->dma->start_tx(priv->ioaddr);
2197 priv->hw->dma->start_rx(priv->ioaddr); 2161 priv->hw->dma->start_rx(priv->ioaddr);
2198 2162
2199#ifdef CONFIG_STMMAC_TIMER
2200 if (likely(priv->tm->enable))
2201 priv->tm->timer_start(tmrate);
2202#endif
2203 napi_enable(&priv->napi); 2163 napi_enable(&priv->napi);
2204 2164
2205 netif_start_queue(ndev); 2165 netif_start_queue(ndev);
@@ -2295,11 +2255,6 @@ static int __init stmmac_cmdline_opt(char *str)
2295 } else if (!strncmp(opt, "eee_timer:", 6)) { 2255 } else if (!strncmp(opt, "eee_timer:", 6)) {
2296 if (kstrtoint(opt + 10, 0, &eee_timer)) 2256 if (kstrtoint(opt + 10, 0, &eee_timer))
2297 goto err; 2257 goto err;
2298#ifdef CONFIG_STMMAC_TIMER
2299 } else if (!strncmp(opt, "tmrate:", 7)) {
2300 if (kstrtoint(opt + 7, 0, &tmrate))
2301 goto err;
2302#endif
2303 } 2258 }
2304 } 2259 }
2305 return 0; 2260 return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 1f069b0f6af5..064eaac9616f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -61,8 +61,8 @@ static void stmmac_default_data(void)
61 * matches the device. The probe functions returns zero when the driver choose 61 * matches the device. The probe functions returns zero when the driver choose
62 * to take "ownership" of the device or an error code(-ve no) otherwise. 62 * to take "ownership" of the device or an error code(-ve no) otherwise.
63 */ 63 */
64static int __devinit stmmac_pci_probe(struct pci_dev *pdev, 64static int stmmac_pci_probe(struct pci_dev *pdev,
65 const struct pci_device_id *id) 65 const struct pci_device_id *id)
66{ 66{
67 int ret = 0; 67 int ret = 0;
68 void __iomem *addr = NULL; 68 void __iomem *addr = NULL;
@@ -130,7 +130,7 @@ err_out_req_reg_failed:
130 * Description: this function calls the main to free the net resources 130 * Description: this function calls the main to free the net resources
131 * and releases the PCI resources. 131 * and releases the PCI resources.
132 */ 132 */
133static void __devexit stmmac_pci_remove(struct pci_dev *pdev) 133static void stmmac_pci_remove(struct pci_dev *pdev)
134{ 134{
135 struct net_device *ndev = pci_get_drvdata(pdev); 135 struct net_device *ndev = pci_get_drvdata(pdev);
136 struct stmmac_priv *priv = netdev_priv(ndev); 136 struct stmmac_priv *priv = netdev_priv(ndev);
@@ -182,7 +182,7 @@ struct pci_driver stmmac_pci_driver = {
182 .name = STMMAC_RESOURCE_NAME, 182 .name = STMMAC_RESOURCE_NAME,
183 .id_table = stmmac_id_table, 183 .id_table = stmmac_id_table,
184 .probe = stmmac_pci_probe, 184 .probe = stmmac_pci_probe,
185 .remove = __devexit_p(stmmac_pci_remove), 185 .remove = stmmac_pci_remove,
186#ifdef CONFIG_PM 186#ifdef CONFIG_PM
187 .suspend = stmmac_pci_suspend, 187 .suspend = stmmac_pci_suspend,
188 .resume = stmmac_pci_resume, 188 .resume = stmmac_pci_resume,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ed112b55ae7f..b43d68b40e50 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -29,9 +29,9 @@
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31#ifdef CONFIG_OF 31#ifdef CONFIG_OF
32static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, 32static int stmmac_probe_config_dt(struct platform_device *pdev,
33 struct plat_stmmacenet_data *plat, 33 struct plat_stmmacenet_data *plat,
34 const char **mac) 34 const char **mac)
35{ 35{
36 struct device_node *np = pdev->dev.of_node; 36 struct device_node *np = pdev->dev.of_node;
37 37
@@ -59,9 +59,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
59 return 0; 59 return 0;
60} 60}
61#else 61#else
62static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, 62static int stmmac_probe_config_dt(struct platform_device *pdev,
63 struct plat_stmmacenet_data *plat, 63 struct plat_stmmacenet_data *plat,
64 const char **mac) 64 const char **mac)
65{ 65{
66 return -ENOSYS; 66 return -ENOSYS;
67} 67}
@@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
74 * the necessary resources and invokes the main to init 74 * the necessary resources and invokes the main to init
75 * the net device, register the mdio bus etc. 75 * the net device, register the mdio bus etc.
76 */ 76 */
77static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) 77static int stmmac_pltfr_probe(struct platform_device *pdev)
78{ 78{
79 int ret = 0; 79 int ret = 0;
80 struct resource *res; 80 struct resource *res;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
deleted file mode 100644
index 4ccd4e2977b7..000000000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*******************************************************************************
2 STMMAC external timer support.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include <linux/kernel.h>
26#include <linux/etherdevice.h>
27#include "stmmac_timer.h"
28
29static void stmmac_timer_handler(void *data)
30{
31 struct net_device *dev = (struct net_device *)data;
32
33 stmmac_schedule(dev);
34}
35
36#define STMMAC_TIMER_MSG(timer, freq) \
37printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq);
38
39#if defined(CONFIG_STMMAC_RTC_TIMER)
40#include <linux/rtc.h>
41static struct rtc_device *stmmac_rtc;
42static rtc_task_t stmmac_task;
43
44static void stmmac_rtc_start(unsigned int new_freq)
45{
46 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
47 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
48}
49
50static void stmmac_rtc_stop(void)
51{
52 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
53}
54
55int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
56{
57 stmmac_task.private_data = dev;
58 stmmac_task.func = stmmac_timer_handler;
59
60 stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
61 if (stmmac_rtc == NULL) {
62 pr_err("open rtc device failed\n");
63 return -ENODEV;
64 }
65
66 rtc_irq_register(stmmac_rtc, &stmmac_task);
67
68 /* Periodic mode is not supported */
69 if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
70 pr_err("set periodic failed\n");
71 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
72 rtc_class_close(stmmac_rtc);
73 return -1;
74 }
75
76 STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq);
77
78 tm->timer_start = stmmac_rtc_start;
79 tm->timer_stop = stmmac_rtc_stop;
80
81 return 0;
82}
83
84int stmmac_close_ext_timer(void)
85{
86 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
87 rtc_irq_unregister(stmmac_rtc, &stmmac_task);
88 rtc_class_close(stmmac_rtc);
89 return 0;
90}
91
92#elif defined(CONFIG_STMMAC_TMU_TIMER)
93#include <linux/clk.h>
94#define TMU_CHANNEL "tmu2_clk"
95static struct clk *timer_clock;
96
97static void stmmac_tmu_start(unsigned int new_freq)
98{
99 clk_set_rate(timer_clock, new_freq);
100 clk_prepare_enable(timer_clock);
101}
102
103static void stmmac_tmu_stop(void)
104{
105 clk_disable_unprepare(timer_clock);
106}
107
108int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
109{
110 timer_clock = clk_get(NULL, TMU_CHANNEL);
111
112 if (IS_ERR(timer_clock))
113 return -1;
114
115 if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) {
116 timer_clock = NULL;
117 return -1;
118 }
119
120 STMMAC_TIMER_MSG("TMU2", tm->freq);
121 tm->timer_start = stmmac_tmu_start;
122 tm->timer_stop = stmmac_tmu_stop;
123
124 return 0;
125}
126
127int stmmac_close_ext_timer(void)
128{
129 clk_disable_unprepare(timer_clock);
130 tmu2_unregister_user();
131 clk_put(timer_clock);
132 return 0;
133}
134#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
deleted file mode 100644
index aea9b14cdfbe..000000000000
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ /dev/null
@@ -1,46 +0,0 @@
1/*******************************************************************************
2 STMMAC external timer Header File.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24#ifndef __STMMAC_TIMER_H__
25#define __STMMAC_TIMER_H__
26
27struct stmmac_timer {
28 void (*timer_start) (unsigned int new_freq);
29 void (*timer_stop) (void);
30 unsigned int freq;
31 unsigned int enable;
32};
33
34/* Open the HW timer device and return 0 in case of success */
35int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm);
36/* Stop the timer and release it */
37int stmmac_close_ext_timer(void);
38/* Function used for scheduling task within the stmmac */
39void stmmac_schedule(struct net_device *dev);
40
41#if defined(CONFIG_STMMAC_TMU_TIMER)
42extern int tmu2_register_user(void *fnt, void *data);
43extern void tmu2_unregister_user(void);
44#endif
45
46#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index c8251be104d6..4c682a3d0424 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -185,7 +185,7 @@
185#define CAS_RESET_SPARE 3 185#define CAS_RESET_SPARE 3
186#endif 186#endif
187 187
188static char version[] __devinitdata = 188static char version[] =
189 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 189 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
190 190
191static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ 191static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
@@ -222,7 +222,7 @@ static int link_transition_timeout;
222 222
223 223
224 224
225static u16 link_modes[] __devinitdata = { 225static u16 link_modes[] = {
226 BMCR_ANENABLE, /* 0 : autoneg */ 226 BMCR_ANENABLE, /* 0 : autoneg */
227 0, /* 1 : 10bt half duplex */ 227 0, /* 1 : 10bt half duplex */
228 BMCR_SPEED100, /* 2 : 100bt half duplex */ 228 BMCR_SPEED100, /* 2 : 100bt half duplex */
@@ -4820,7 +4820,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4820 * only subordinate device and we can tweak the bridge settings to 4820 * only subordinate device and we can tweak the bridge settings to
4821 * reflect that fact. 4821 * reflect that fact.
4822 */ 4822 */
4823static void __devinit cas_program_bridge(struct pci_dev *cas_pdev) 4823static void cas_program_bridge(struct pci_dev *cas_pdev)
4824{ 4824{
4825 struct pci_dev *pdev = cas_pdev->bus->self; 4825 struct pci_dev *pdev = cas_pdev->bus->self;
4826 u32 val; 4826 u32 val;
@@ -4916,8 +4916,7 @@ static const struct net_device_ops cas_netdev_ops = {
4916#endif 4916#endif
4917}; 4917};
4918 4918
4919static int __devinit cas_init_one(struct pci_dev *pdev, 4919static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4920 const struct pci_device_id *ent)
4921{ 4920{
4922 static int cas_version_printed = 0; 4921 static int cas_version_printed = 0;
4923 unsigned long casreg_len; 4922 unsigned long casreg_len;
@@ -5175,7 +5174,7 @@ err_out_disable_pdev:
5175 return -ENODEV; 5174 return -ENODEV;
5176} 5175}
5177 5176
5178static void __devexit cas_remove_one(struct pci_dev *pdev) 5177static void cas_remove_one(struct pci_dev *pdev)
5179{ 5178{
5180 struct net_device *dev = pci_get_drvdata(pdev); 5179 struct net_device *dev = pci_get_drvdata(pdev);
5181 struct cas *cp; 5180 struct cas *cp;
@@ -5273,7 +5272,7 @@ static struct pci_driver cas_driver = {
5273 .name = DRV_MODULE_NAME, 5272 .name = DRV_MODULE_NAME,
5274 .id_table = cas_pci_tbl, 5273 .id_table = cas_pci_tbl,
5275 .probe = cas_init_one, 5274 .probe = cas_init_one,
5276 .remove = __devexit_p(cas_remove_one), 5275 .remove = cas_remove_one,
5277#ifdef CONFIG_PM 5276#ifdef CONFIG_PM
5278 .suspend = cas_suspend, 5277 .suspend = cas_suspend,
5279 .resume = cas_resume 5278 .resume = cas_resume
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 275b430aeb75..a0bdf0779466 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -38,7 +38,7 @@
38#define DRV_MODULE_VERSION "1.1" 38#define DRV_MODULE_VERSION "1.1"
39#define DRV_MODULE_RELDATE "Apr 22, 2010" 39#define DRV_MODULE_RELDATE "Apr 22, 2010"
40 40
41static char version[] __devinitdata = 41static char version[] =
42 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 42 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43 43
44MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 44MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
@@ -7977,7 +7977,7 @@ static int niu_set_ldg_sid(struct niu *np, int ldg, int func, int vector)
7977 return 0; 7977 return 0;
7978} 7978}
7979 7979
7980static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr) 7980static int niu_pci_eeprom_read(struct niu *np, u32 addr)
7981{ 7981{
7982 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START | 7982 u64 frame, frame_base = (ESPC_PIO_STAT_READ_START |
7983 (addr << ESPC_PIO_STAT_ADDR_SHIFT)); 7983 (addr << ESPC_PIO_STAT_ADDR_SHIFT));
@@ -8020,7 +8020,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
8020 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT; 8020 return (frame & ESPC_PIO_STAT_DATA) >> ESPC_PIO_STAT_DATA_SHIFT;
8021} 8021}
8022 8022
8023static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off) 8023static int niu_pci_eeprom_read16(struct niu *np, u32 off)
8024{ 8024{
8025 int err = niu_pci_eeprom_read(np, off); 8025 int err = niu_pci_eeprom_read(np, off);
8026 u16 val; 8026 u16 val;
@@ -8036,7 +8036,7 @@ static int __devinit niu_pci_eeprom_read16(struct niu *np, u32 off)
8036 return val; 8036 return val;
8037} 8037}
8038 8038
8039static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off) 8039static int niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8040{ 8040{
8041 int err = niu_pci_eeprom_read(np, off); 8041 int err = niu_pci_eeprom_read(np, off);
8042 u16 val; 8042 u16 val;
@@ -8054,10 +8054,8 @@ static int __devinit niu_pci_eeprom_read16_swp(struct niu *np, u32 off)
8054 return val; 8054 return val;
8055} 8055}
8056 8056
8057static int __devinit niu_pci_vpd_get_propname(struct niu *np, 8057static int niu_pci_vpd_get_propname(struct niu *np, u32 off, char *namebuf,
8058 u32 off, 8058 int namebuf_len)
8059 char *namebuf,
8060 int namebuf_len)
8061{ 8059{
8062 int i; 8060 int i;
8063 8061
@@ -8075,7 +8073,7 @@ static int __devinit niu_pci_vpd_get_propname(struct niu *np,
8075 return i + 1; 8073 return i + 1;
8076} 8074}
8077 8075
8078static void __devinit niu_vpd_parse_version(struct niu *np) 8076static void niu_vpd_parse_version(struct niu *np)
8079{ 8077{
8080 struct niu_vpd *vpd = &np->vpd; 8078 struct niu_vpd *vpd = &np->vpd;
8081 int len = strlen(vpd->version) + 1; 8079 int len = strlen(vpd->version) + 1;
@@ -8102,8 +8100,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8102} 8100}
8103 8101
8104/* ESPC_PIO_EN_ENABLE must be set */ 8102/* ESPC_PIO_EN_ENABLE must be set */
8105static int __devinit niu_pci_vpd_scan_props(struct niu *np, 8103static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
8106 u32 start, u32 end)
8107{ 8104{
8108 unsigned int found_mask = 0; 8105 unsigned int found_mask = 0;
8109#define FOUND_MASK_MODEL 0x00000001 8106#define FOUND_MASK_MODEL 0x00000001
@@ -8189,7 +8186,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
8189} 8186}
8190 8187
8191/* ESPC_PIO_EN_ENABLE must be set */ 8188/* ESPC_PIO_EN_ENABLE must be set */
8192static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start) 8189static void niu_pci_vpd_fetch(struct niu *np, u32 start)
8193{ 8190{
8194 u32 offset; 8191 u32 offset;
8195 int err; 8192 int err;
@@ -8224,7 +8221,7 @@ static void __devinit niu_pci_vpd_fetch(struct niu *np, u32 start)
8224} 8221}
8225 8222
8226/* ESPC_PIO_EN_ENABLE must be set */ 8223/* ESPC_PIO_EN_ENABLE must be set */
8227static u32 __devinit niu_pci_vpd_offset(struct niu *np) 8224static u32 niu_pci_vpd_offset(struct niu *np)
8228{ 8225{
8229 u32 start = 0, end = ESPC_EEPROM_SIZE, ret; 8226 u32 start = 0, end = ESPC_EEPROM_SIZE, ret;
8230 int err; 8227 int err;
@@ -8279,8 +8276,7 @@ static u32 __devinit niu_pci_vpd_offset(struct niu *np)
8279 return 0; 8276 return 0;
8280} 8277}
8281 8278
8282static int __devinit niu_phy_type_prop_decode(struct niu *np, 8279static int niu_phy_type_prop_decode(struct niu *np, const char *phy_prop)
8283 const char *phy_prop)
8284{ 8280{
8285 if (!strcmp(phy_prop, "mif")) { 8281 if (!strcmp(phy_prop, "mif")) {
8286 /* 1G copper, MII */ 8282 /* 1G copper, MII */
@@ -8334,7 +8330,7 @@ static int niu_pci_vpd_get_nports(struct niu *np)
8334 return ports; 8330 return ports;
8335} 8331}
8336 8332
8337static void __devinit niu_pci_vpd_validate(struct niu *np) 8333static void niu_pci_vpd_validate(struct niu *np)
8338{ 8334{
8339 struct net_device *dev = np->dev; 8335 struct net_device *dev = np->dev;
8340 struct niu_vpd *vpd = &np->vpd; 8336 struct niu_vpd *vpd = &np->vpd;
@@ -8380,7 +8376,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
8380 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len); 8376 memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
8381} 8377}
8382 8378
8383static int __devinit niu_pci_probe_sprom(struct niu *np) 8379static int niu_pci_probe_sprom(struct niu *np)
8384{ 8380{
8385 struct net_device *dev = np->dev; 8381 struct net_device *dev = np->dev;
8386 int len, i; 8382 int len, i;
@@ -8538,7 +8534,7 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
8538 return 0; 8534 return 0;
8539} 8535}
8540 8536
8541static int __devinit niu_get_and_validate_port(struct niu *np) 8537static int niu_get_and_validate_port(struct niu *np)
8542{ 8538{
8543 struct niu_parent *parent = np->parent; 8539 struct niu_parent *parent = np->parent;
8544 8540
@@ -8572,10 +8568,8 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
8572 return 0; 8568 return 0;
8573} 8569}
8574 8570
8575static int __devinit phy_record(struct niu_parent *parent, 8571static int phy_record(struct niu_parent *parent, struct phy_probe_info *p,
8576 struct phy_probe_info *p, 8572 int dev_id_1, int dev_id_2, u8 phy_port, int type)
8577 int dev_id_1, int dev_id_2, u8 phy_port,
8578 int type)
8579{ 8573{
8580 u32 id = (dev_id_1 << 16) | dev_id_2; 8574 u32 id = (dev_id_1 << 16) | dev_id_2;
8581 u8 idx; 8575 u8 idx;
@@ -8611,7 +8605,7 @@ static int __devinit phy_record(struct niu_parent *parent,
8611 return 0; 8605 return 0;
8612} 8606}
8613 8607
8614static int __devinit port_has_10g(struct phy_probe_info *p, int port) 8608static int port_has_10g(struct phy_probe_info *p, int port)
8615{ 8609{
8616 int i; 8610 int i;
8617 8611
@@ -8627,7 +8621,7 @@ static int __devinit port_has_10g(struct phy_probe_info *p, int port)
8627 return 0; 8621 return 0;
8628} 8622}
8629 8623
8630static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest) 8624static int count_10g_ports(struct phy_probe_info *p, int *lowest)
8631{ 8625{
8632 int port, cnt; 8626 int port, cnt;
8633 8627
@@ -8644,7 +8638,7 @@ static int __devinit count_10g_ports(struct phy_probe_info *p, int *lowest)
8644 return cnt; 8638 return cnt;
8645} 8639}
8646 8640
8647static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest) 8641static int count_1g_ports(struct phy_probe_info *p, int *lowest)
8648{ 8642{
8649 *lowest = 32; 8643 *lowest = 32;
8650 if (p->cur[PHY_TYPE_MII]) 8644 if (p->cur[PHY_TYPE_MII])
@@ -8653,7 +8647,7 @@ static int __devinit count_1g_ports(struct phy_probe_info *p, int *lowest)
8653 return p->cur[PHY_TYPE_MII]; 8647 return p->cur[PHY_TYPE_MII];
8654} 8648}
8655 8649
8656static void __devinit niu_n2_divide_channels(struct niu_parent *parent) 8650static void niu_n2_divide_channels(struct niu_parent *parent)
8657{ 8651{
8658 int num_ports = parent->num_ports; 8652 int num_ports = parent->num_ports;
8659 int i; 8653 int i;
@@ -8669,8 +8663,8 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
8669 } 8663 }
8670} 8664}
8671 8665
8672static void __devinit niu_divide_channels(struct niu_parent *parent, 8666static void niu_divide_channels(struct niu_parent *parent,
8673 int num_10g, int num_1g) 8667 int num_10g, int num_1g)
8674{ 8668{
8675 int num_ports = parent->num_ports; 8669 int num_ports = parent->num_ports;
8676 int rx_chans_per_10g, rx_chans_per_1g; 8670 int rx_chans_per_10g, rx_chans_per_1g;
@@ -8731,8 +8725,8 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
8731 } 8725 }
8732} 8726}
8733 8727
8734static void __devinit niu_divide_rdc_groups(struct niu_parent *parent, 8728static void niu_divide_rdc_groups(struct niu_parent *parent,
8735 int num_10g, int num_1g) 8729 int num_10g, int num_1g)
8736{ 8730{
8737 int i, num_ports = parent->num_ports; 8731 int i, num_ports = parent->num_ports;
8738 int rdc_group, rdc_groups_per_port; 8732 int rdc_group, rdc_groups_per_port;
@@ -8776,9 +8770,8 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
8776 } 8770 }
8777} 8771}
8778 8772
8779static int __devinit fill_phy_probe_info(struct niu *np, 8773static int fill_phy_probe_info(struct niu *np, struct niu_parent *parent,
8780 struct niu_parent *parent, 8774 struct phy_probe_info *info)
8781 struct phy_probe_info *info)
8782{ 8775{
8783 unsigned long flags; 8776 unsigned long flags;
8784 int port, err; 8777 int port, err;
@@ -8819,7 +8812,7 @@ static int __devinit fill_phy_probe_info(struct niu *np,
8819 return err; 8812 return err;
8820} 8813}
8821 8814
8822static int __devinit walk_phys(struct niu *np, struct niu_parent *parent) 8815static int walk_phys(struct niu *np, struct niu_parent *parent)
8823{ 8816{
8824 struct phy_probe_info *info = &parent->phy_probe_info; 8817 struct phy_probe_info *info = &parent->phy_probe_info;
8825 int lowest_10g, lowest_1g; 8818 int lowest_10g, lowest_1g;
@@ -8948,7 +8941,7 @@ unknown_vg_1g_port:
8948 return -EINVAL; 8941 return -EINVAL;
8949} 8942}
8950 8943
8951static int __devinit niu_probe_ports(struct niu *np) 8944static int niu_probe_ports(struct niu *np)
8952{ 8945{
8953 struct niu_parent *parent = np->parent; 8946 struct niu_parent *parent = np->parent;
8954 int err, i; 8947 int err, i;
@@ -8969,7 +8962,7 @@ static int __devinit niu_probe_ports(struct niu *np)
8969 return 0; 8962 return 0;
8970} 8963}
8971 8964
8972static int __devinit niu_classifier_swstate_init(struct niu *np) 8965static int niu_classifier_swstate_init(struct niu *np)
8973{ 8966{
8974 struct niu_classifier *cp = &np->clas; 8967 struct niu_classifier *cp = &np->clas;
8975 8968
@@ -8981,7 +8974,7 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
8981 return fflp_early_init(np); 8974 return fflp_early_init(np);
8982} 8975}
8983 8976
8984static void __devinit niu_link_config_init(struct niu *np) 8977static void niu_link_config_init(struct niu *np)
8985{ 8978{
8986 struct niu_link_config *lp = &np->link_config; 8979 struct niu_link_config *lp = &np->link_config;
8987 8980
@@ -9006,7 +8999,7 @@ static void __devinit niu_link_config_init(struct niu *np)
9006#endif 8999#endif
9007} 9000}
9008 9001
9009static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np) 9002static int niu_init_mac_ipp_pcs_base(struct niu *np)
9010{ 9003{
9011 switch (np->port) { 9004 switch (np->port) {
9012 case 0: 9005 case 0:
@@ -9045,7 +9038,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
9045 return 0; 9038 return 0;
9046} 9039}
9047 9040
9048static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map) 9041static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9049{ 9042{
9050 struct msix_entry msi_vec[NIU_NUM_LDG]; 9043 struct msix_entry msi_vec[NIU_NUM_LDG];
9051 struct niu_parent *parent = np->parent; 9044 struct niu_parent *parent = np->parent;
@@ -9084,7 +9077,7 @@ retry:
9084 np->num_ldg = num_irqs; 9077 np->num_ldg = num_irqs;
9085} 9078}
9086 9079
9087static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) 9080static int niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9088{ 9081{
9089#ifdef CONFIG_SPARC64 9082#ifdef CONFIG_SPARC64
9090 struct platform_device *op = np->op; 9083 struct platform_device *op = np->op;
@@ -9108,7 +9101,7 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map)
9108#endif 9101#endif
9109} 9102}
9110 9103
9111static int __devinit niu_ldg_init(struct niu *np) 9104static int niu_ldg_init(struct niu *np)
9112{ 9105{
9113 struct niu_parent *parent = np->parent; 9106 struct niu_parent *parent = np->parent;
9114 u8 ldg_num_map[NIU_NUM_LDG]; 9107 u8 ldg_num_map[NIU_NUM_LDG];
@@ -9225,13 +9218,13 @@ static int __devinit niu_ldg_init(struct niu *np)
9225 return 0; 9218 return 0;
9226} 9219}
9227 9220
9228static void __devexit niu_ldg_free(struct niu *np) 9221static void niu_ldg_free(struct niu *np)
9229{ 9222{
9230 if (np->flags & NIU_FLAGS_MSIX) 9223 if (np->flags & NIU_FLAGS_MSIX)
9231 pci_disable_msix(np->pdev); 9224 pci_disable_msix(np->pdev);
9232} 9225}
9233 9226
9234static int __devinit niu_get_of_props(struct niu *np) 9227static int niu_get_of_props(struct niu *np)
9235{ 9228{
9236#ifdef CONFIG_SPARC64 9229#ifdef CONFIG_SPARC64
9237 struct net_device *dev = np->dev; 9230 struct net_device *dev = np->dev;
@@ -9300,7 +9293,7 @@ static int __devinit niu_get_of_props(struct niu *np)
9300#endif 9293#endif
9301} 9294}
9302 9295
9303static int __devinit niu_get_invariants(struct niu *np) 9296static int niu_get_invariants(struct niu *np)
9304{ 9297{
9305 int err, have_props; 9298 int err, have_props;
9306 u32 offset; 9299 u32 offset;
@@ -9479,9 +9472,8 @@ static struct device_attribute niu_parent_attributes[] = {
9479 {} 9472 {}
9480}; 9473};
9481 9474
9482static struct niu_parent * __devinit niu_new_parent(struct niu *np, 9475static struct niu_parent *niu_new_parent(struct niu *np,
9483 union niu_parent_id *id, 9476 union niu_parent_id *id, u8 ptype)
9484 u8 ptype)
9485{ 9477{
9486 struct platform_device *plat_dev; 9478 struct platform_device *plat_dev;
9487 struct niu_parent *p; 9479 struct niu_parent *p;
@@ -9544,9 +9536,8 @@ fail_unregister:
9544 return NULL; 9536 return NULL;
9545} 9537}
9546 9538
9547static struct niu_parent * __devinit niu_get_parent(struct niu *np, 9539static struct niu_parent *niu_get_parent(struct niu *np,
9548 union niu_parent_id *id, 9540 union niu_parent_id *id, u8 ptype)
9549 u8 ptype)
9550{ 9541{
9551 struct niu_parent *p, *tmp; 9542 struct niu_parent *p, *tmp;
9552 int port = np->port; 9543 int port = np->port;
@@ -9662,7 +9653,7 @@ static const struct niu_ops niu_pci_ops = {
9662 .unmap_single = niu_pci_unmap_single, 9653 .unmap_single = niu_pci_unmap_single,
9663}; 9654};
9664 9655
9665static void __devinit niu_driver_version(void) 9656static void niu_driver_version(void)
9666{ 9657{
9667 static int niu_version_printed; 9658 static int niu_version_printed;
9668 9659
@@ -9670,10 +9661,10 @@ static void __devinit niu_driver_version(void)
9670 pr_info("%s", version); 9661 pr_info("%s", version);
9671} 9662}
9672 9663
9673static struct net_device * __devinit niu_alloc_and_init( 9664static struct net_device *niu_alloc_and_init(struct device *gen_dev,
9674 struct device *gen_dev, struct pci_dev *pdev, 9665 struct pci_dev *pdev,
9675 struct platform_device *op, const struct niu_ops *ops, 9666 struct platform_device *op,
9676 u8 port) 9667 const struct niu_ops *ops, u8 port)
9677{ 9668{
9678 struct net_device *dev; 9669 struct net_device *dev;
9679 struct niu *np; 9670 struct niu *np;
@@ -9714,14 +9705,14 @@ static const struct net_device_ops niu_netdev_ops = {
9714 .ndo_change_mtu = niu_change_mtu, 9705 .ndo_change_mtu = niu_change_mtu,
9715}; 9706};
9716 9707
9717static void __devinit niu_assign_netdev_ops(struct net_device *dev) 9708static void niu_assign_netdev_ops(struct net_device *dev)
9718{ 9709{
9719 dev->netdev_ops = &niu_netdev_ops; 9710 dev->netdev_ops = &niu_netdev_ops;
9720 dev->ethtool_ops = &niu_ethtool_ops; 9711 dev->ethtool_ops = &niu_ethtool_ops;
9721 dev->watchdog_timeo = NIU_TX_TIMEOUT; 9712 dev->watchdog_timeo = NIU_TX_TIMEOUT;
9722} 9713}
9723 9714
9724static void __devinit niu_device_announce(struct niu *np) 9715static void niu_device_announce(struct niu *np)
9725{ 9716{
9726 struct net_device *dev = np->dev; 9717 struct net_device *dev = np->dev;
9727 9718
@@ -9750,14 +9741,14 @@ static void __devinit niu_device_announce(struct niu *np)
9750 } 9741 }
9751} 9742}
9752 9743
9753static void __devinit niu_set_basic_features(struct net_device *dev) 9744static void niu_set_basic_features(struct net_device *dev)
9754{ 9745{
9755 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH; 9746 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
9756 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 9747 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
9757} 9748}
9758 9749
9759static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9750static int niu_pci_init_one(struct pci_dev *pdev,
9760 const struct pci_device_id *ent) 9751 const struct pci_device_id *ent)
9761{ 9752{
9762 union niu_parent_id parent_id; 9753 union niu_parent_id parent_id;
9763 struct net_device *dev; 9754 struct net_device *dev;
@@ -9895,7 +9886,7 @@ err_out_disable_pdev:
9895 return err; 9886 return err;
9896} 9887}
9897 9888
9898static void __devexit niu_pci_remove_one(struct pci_dev *pdev) 9889static void niu_pci_remove_one(struct pci_dev *pdev)
9899{ 9890{
9900 struct net_device *dev = pci_get_drvdata(pdev); 9891 struct net_device *dev = pci_get_drvdata(pdev);
9901 9892
@@ -9980,7 +9971,7 @@ static struct pci_driver niu_pci_driver = {
9980 .name = DRV_MODULE_NAME, 9971 .name = DRV_MODULE_NAME,
9981 .id_table = niu_pci_tbl, 9972 .id_table = niu_pci_tbl,
9982 .probe = niu_pci_init_one, 9973 .probe = niu_pci_init_one,
9983 .remove = __devexit_p(niu_pci_remove_one), 9974 .remove = niu_pci_remove_one,
9984 .suspend = niu_suspend, 9975 .suspend = niu_suspend,
9985 .resume = niu_resume, 9976 .resume = niu_resume,
9986}; 9977};
@@ -10044,7 +10035,7 @@ static const struct niu_ops niu_phys_ops = {
10044 .unmap_single = niu_phys_unmap_single, 10035 .unmap_single = niu_phys_unmap_single,
10045}; 10036};
10046 10037
10047static int __devinit niu_of_probe(struct platform_device *op) 10038static int niu_of_probe(struct platform_device *op)
10048{ 10039{
10049 union niu_parent_id parent_id; 10040 union niu_parent_id parent_id;
10050 struct net_device *dev; 10041 struct net_device *dev;
@@ -10158,7 +10149,7 @@ err_out:
10158 return err; 10149 return err;
10159} 10150}
10160 10151
10161static int __devexit niu_of_remove(struct platform_device *op) 10152static int niu_of_remove(struct platform_device *op)
10162{ 10153{
10163 struct net_device *dev = dev_get_drvdata(&op->dev); 10154 struct net_device *dev = dev_get_drvdata(&op->dev);
10164 10155
@@ -10211,7 +10202,7 @@ static struct platform_driver niu_of_driver = {
10211 .of_match_table = niu_match, 10202 .of_match_table = niu_match,
10212 }, 10203 },
10213 .probe = niu_of_probe, 10204 .probe = niu_of_probe,
10214 .remove = __devexit_p(niu_of_remove), 10205 .remove = niu_of_remove,
10215}; 10206};
10216 10207
10217#endif /* CONFIG_SPARC64 */ 10208#endif /* CONFIG_SPARC64 */
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c9c977bf02ac..be82f6d13c51 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1074,8 +1074,8 @@ static const struct net_device_ops bigmac_ops = {
1074 .ndo_validate_addr = eth_validate_addr, 1074 .ndo_validate_addr = eth_validate_addr,
1075}; 1075};
1076 1076
1077static int __devinit bigmac_ether_init(struct platform_device *op, 1077static int bigmac_ether_init(struct platform_device *op,
1078 struct platform_device *qec_op) 1078 struct platform_device *qec_op)
1079{ 1079{
1080 static int version_printed; 1080 static int version_printed;
1081 struct net_device *dev; 1081 struct net_device *dev;
@@ -1233,7 +1233,7 @@ fail_and_cleanup:
1233/* QEC can be the parent of either QuadEthernet or a BigMAC. We want 1233/* QEC can be the parent of either QuadEthernet or a BigMAC. We want
1234 * the latter. 1234 * the latter.
1235 */ 1235 */
1236static int __devinit bigmac_sbus_probe(struct platform_device *op) 1236static int bigmac_sbus_probe(struct platform_device *op)
1237{ 1237{
1238 struct device *parent = op->dev.parent; 1238 struct device *parent = op->dev.parent;
1239 struct platform_device *qec_op; 1239 struct platform_device *qec_op;
@@ -1243,7 +1243,7 @@ static int __devinit bigmac_sbus_probe(struct platform_device *op)
1243 return bigmac_ether_init(op, qec_op); 1243 return bigmac_ether_init(op, qec_op);
1244} 1244}
1245 1245
1246static int __devexit bigmac_sbus_remove(struct platform_device *op) 1246static int bigmac_sbus_remove(struct platform_device *op)
1247{ 1247{
1248 struct bigmac *bp = dev_get_drvdata(&op->dev); 1248 struct bigmac *bp = dev_get_drvdata(&op->dev);
1249 struct device *parent = op->dev.parent; 1249 struct device *parent = op->dev.parent;
@@ -1286,7 +1286,7 @@ static struct platform_driver bigmac_sbus_driver = {
1286 .of_match_table = bigmac_sbus_match, 1286 .of_match_table = bigmac_sbus_match,
1287 }, 1287 },
1288 .probe = bigmac_sbus_probe, 1288 .probe = bigmac_sbus_probe,
1289 .remove = __devexit_p(bigmac_sbus_remove), 1289 .remove = bigmac_sbus_remove,
1290}; 1290};
1291 1291
1292module_platform_driver(bigmac_sbus_driver); 1292module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 6c8695ec7cb9..5f3f9d52757d 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -77,7 +77,7 @@
77#define DRV_VERSION "1.0" 77#define DRV_VERSION "1.0"
78#define DRV_AUTHOR "David S. Miller <davem@redhat.com>" 78#define DRV_AUTHOR "David S. Miller <davem@redhat.com>"
79 79
80static char version[] __devinitdata = 80static char version[] =
81 DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; 81 DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n";
82 82
83MODULE_AUTHOR(DRV_AUTHOR); 83MODULE_AUTHOR(DRV_AUTHOR);
@@ -2763,7 +2763,7 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2763} 2763}
2764#endif /* not Sparc and not PPC */ 2764#endif /* not Sparc and not PPC */
2765 2765
2766static int __devinit gem_get_device_address(struct gem *gp) 2766static int gem_get_device_address(struct gem *gp)
2767{ 2767{
2768#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) 2768#if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC)
2769 struct net_device *dev = gp->dev; 2769 struct net_device *dev = gp->dev;
@@ -2827,8 +2827,7 @@ static const struct net_device_ops gem_netdev_ops = {
2827#endif 2827#endif
2828}; 2828};
2829 2829
2830static int __devinit gem_init_one(struct pci_dev *pdev, 2830static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2831 const struct pci_device_id *ent)
2832{ 2831{
2833 unsigned long gemreg_base, gemreg_len; 2832 unsigned long gemreg_base, gemreg_len;
2834 struct net_device *dev; 2833 struct net_device *dev;
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 73f341b8befb..a1bff49a8155 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2499,7 +2499,7 @@ static int hme_version_printed;
2499 * 2499 *
2500 * Return NULL on failure. 2500 * Return NULL on failure.
2501 */ 2501 */
2502static struct quattro * __devinit quattro_sbus_find(struct platform_device *child) 2502static struct quattro *quattro_sbus_find(struct platform_device *child)
2503{ 2503{
2504 struct device *parent = child->dev.parent; 2504 struct device *parent = child->dev.parent;
2505 struct platform_device *op; 2505 struct platform_device *op;
@@ -2580,7 +2580,7 @@ static void quattro_sbus_free_irqs(void)
2580#endif /* CONFIG_SBUS */ 2580#endif /* CONFIG_SBUS */
2581 2581
2582#ifdef CONFIG_PCI 2582#ifdef CONFIG_PCI
2583static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev) 2583static struct quattro *quattro_pci_find(struct pci_dev *pdev)
2584{ 2584{
2585 struct pci_dev *bdev = pdev->bus->self; 2585 struct pci_dev *bdev = pdev->bus->self;
2586 struct quattro *qp; 2586 struct quattro *qp;
@@ -2623,7 +2623,7 @@ static const struct net_device_ops hme_netdev_ops = {
2623}; 2623};
2624 2624
2625#ifdef CONFIG_SBUS 2625#ifdef CONFIG_SBUS
2626static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) 2626static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2627{ 2627{
2628 struct device_node *dp = op->dev.of_node, *sbus_dp; 2628 struct device_node *dp = op->dev.of_node, *sbus_dp;
2629 struct quattro *qp = NULL; 2629 struct quattro *qp = NULL;
@@ -2927,8 +2927,8 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2927} 2927}
2928#endif /* !(CONFIG_SPARC) */ 2928#endif /* !(CONFIG_SPARC) */
2929 2929
2930static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, 2930static int happy_meal_pci_probe(struct pci_dev *pdev,
2931 const struct pci_device_id *ent) 2931 const struct pci_device_id *ent)
2932{ 2932{
2933 struct quattro *qp = NULL; 2933 struct quattro *qp = NULL;
2934#ifdef CONFIG_SPARC 2934#ifdef CONFIG_SPARC
@@ -3162,7 +3162,7 @@ err_out:
3162 return err; 3162 return err;
3163} 3163}
3164 3164
3165static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) 3165static void happy_meal_pci_remove(struct pci_dev *pdev)
3166{ 3166{
3167 struct happy_meal *hp = dev_get_drvdata(&pdev->dev); 3167 struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
3168 struct net_device *net_dev = hp->dev; 3168 struct net_device *net_dev = hp->dev;
@@ -3190,7 +3190,7 @@ static struct pci_driver hme_pci_driver = {
3190 .name = "hme", 3190 .name = "hme",
3191 .id_table = happymeal_pci_ids, 3191 .id_table = happymeal_pci_ids,
3192 .probe = happy_meal_pci_probe, 3192 .probe = happy_meal_pci_probe,
3193 .remove = __devexit_p(happy_meal_pci_remove), 3193 .remove = happy_meal_pci_remove,
3194}; 3194};
3195 3195
3196static int __init happy_meal_pci_init(void) 3196static int __init happy_meal_pci_init(void)
@@ -3216,7 +3216,7 @@ static void happy_meal_pci_exit(void)
3216 3216
3217#ifdef CONFIG_SBUS 3217#ifdef CONFIG_SBUS
3218static const struct of_device_id hme_sbus_match[]; 3218static const struct of_device_id hme_sbus_match[];
3219static int __devinit hme_sbus_probe(struct platform_device *op) 3219static int hme_sbus_probe(struct platform_device *op)
3220{ 3220{
3221 const struct of_device_id *match; 3221 const struct of_device_id *match;
3222 struct device_node *dp = op->dev.of_node; 3222 struct device_node *dp = op->dev.of_node;
@@ -3234,7 +3234,7 @@ static int __devinit hme_sbus_probe(struct platform_device *op)
3234 return happy_meal_sbus_probe_one(op, is_qfe); 3234 return happy_meal_sbus_probe_one(op, is_qfe);
3235} 3235}
3236 3236
3237static int __devexit hme_sbus_remove(struct platform_device *op) 3237static int hme_sbus_remove(struct platform_device *op)
3238{ 3238{
3239 struct happy_meal *hp = dev_get_drvdata(&op->dev); 3239 struct happy_meal *hp = dev_get_drvdata(&op->dev);
3240 struct net_device *net_dev = hp->dev; 3240 struct net_device *net_dev = hp->dev;
@@ -3284,7 +3284,7 @@ static struct platform_driver hme_sbus_driver = {
3284 .of_match_table = hme_sbus_match, 3284 .of_match_table = hme_sbus_match,
3285 }, 3285 },
3286 .probe = hme_sbus_probe, 3286 .probe = hme_sbus_probe,
3287 .remove = __devexit_p(hme_sbus_remove), 3287 .remove = hme_sbus_remove,
3288}; 3288};
3289 3289
3290static int __init happy_meal_sbus_init(void) 3290static int __init happy_meal_sbus_init(void)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index aeded7ff1c8f..1dcee6915843 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -744,7 +744,7 @@ static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
744 qecp->gregs + GLOB_RSIZE); 744 qecp->gregs + GLOB_RSIZE);
745} 745}
746 746
747static u8 __devinit qec_get_burst(struct device_node *dp) 747static u8 qec_get_burst(struct device_node *dp)
748{ 748{
749 u8 bsizes, bsizes_more; 749 u8 bsizes, bsizes_more;
750 750
@@ -764,7 +764,7 @@ static u8 __devinit qec_get_burst(struct device_node *dp)
764 return bsizes; 764 return bsizes;
765} 765}
766 766
767static struct sunqec * __devinit get_qec(struct platform_device *child) 767static struct sunqec *get_qec(struct platform_device *child)
768{ 768{
769 struct platform_device *op = to_platform_device(child->dev.parent); 769 struct platform_device *op = to_platform_device(child->dev.parent);
770 struct sunqec *qecp; 770 struct sunqec *qecp;
@@ -830,7 +830,7 @@ static const struct net_device_ops qec_ops = {
830 .ndo_validate_addr = eth_validate_addr, 830 .ndo_validate_addr = eth_validate_addr,
831}; 831};
832 832
833static int __devinit qec_ether_init(struct platform_device *op) 833static int qec_ether_init(struct platform_device *op)
834{ 834{
835 static unsigned version_printed; 835 static unsigned version_printed;
836 struct net_device *dev; 836 struct net_device *dev;
@@ -929,12 +929,12 @@ fail:
929 return res; 929 return res;
930} 930}
931 931
932static int __devinit qec_sbus_probe(struct platform_device *op) 932static int qec_sbus_probe(struct platform_device *op)
933{ 933{
934 return qec_ether_init(op); 934 return qec_ether_init(op);
935} 935}
936 936
937static int __devexit qec_sbus_remove(struct platform_device *op) 937static int qec_sbus_remove(struct platform_device *op)
938{ 938{
939 struct sunqe *qp = dev_get_drvdata(&op->dev); 939 struct sunqe *qp = dev_get_drvdata(&op->dev);
940 struct net_device *net_dev = qp->dev; 940 struct net_device *net_dev = qp->dev;
@@ -971,7 +971,7 @@ static struct platform_driver qec_sbus_driver = {
971 .of_match_table = qec_sbus_match, 971 .of_match_table = qec_sbus_match,
972 }, 972 },
973 .probe = qec_sbus_probe, 973 .probe = qec_sbus_probe,
974 .remove = __devexit_p(qec_sbus_remove), 974 .remove = qec_sbus_remove,
975}; 975};
976 976
977static int __init qec_init(void) 977static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index a108db35924e..e1b895530827 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -25,7 +25,7 @@
25#define DRV_MODULE_VERSION "1.0" 25#define DRV_MODULE_VERSION "1.0"
26#define DRV_MODULE_RELDATE "June 25, 2007" 26#define DRV_MODULE_RELDATE "June 25, 2007"
27 27
28static char version[] __devinitdata = 28static char version[] =
29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 29 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
30MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 30MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
31MODULE_DESCRIPTION("Sun LDOM virtual network driver"); 31MODULE_DESCRIPTION("Sun LDOM virtual network driver");
@@ -937,7 +937,7 @@ static void vnet_port_free_tx_bufs(struct vnet_port *port)
937 } 937 }
938} 938}
939 939
940static int __devinit vnet_port_alloc_tx_bufs(struct vnet_port *port) 940static int vnet_port_alloc_tx_bufs(struct vnet_port *port)
941{ 941{
942 struct vio_dring_state *dr; 942 struct vio_dring_state *dr;
943 unsigned long len; 943 unsigned long len;
@@ -1019,7 +1019,7 @@ static const struct net_device_ops vnet_ops = {
1019 .ndo_start_xmit = vnet_start_xmit, 1019 .ndo_start_xmit = vnet_start_xmit,
1020}; 1020};
1021 1021
1022static struct vnet * __devinit vnet_new(const u64 *local_mac) 1022static struct vnet *vnet_new(const u64 *local_mac)
1023{ 1023{
1024 struct net_device *dev; 1024 struct net_device *dev;
1025 struct vnet *vp; 1025 struct vnet *vp;
@@ -1067,7 +1067,7 @@ err_out_free_dev:
1067 return ERR_PTR(err); 1067 return ERR_PTR(err);
1068} 1068}
1069 1069
1070static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac) 1070static struct vnet *vnet_find_or_create(const u64 *local_mac)
1071{ 1071{
1072 struct vnet *iter, *vp; 1072 struct vnet *iter, *vp;
1073 1073
@@ -1088,7 +1088,7 @@ static struct vnet * __devinit vnet_find_or_create(const u64 *local_mac)
1088 1088
1089static const char *local_mac_prop = "local-mac-address"; 1089static const char *local_mac_prop = "local-mac-address";
1090 1090
1091static struct vnet * __devinit vnet_find_parent(struct mdesc_handle *hp, 1091static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1092 u64 port_node) 1092 u64 port_node)
1093{ 1093{
1094 const u64 *local_mac = NULL; 1094 const u64 *local_mac = NULL;
@@ -1125,15 +1125,14 @@ static struct vio_driver_ops vnet_vio_ops = {
1125 .handshake_complete = vnet_handshake_complete, 1125 .handshake_complete = vnet_handshake_complete,
1126}; 1126};
1127 1127
1128static void __devinit print_version(void) 1128static void print_version(void)
1129{ 1129{
1130 printk_once(KERN_INFO "%s", version); 1130 printk_once(KERN_INFO "%s", version);
1131} 1131}
1132 1132
1133const char *remote_macaddr_prop = "remote-mac-address"; 1133const char *remote_macaddr_prop = "remote-mac-address";
1134 1134
1135static int __devinit vnet_port_probe(struct vio_dev *vdev, 1135static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1136 const struct vio_device_id *id)
1137{ 1136{
1138 struct mdesc_handle *hp; 1137 struct mdesc_handle *hp;
1139 struct vnet_port *port; 1138 struct vnet_port *port;
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6ce9edd95c04..1e4d743ff03e 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1914,7 +1914,7 @@ static const struct net_device_ops bdx_netdev_ops = {
1914 */ 1914 */
1915 1915
1916/* TBD: netif_msg should be checked and implemented. I disable it for now */ 1916/* TBD: netif_msg should be checked and implemented. I disable it for now */
1917static int __devinit 1917static int
1918bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1918bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1919{ 1919{
1920 struct net_device *ndev; 1920 struct net_device *ndev;
@@ -2427,7 +2427,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
2427 * Hot-Plug event, or because the driver is going to be removed from 2427 * Hot-Plug event, or because the driver is going to be removed from
2428 * memory. 2428 * memory.
2429 **/ 2429 **/
2430static void __devexit bdx_remove(struct pci_dev *pdev) 2430static void bdx_remove(struct pci_dev *pdev)
2431{ 2431{
2432 struct pci_nic *nic = pci_get_drvdata(pdev); 2432 struct pci_nic *nic = pci_get_drvdata(pdev);
2433 struct net_device *ndev; 2433 struct net_device *ndev;
@@ -2458,7 +2458,7 @@ static struct pci_driver bdx_pci_driver = {
2458 .name = BDX_DRV_NAME, 2458 .name = BDX_DRV_NAME,
2459 .id_table = bdx_pci_tbl, 2459 .id_table = bdx_pci_tbl,
2460 .probe = bdx_probe, 2460 .probe = bdx_probe,
2461 .remove = __devexit_p(bdx_remove), 2461 .remove = bdx_remove,
2462}; 2462};
2463 2463
2464/* 2464/*
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 2c41894d5472..4426151d4ac9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -60,6 +60,15 @@ config TI_CPSW
60 To compile this driver as a module, choose M here: the module 60 To compile this driver as a module, choose M here: the module
61 will be called cpsw. 61 will be called cpsw.
62 62
63config TI_CPTS
64 boolean "TI Common Platform Time Sync (CPTS) Support"
65 depends on TI_CPSW
66 select PTP_1588_CLOCK
67 ---help---
68 This driver supports the Common Platform Time Sync unit of
69 the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
70 and Layer 2 packets, and the driver offers a PTP Hardware Clock.
71
63config TLAN 72config TLAN
64 tristate "TI ThunderLAN support" 73 tristate "TI ThunderLAN support"
65 depends on (PCI || EISA) 74 depends on (PCI || EISA)
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 91bd8bba78ff..c65148e8aa1d 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o 8obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o 9obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o 10obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
11ti_cpsw-y := cpsw_ale.o cpsw.o 11ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 860c2526f08d..d9625f62b026 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1110,7 +1110,7 @@ static const struct net_device_ops cpmac_netdev_ops = {
1110 1110
1111static int external_switch; 1111static int external_switch;
1112 1112
1113static int __devinit cpmac_probe(struct platform_device *pdev) 1113static int cpmac_probe(struct platform_device *pdev)
1114{ 1114{
1115 int rc, phy_id; 1115 int rc, phy_id;
1116 char mdio_bus_id[MII_BUS_ID_SIZE]; 1116 char mdio_bus_id[MII_BUS_ID_SIZE];
@@ -1204,7 +1204,7 @@ fail:
1204 return rc; 1204 return rc;
1205} 1205}
1206 1206
1207static int __devexit cpmac_remove(struct platform_device *pdev) 1207static int cpmac_remove(struct platform_device *pdev)
1208{ 1208{
1209 struct net_device *dev = platform_get_drvdata(pdev); 1209 struct net_device *dev = platform_get_drvdata(pdev);
1210 unregister_netdev(dev); 1210 unregister_netdev(dev);
@@ -1216,10 +1216,10 @@ static struct platform_driver cpmac_driver = {
1216 .driver.name = "cpmac", 1216 .driver.name = "cpmac",
1217 .driver.owner = THIS_MODULE, 1217 .driver.owner = THIS_MODULE,
1218 .probe = cpmac_probe, 1218 .probe = cpmac_probe,
1219 .remove = __devexit_p(cpmac_remove), 1219 .remove = cpmac_remove,
1220}; 1220};
1221 1221
1222int __devinit cpmac_init(void) 1222int cpmac_init(void)
1223{ 1223{
1224 u32 mask; 1224 u32 mask;
1225 int i, res; 1225 int i, res;
@@ -1290,7 +1290,7 @@ fail_alloc:
1290 return res; 1290 return res;
1291} 1291}
1292 1292
1293void __devexit cpmac_exit(void) 1293void cpmac_exit(void)
1294{ 1294{
1295 platform_driver_unregister(&cpmac_driver); 1295 platform_driver_unregister(&cpmac_driver);
1296 mdiobus_unregister(cpmac_mii); 1296 mdiobus_unregister(cpmac_mii);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index df55e2403746..40aff684aa23 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -24,6 +24,7 @@
24#include <linux/if_ether.h> 24#include <linux/if_ether.h>
25#include <linux/etherdevice.h> 25#include <linux/etherdevice.h>
26#include <linux/netdevice.h> 26#include <linux/netdevice.h>
27#include <linux/net_tstamp.h>
27#include <linux/phy.h> 28#include <linux/phy.h>
28#include <linux/workqueue.h> 29#include <linux/workqueue.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
@@ -35,6 +36,7 @@
35#include <linux/platform_data/cpsw.h> 36#include <linux/platform_data/cpsw.h>
36 37
37#include "cpsw_ale.h" 38#include "cpsw_ale.h"
39#include "cpts.h"
38#include "davinci_cpdma.h" 40#include "davinci_cpdma.h"
39 41
40#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \ 42#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
@@ -70,10 +72,37 @@ do { \
70 dev_notice(priv->dev, format, ## __VA_ARGS__); \ 72 dev_notice(priv->dev, format, ## __VA_ARGS__); \
71} while (0) 73} while (0)
72 74
75#define ALE_ALL_PORTS 0x7
76
73#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7) 77#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
74#define CPSW_MINOR_VERSION(reg) (reg & 0xff) 78#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
75#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f) 79#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
76 80
81#define CPSW_VERSION_1 0x19010a
82#define CPSW_VERSION_2 0x19010c
83
84#define HOST_PORT_NUM 0
85#define SLIVER_SIZE 0x40
86
87#define CPSW1_HOST_PORT_OFFSET 0x028
88#define CPSW1_SLAVE_OFFSET 0x050
89#define CPSW1_SLAVE_SIZE 0x040
90#define CPSW1_CPDMA_OFFSET 0x100
91#define CPSW1_STATERAM_OFFSET 0x200
92#define CPSW1_CPTS_OFFSET 0x500
93#define CPSW1_ALE_OFFSET 0x600
94#define CPSW1_SLIVER_OFFSET 0x700
95
96#define CPSW2_HOST_PORT_OFFSET 0x108
97#define CPSW2_SLAVE_OFFSET 0x200
98#define CPSW2_SLAVE_SIZE 0x100
99#define CPSW2_CPDMA_OFFSET 0x800
100#define CPSW2_STATERAM_OFFSET 0xa00
101#define CPSW2_CPTS_OFFSET 0xc00
102#define CPSW2_ALE_OFFSET 0xd00
103#define CPSW2_SLIVER_OFFSET 0xd80
104#define CPSW2_BD_OFFSET 0x2000
105
77#define CPDMA_RXTHRESH 0x0c0 106#define CPDMA_RXTHRESH 0x0c0
78#define CPDMA_RXFREE 0x0e0 107#define CPDMA_RXFREE 0x0e0
79#define CPDMA_TXHDP 0x00 108#define CPDMA_TXHDP 0x00
@@ -81,21 +110,6 @@ do { \
81#define CPDMA_TXCP 0x40 110#define CPDMA_TXCP 0x40
82#define CPDMA_RXCP 0x60 111#define CPDMA_RXCP 0x60
83 112
84#define cpsw_dma_regs(base, offset) \
85 (void __iomem *)((base) + (offset))
86#define cpsw_dma_rxthresh(base, offset) \
87 (void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
88#define cpsw_dma_rxfree(base, offset) \
89 (void __iomem *)((base) + (offset) + CPDMA_RXFREE)
90#define cpsw_dma_txhdp(base, offset) \
91 (void __iomem *)((base) + (offset) + CPDMA_TXHDP)
92#define cpsw_dma_rxhdp(base, offset) \
93 (void __iomem *)((base) + (offset) + CPDMA_RXHDP)
94#define cpsw_dma_txcp(base, offset) \
95 (void __iomem *)((base) + (offset) + CPDMA_TXCP)
96#define cpsw_dma_rxcp(base, offset) \
97 (void __iomem *)((base) + (offset) + CPDMA_RXCP)
98
99#define CPSW_POLL_WEIGHT 64 113#define CPSW_POLL_WEIGHT 64
100#define CPSW_MIN_PACKET_SIZE 60 114#define CPSW_MIN_PACKET_SIZE 60
101#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4) 115#define CPSW_MAX_PACKET_SIZE (1500 + 14 + 4 + 4)
@@ -129,7 +143,7 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
129module_param(rx_packet_max, int, 0); 143module_param(rx_packet_max, int, 0);
130MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)"); 144MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
131 145
132struct cpsw_ss_regs { 146struct cpsw_wr_regs {
133 u32 id_ver; 147 u32 id_ver;
134 u32 soft_reset; 148 u32 soft_reset;
135 u32 control; 149 u32 control;
@@ -140,26 +154,98 @@ struct cpsw_ss_regs {
140 u32 misc_en; 154 u32 misc_en;
141}; 155};
142 156
143struct cpsw_regs { 157struct cpsw_ss_regs {
144 u32 id_ver; 158 u32 id_ver;
145 u32 control; 159 u32 control;
146 u32 soft_reset; 160 u32 soft_reset;
147 u32 stat_port_en; 161 u32 stat_port_en;
148 u32 ptype; 162 u32 ptype;
163 u32 soft_idle;
164 u32 thru_rate;
165 u32 gap_thresh;
166 u32 tx_start_wds;
167 u32 flow_control;
168 u32 vlan_ltype;
169 u32 ts_ltype;
170 u32 dlr_ltype;
149}; 171};
150 172
151struct cpsw_slave_regs { 173/* CPSW_PORT_V1 */
152 u32 max_blks; 174#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
153 u32 blk_cnt; 175#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
154 u32 flow_thresh; 176#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
155 u32 port_vlan; 177#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
156 u32 tx_pri_map; 178#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
157 u32 ts_ctl; 179#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
158 u32 ts_seq_ltype; 180#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
159 u32 ts_vlan; 181#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
160 u32 sa_lo; 182
161 u32 sa_hi; 183/* CPSW_PORT_V2 */
162}; 184#define CPSW2_CONTROL 0x00 /* Control Register */
185#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
186#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
187#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
188#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
189#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
190#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
191
192/* CPSW_PORT_V1 and V2 */
193#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
194#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
195#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
196
197/* CPSW_PORT_V2 only */
198#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
199#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
200#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
201#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
202#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
203#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
204#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
205#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
206
207/* Bit definitions for the CPSW2_CONTROL register */
208#define PASS_PRI_TAGGED (1<<24) /* Pass Priority Tagged */
209#define VLAN_LTYPE2_EN (1<<21) /* VLAN LTYPE 2 enable */
210#define VLAN_LTYPE1_EN (1<<20) /* VLAN LTYPE 1 enable */
211#define DSCP_PRI_EN (1<<16) /* DSCP Priority Enable */
212#define TS_320 (1<<14) /* Time Sync Dest Port 320 enable */
213#define TS_319 (1<<13) /* Time Sync Dest Port 319 enable */
214#define TS_132 (1<<12) /* Time Sync Dest IP Addr 132 enable */
215#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
216#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
217#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
218#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
219#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
220#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
221#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
222#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
223#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
224
225#define CTRL_TS_BITS \
226 (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
227 TS_ANNEX_D_EN | TS_LTYPE1_EN)
228
229#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
230#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
231#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
232
233/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
234#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
235#define TS_SEQ_ID_OFFSET_MASK (0x3f)
236#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
237#define TS_MSG_TYPE_EN_MASK (0xffff)
238
239/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
240#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
241
242/* Bit definitions for the CPSW1_TS_CTL register */
243#define CPSW_V1_TS_RX_EN BIT(0)
244#define CPSW_V1_TS_TX_EN BIT(4)
245#define CPSW_V1_MSG_TYPE_OFS 16
246
247/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
248#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
163 249
164struct cpsw_host_regs { 250struct cpsw_host_regs {
165 u32 max_blks; 251 u32 max_blks;
@@ -185,7 +271,7 @@ struct cpsw_sliver_regs {
185}; 271};
186 272
187struct cpsw_slave { 273struct cpsw_slave {
188 struct cpsw_slave_regs __iomem *regs; 274 void __iomem *regs;
189 struct cpsw_sliver_regs __iomem *sliver; 275 struct cpsw_sliver_regs __iomem *sliver;
190 int slave_num; 276 int slave_num;
191 u32 mac_control; 277 u32 mac_control;
@@ -193,19 +279,30 @@ struct cpsw_slave {
193 struct phy_device *phy; 279 struct phy_device *phy;
194}; 280};
195 281
282static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
283{
284 return __raw_readl(slave->regs + offset);
285}
286
287static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
288{
289 __raw_writel(val, slave->regs + offset);
290}
291
196struct cpsw_priv { 292struct cpsw_priv {
197 spinlock_t lock; 293 spinlock_t lock;
198 struct platform_device *pdev; 294 struct platform_device *pdev;
199 struct net_device *ndev; 295 struct net_device *ndev;
200 struct resource *cpsw_res; 296 struct resource *cpsw_res;
201 struct resource *cpsw_ss_res; 297 struct resource *cpsw_wr_res;
202 struct napi_struct napi; 298 struct napi_struct napi;
203 struct device *dev; 299 struct device *dev;
204 struct cpsw_platform_data data; 300 struct cpsw_platform_data data;
205 struct cpsw_regs __iomem *regs; 301 struct cpsw_ss_regs __iomem *regs;
206 struct cpsw_ss_regs __iomem *ss_regs; 302 struct cpsw_wr_regs __iomem *wr_regs;
207 struct cpsw_host_regs __iomem *host_port_regs; 303 struct cpsw_host_regs __iomem *host_port_regs;
208 u32 msg_enable; 304 u32 msg_enable;
305 u32 version;
209 struct net_device_stats stats; 306 struct net_device_stats stats;
210 int rx_packet_max; 307 int rx_packet_max;
211 int host_port; 308 int host_port;
@@ -218,6 +315,7 @@ struct cpsw_priv {
218 /* snapshot of IRQ numbers */ 315 /* snapshot of IRQ numbers */
219 u32 irqs_table[4]; 316 u32 irqs_table[4];
220 u32 num_irqs; 317 u32 num_irqs;
318 struct cpts cpts;
221}; 319};
222 320
223#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi) 321#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +326,34 @@ struct cpsw_priv {
228 (func)((priv)->slaves + idx, ##arg); \ 326 (func)((priv)->slaves + idx, ##arg); \
229 } while (0) 327 } while (0)
230 328
329static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
330{
331 struct cpsw_priv *priv = netdev_priv(ndev);
332
333 if (ndev->flags & IFF_PROMISC) {
334 /* Enable promiscuous mode */
335 dev_err(priv->dev, "Ignoring Promiscuous mode\n");
336 return;
337 }
338
339 /* Clear all mcast from ALE */
340 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
341
342 if (!netdev_mc_empty(ndev)) {
343 struct netdev_hw_addr *ha;
344
345 /* program multicast address list into ALE register */
346 netdev_for_each_mc_addr(ha, ndev) {
347 cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
348 ALE_ALL_PORTS << priv->host_port, 0, 0);
349 }
350 }
351}
352
231static void cpsw_intr_enable(struct cpsw_priv *priv) 353static void cpsw_intr_enable(struct cpsw_priv *priv)
232{ 354{
233 __raw_writel(0xFF, &priv->ss_regs->tx_en); 355 __raw_writel(0xFF, &priv->wr_regs->tx_en);
234 __raw_writel(0xFF, &priv->ss_regs->rx_en); 356 __raw_writel(0xFF, &priv->wr_regs->rx_en);
235 357
236 cpdma_ctlr_int_ctrl(priv->dma, true); 358 cpdma_ctlr_int_ctrl(priv->dma, true);
237 return; 359 return;
@@ -239,8 +361,8 @@ static void cpsw_intr_enable(struct cpsw_priv *priv)
239 361
240static void cpsw_intr_disable(struct cpsw_priv *priv) 362static void cpsw_intr_disable(struct cpsw_priv *priv)
241{ 363{
242 __raw_writel(0, &priv->ss_regs->tx_en); 364 __raw_writel(0, &priv->wr_regs->tx_en);
243 __raw_writel(0, &priv->ss_regs->rx_en); 365 __raw_writel(0, &priv->wr_regs->rx_en);
244 366
245 cpdma_ctlr_int_ctrl(priv->dma, false); 367 cpdma_ctlr_int_ctrl(priv->dma, false);
246 return; 368 return;
@@ -254,6 +376,7 @@ void cpsw_tx_handler(void *token, int len, int status)
254 376
255 if (unlikely(netif_queue_stopped(ndev))) 377 if (unlikely(netif_queue_stopped(ndev)))
256 netif_start_queue(ndev); 378 netif_start_queue(ndev);
379 cpts_tx_timestamp(&priv->cpts, skb);
257 priv->stats.tx_packets++; 380 priv->stats.tx_packets++;
258 priv->stats.tx_bytes += len; 381 priv->stats.tx_bytes += len;
259 dev_kfree_skb_any(skb); 382 dev_kfree_skb_any(skb);
@@ -274,6 +397,7 @@ void cpsw_rx_handler(void *token, int len, int status)
274 } 397 }
275 if (likely(status >= 0)) { 398 if (likely(status >= 0)) {
276 skb_put(skb, len); 399 skb_put(skb, len);
400 cpts_rx_timestamp(&priv->cpts, skb);
277 skb->protocol = eth_type_trans(skb, ndev); 401 skb->protocol = eth_type_trans(skb, ndev);
278 netif_receive_skb(skb); 402 netif_receive_skb(skb);
279 priv->stats.rx_bytes += len; 403 priv->stats.rx_bytes += len;
@@ -359,8 +483,8 @@ static inline void soft_reset(const char *module, void __iomem *reg)
359static void cpsw_set_slave_mac(struct cpsw_slave *slave, 483static void cpsw_set_slave_mac(struct cpsw_slave *slave,
360 struct cpsw_priv *priv) 484 struct cpsw_priv *priv)
361{ 485{
362 __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi); 486 slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
363 __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo); 487 slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
364} 488}
365 489
366static void _cpsw_adjust_link(struct cpsw_slave *slave, 490static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +570,15 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
446 570
447 /* setup priority mapping */ 571 /* setup priority mapping */
448 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map); 572 __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
449 __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map); 573
574 switch (priv->version) {
575 case CPSW_VERSION_1:
576 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
577 break;
578 case CPSW_VERSION_2:
579 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
580 break;
581 }
450 582
451 /* setup max packet size, and mac address */ 583 /* setup max packet size, and mac address */
452 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen); 584 __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -505,7 +637,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
505 637
506 pm_runtime_get_sync(&priv->pdev->dev); 638 pm_runtime_get_sync(&priv->pdev->dev);
507 639
508 reg = __raw_readl(&priv->regs->id_ver); 640 reg = priv->version;
509 641
510 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", 642 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
511 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg), 643 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -566,12 +698,12 @@ static int cpsw_ndo_stop(struct net_device *ndev)
566 struct cpsw_priv *priv = netdev_priv(ndev); 698 struct cpsw_priv *priv = netdev_priv(ndev);
567 699
568 cpsw_info(priv, ifdown, "shutting down cpsw device\n"); 700 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
569 cpsw_intr_disable(priv);
570 cpdma_ctlr_int_ctrl(priv->dma, false);
571 cpdma_ctlr_stop(priv->dma);
572 netif_stop_queue(priv->ndev); 701 netif_stop_queue(priv->ndev);
573 napi_disable(&priv->napi); 702 napi_disable(&priv->napi);
574 netif_carrier_off(priv->ndev); 703 netif_carrier_off(priv->ndev);
704 cpsw_intr_disable(priv);
705 cpdma_ctlr_int_ctrl(priv->dma, false);
706 cpdma_ctlr_stop(priv->dma);
575 cpsw_ale_stop(priv->ale); 707 cpsw_ale_stop(priv->ale);
576 for_each_slave(priv, cpsw_slave_stop, priv); 708 for_each_slave(priv, cpsw_slave_stop, priv);
577 pm_runtime_put_sync(&priv->pdev->dev); 709 pm_runtime_put_sync(&priv->pdev->dev);
@@ -592,6 +724,11 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
592 return NETDEV_TX_OK; 724 return NETDEV_TX_OK;
593 } 725 }
594 726
727 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
728 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
729
730 skb_tx_timestamp(skb);
731
595 ret = cpdma_chan_submit(priv->txch, skb, skb->data, 732 ret = cpdma_chan_submit(priv->txch, skb, skb->data,
596 skb->len, GFP_KERNEL); 733 skb->len, GFP_KERNEL);
597 if (unlikely(ret != 0)) { 734 if (unlikely(ret != 0)) {
@@ -629,6 +766,129 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
629 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n"); 766 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
630} 767}
631 768
769#ifdef CONFIG_TI_CPTS
770
771static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
772{
773 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
774 u32 ts_en, seq_id;
775
776 if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
777 slave_write(slave, 0, CPSW1_TS_CTL);
778 return;
779 }
780
781 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
782 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
783
784 if (priv->cpts.tx_enable)
785 ts_en |= CPSW_V1_TS_TX_EN;
786
787 if (priv->cpts.rx_enable)
788 ts_en |= CPSW_V1_TS_RX_EN;
789
790 slave_write(slave, ts_en, CPSW1_TS_CTL);
791 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
792}
793
794static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
795{
796 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
797 u32 ctrl, mtype;
798
799 ctrl = slave_read(slave, CPSW2_CONTROL);
800 ctrl &= ~CTRL_ALL_TS_MASK;
801
802 if (priv->cpts.tx_enable)
803 ctrl |= CTRL_TX_TS_BITS;
804
805 if (priv->cpts.rx_enable)
806 ctrl |= CTRL_RX_TS_BITS;
807
808 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
809
810 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
811 slave_write(slave, ctrl, CPSW2_CONTROL);
812 __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
813}
814
815static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
816{
817 struct cpsw_priv *priv = netdev_priv(dev);
818 struct cpts *cpts = &priv->cpts;
819 struct hwtstamp_config cfg;
820
821 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
822 return -EFAULT;
823
824 /* reserved for future extensions */
825 if (cfg.flags)
826 return -EINVAL;
827
828 switch (cfg.tx_type) {
829 case HWTSTAMP_TX_OFF:
830 cpts->tx_enable = 0;
831 break;
832 case HWTSTAMP_TX_ON:
833 cpts->tx_enable = 1;
834 break;
835 default:
836 return -ERANGE;
837 }
838
839 switch (cfg.rx_filter) {
840 case HWTSTAMP_FILTER_NONE:
841 cpts->rx_enable = 0;
842 break;
843 case HWTSTAMP_FILTER_ALL:
844 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
845 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
846 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
847 return -ERANGE;
848 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
849 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
850 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
851 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
852 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
853 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
854 case HWTSTAMP_FILTER_PTP_V2_EVENT:
855 case HWTSTAMP_FILTER_PTP_V2_SYNC:
856 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
857 cpts->rx_enable = 1;
858 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
859 break;
860 default:
861 return -ERANGE;
862 }
863
864 switch (priv->version) {
865 case CPSW_VERSION_1:
866 cpsw_hwtstamp_v1(priv);
867 break;
868 case CPSW_VERSION_2:
869 cpsw_hwtstamp_v2(priv);
870 break;
871 default:
872 return -ENOTSUPP;
873 }
874
875 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
876}
877
878#endif /*CONFIG_TI_CPTS*/
879
880static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
881{
882 if (!netif_running(dev))
883 return -EINVAL;
884
885#ifdef CONFIG_TI_CPTS
886 if (cmd == SIOCSHWTSTAMP)
887 return cpsw_hwtstamp_ioctl(dev, req);
888#endif
889 return -ENOTSUPP;
890}
891
632static void cpsw_ndo_tx_timeout(struct net_device *ndev) 892static void cpsw_ndo_tx_timeout(struct net_device *ndev)
633{ 893{
634 struct cpsw_priv *priv = netdev_priv(ndev); 894 struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +929,12 @@ static const struct net_device_ops cpsw_netdev_ops = {
669 .ndo_stop = cpsw_ndo_stop, 929 .ndo_stop = cpsw_ndo_stop,
670 .ndo_start_xmit = cpsw_ndo_start_xmit, 930 .ndo_start_xmit = cpsw_ndo_start_xmit,
671 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags, 931 .ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
932 .ndo_do_ioctl = cpsw_ndo_ioctl,
672 .ndo_validate_addr = eth_validate_addr, 933 .ndo_validate_addr = eth_validate_addr,
673 .ndo_change_mtu = eth_change_mtu, 934 .ndo_change_mtu = eth_change_mtu,
674 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 935 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
675 .ndo_get_stats = cpsw_ndo_get_stats, 936 .ndo_get_stats = cpsw_ndo_get_stats,
937 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
676#ifdef CONFIG_NET_POLL_CONTROLLER 938#ifdef CONFIG_NET_POLL_CONTROLLER
677 .ndo_poll_controller = cpsw_ndo_poll_controller, 939 .ndo_poll_controller = cpsw_ndo_poll_controller,
678#endif 940#endif
@@ -699,22 +961,56 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
699 priv->msg_enable = value; 961 priv->msg_enable = value;
700} 962}
701 963
964static int cpsw_get_ts_info(struct net_device *ndev,
965 struct ethtool_ts_info *info)
966{
967#ifdef CONFIG_TI_CPTS
968 struct cpsw_priv *priv = netdev_priv(ndev);
969
970 info->so_timestamping =
971 SOF_TIMESTAMPING_TX_HARDWARE |
972 SOF_TIMESTAMPING_TX_SOFTWARE |
973 SOF_TIMESTAMPING_RX_HARDWARE |
974 SOF_TIMESTAMPING_RX_SOFTWARE |
975 SOF_TIMESTAMPING_SOFTWARE |
976 SOF_TIMESTAMPING_RAW_HARDWARE;
977 info->phc_index = priv->cpts.phc_index;
978 info->tx_types =
979 (1 << HWTSTAMP_TX_OFF) |
980 (1 << HWTSTAMP_TX_ON);
981 info->rx_filters =
982 (1 << HWTSTAMP_FILTER_NONE) |
983 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
984#else
985 info->so_timestamping =
986 SOF_TIMESTAMPING_TX_SOFTWARE |
987 SOF_TIMESTAMPING_RX_SOFTWARE |
988 SOF_TIMESTAMPING_SOFTWARE;
989 info->phc_index = -1;
990 info->tx_types = 0;
991 info->rx_filters = 0;
992#endif
993 return 0;
994}
995
702static const struct ethtool_ops cpsw_ethtool_ops = { 996static const struct ethtool_ops cpsw_ethtool_ops = {
703 .get_drvinfo = cpsw_get_drvinfo, 997 .get_drvinfo = cpsw_get_drvinfo,
704 .get_msglevel = cpsw_get_msglevel, 998 .get_msglevel = cpsw_get_msglevel,
705 .set_msglevel = cpsw_set_msglevel, 999 .set_msglevel = cpsw_set_msglevel,
706 .get_link = ethtool_op_get_link, 1000 .get_link = ethtool_op_get_link,
1001 .get_ts_info = cpsw_get_ts_info,
707}; 1002};
708 1003
709static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv) 1004static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
1005 u32 slave_reg_ofs, u32 sliver_reg_ofs)
710{ 1006{
711 void __iomem *regs = priv->regs; 1007 void __iomem *regs = priv->regs;
712 int slave_num = slave->slave_num; 1008 int slave_num = slave->slave_num;
713 struct cpsw_slave_data *data = priv->data.slave_data + slave_num; 1009 struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
714 1010
715 slave->data = data; 1011 slave->data = data;
716 slave->regs = regs + data->slave_reg_ofs; 1012 slave->regs = regs + slave_reg_ofs;
717 slave->sliver = regs + data->sliver_reg_ofs; 1013 slave->sliver = regs + sliver_reg_ofs;
718} 1014}
719 1015
720static int cpsw_probe_dt(struct cpsw_platform_data *data, 1016static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -734,49 +1030,40 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
734 } 1030 }
735 data->slaves = prop; 1031 data->slaves = prop;
736 1032
737 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * 1033 if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
738 data->slaves, GFP_KERNEL); 1034 pr_err("Missing cpts_active_slave property in the DT.\n");
739 if (!data->slave_data) {
740 pr_err("Could not allocate slave memory.\n");
741 return -EINVAL;
742 }
743
744 data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
745
746 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
747 pr_err("Missing cpdma_channels property in the DT.\n");
748 ret = -EINVAL; 1035 ret = -EINVAL;
749 goto error_ret; 1036 goto error_ret;
750 } 1037 }
751 data->channels = prop; 1038 data->cpts_active_slave = prop;
752 1039
753 if (of_property_read_u32(node, "host_port_no", &prop)) { 1040 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
754 pr_err("Missing host_port_no property in the DT.\n"); 1041 pr_err("Missing cpts_clock_mult property in the DT.\n");
755 ret = -EINVAL; 1042 ret = -EINVAL;
756 goto error_ret; 1043 goto error_ret;
757 } 1044 }
758 data->host_port_num = prop; 1045 data->cpts_clock_mult = prop;
759 1046
760 if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) { 1047 if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
761 pr_err("Missing cpdma_reg_ofs property in the DT.\n"); 1048 pr_err("Missing cpts_clock_shift property in the DT.\n");
762 ret = -EINVAL; 1049 ret = -EINVAL;
763 goto error_ret; 1050 goto error_ret;
764 } 1051 }
765 data->cpdma_reg_ofs = prop; 1052 data->cpts_clock_shift = prop;
766 1053
767 if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) { 1054 data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
768 pr_err("Missing cpdma_sram_ofs property in the DT.\n"); 1055 data->slaves, GFP_KERNEL);
769 ret = -EINVAL; 1056 if (!data->slave_data) {
770 goto error_ret; 1057 pr_err("Could not allocate slave memory.\n");
1058 return -EINVAL;
771 } 1059 }
772 data->cpdma_sram_ofs = prop;
773 1060
774 if (of_property_read_u32(node, "ale_reg_ofs", &prop)) { 1061 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
775 pr_err("Missing ale_reg_ofs property in the DT.\n"); 1062 pr_err("Missing cpdma_channels property in the DT.\n");
776 ret = -EINVAL; 1063 ret = -EINVAL;
777 goto error_ret; 1064 goto error_ret;
778 } 1065 }
779 data->ale_reg_ofs = prop; 1066 data->channels = prop;
780 1067
781 if (of_property_read_u32(node, "ale_entries", &prop)) { 1068 if (of_property_read_u32(node, "ale_entries", &prop)) {
782 pr_err("Missing ale_entries property in the DT.\n"); 1069 pr_err("Missing ale_entries property in the DT.\n");
@@ -785,27 +1072,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
785 } 1072 }
786 data->ale_entries = prop; 1073 data->ale_entries = prop;
787 1074
788 if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
789 pr_err("Missing host_port_reg_ofs property in the DT.\n");
790 ret = -EINVAL;
791 goto error_ret;
792 }
793 data->host_port_reg_ofs = prop;
794
795 if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
796 pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
797 ret = -EINVAL;
798 goto error_ret;
799 }
800 data->hw_stats_reg_ofs = prop;
801
802 if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
803 pr_err("Missing bd_ram_ofs property in the DT.\n");
804 ret = -EINVAL;
805 goto error_ret;
806 }
807 data->bd_ram_ofs = prop;
808
809 if (of_property_read_u32(node, "bd_ram_size", &prop)) { 1075 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
810 pr_err("Missing bd_ram_size property in the DT.\n"); 1076 pr_err("Missing bd_ram_size property in the DT.\n");
811 ret = -EINVAL; 1077 ret = -EINVAL;
@@ -827,33 +1093,34 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
827 } 1093 }
828 data->mac_control = prop; 1094 data->mac_control = prop;
829 1095
830 for_each_child_of_node(node, slave_node) { 1096 /*
1097 * Populate all the child nodes here...
1098 */
1099 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
1100 /* We do not want to force this, as in some cases may not have child */
1101 if (ret)
1102 pr_warn("Doesn't have any child node\n");
1103
1104 for_each_node_by_name(slave_node, "slave") {
831 struct cpsw_slave_data *slave_data = data->slave_data + i; 1105 struct cpsw_slave_data *slave_data = data->slave_data + i;
832 const char *phy_id = NULL;
833 const void *mac_addr = NULL; 1106 const void *mac_addr = NULL;
834 1107 u32 phyid;
835 if (of_property_read_string(slave_node, "phy_id", &phy_id)) { 1108 int lenp;
1109 const __be32 *parp;
1110 struct device_node *mdio_node;
1111 struct platform_device *mdio;
1112
1113 parp = of_get_property(slave_node, "phy_id", &lenp);
1114 if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
836 pr_err("Missing slave[%d] phy_id property\n", i); 1115 pr_err("Missing slave[%d] phy_id property\n", i);
837 ret = -EINVAL; 1116 ret = -EINVAL;
838 goto error_ret; 1117 goto error_ret;
839 } 1118 }
840 slave_data->phy_id = phy_id; 1119 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
841 1120 phyid = be32_to_cpup(parp+1);
842 if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) { 1121 mdio = of_find_device_by_node(mdio_node);
843 pr_err("Missing slave[%d] slave_reg_ofs property\n", i); 1122 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
844 ret = -EINVAL; 1123 PHY_ID_FMT, mdio->name, phyid);
845 goto error_ret;
846 }
847 slave_data->slave_reg_ofs = prop;
848
849 if (of_property_read_u32(slave_node, "sliver_reg_ofs",
850 &prop)) {
851 pr_err("Missing slave[%d] sliver_reg_ofs property\n",
852 i);
853 ret = -EINVAL;
854 goto error_ret;
855 }
856 slave_data->sliver_reg_ofs = prop;
857 1124
858 mac_addr = of_get_mac_address(slave_node); 1125 mac_addr = of_get_mac_address(slave_node);
859 if (mac_addr) 1126 if (mac_addr)
@@ -869,15 +1136,16 @@ error_ret:
869 return ret; 1136 return ret;
870} 1137}
871 1138
872static int __devinit cpsw_probe(struct platform_device *pdev) 1139static int cpsw_probe(struct platform_device *pdev)
873{ 1140{
874 struct cpsw_platform_data *data = pdev->dev.platform_data; 1141 struct cpsw_platform_data *data = pdev->dev.platform_data;
875 struct net_device *ndev; 1142 struct net_device *ndev;
876 struct cpsw_priv *priv; 1143 struct cpsw_priv *priv;
877 struct cpdma_params dma_params; 1144 struct cpdma_params dma_params;
878 struct cpsw_ale_params ale_params; 1145 struct cpsw_ale_params ale_params;
879 void __iomem *regs; 1146 void __iomem *ss_regs, *wr_regs;
880 struct resource *res; 1147 struct resource *res;
1148 u32 slave_offset, sliver_offset, slave_size;
881 int ret = 0, i, k = 0; 1149 int ret = 0, i, k = 0;
882 1150
883 ndev = alloc_etherdev(sizeof(struct cpsw_priv)); 1151 ndev = alloc_etherdev(sizeof(struct cpsw_priv));
@@ -895,6 +1163,11 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
895 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1163 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
896 priv->rx_packet_max = max(rx_packet_max, 128); 1164 priv->rx_packet_max = max(rx_packet_max, 128);
897 1165
1166 /*
1167 * This may be required here for child devices.
1168 */
1169 pm_runtime_enable(&pdev->dev);
1170
898 if (cpsw_probe_dt(&priv->data, pdev)) { 1171 if (cpsw_probe_dt(&priv->data, pdev)) {
899 pr_err("cpsw: platform data missing\n"); 1172 pr_err("cpsw: platform data missing\n");
900 ret = -ENODEV; 1173 ret = -ENODEV;
@@ -921,7 +1194,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
921 for (i = 0; i < data->slaves; i++) 1194 for (i = 0; i < data->slaves; i++)
922 priv->slaves[i].slave_num = i; 1195 priv->slaves[i].slave_num = i;
923 1196
924 pm_runtime_enable(&pdev->dev);
925 priv->clk = clk_get(&pdev->dev, "fck"); 1197 priv->clk = clk_get(&pdev->dev, "fck");
926 if (IS_ERR(priv->clk)) { 1198 if (IS_ERR(priv->clk)) {
927 dev_err(&pdev->dev, "fck is not found\n"); 1199 dev_err(&pdev->dev, "fck is not found\n");
@@ -935,63 +1207,86 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
935 ret = -ENOENT; 1207 ret = -ENOENT;
936 goto clean_clk_ret; 1208 goto clean_clk_ret;
937 } 1209 }
938
939 if (!request_mem_region(priv->cpsw_res->start, 1210 if (!request_mem_region(priv->cpsw_res->start,
940 resource_size(priv->cpsw_res), ndev->name)) { 1211 resource_size(priv->cpsw_res), ndev->name)) {
941 dev_err(priv->dev, "failed request i/o region\n"); 1212 dev_err(priv->dev, "failed request i/o region\n");
942 ret = -ENXIO; 1213 ret = -ENXIO;
943 goto clean_clk_ret; 1214 goto clean_clk_ret;
944 } 1215 }
945 1216 ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
946 regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res)); 1217 if (!ss_regs) {
947 if (!regs) {
948 dev_err(priv->dev, "unable to map i/o region\n"); 1218 dev_err(priv->dev, "unable to map i/o region\n");
949 goto clean_cpsw_iores_ret; 1219 goto clean_cpsw_iores_ret;
950 } 1220 }
951 priv->regs = regs; 1221 priv->regs = ss_regs;
952 priv->host_port = data->host_port_num; 1222 priv->version = __raw_readl(&priv->regs->id_ver);
953 priv->host_port_regs = regs + data->host_port_reg_ofs; 1223 priv->host_port = HOST_PORT_NUM;
954 1224
955 priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1225 priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
956 if (!priv->cpsw_ss_res) { 1226 if (!priv->cpsw_wr_res) {
957 dev_err(priv->dev, "error getting i/o resource\n"); 1227 dev_err(priv->dev, "error getting i/o resource\n");
958 ret = -ENOENT; 1228 ret = -ENOENT;
959 goto clean_clk_ret; 1229 goto clean_iomap_ret;
960 } 1230 }
961 1231 if (!request_mem_region(priv->cpsw_wr_res->start,
962 if (!request_mem_region(priv->cpsw_ss_res->start, 1232 resource_size(priv->cpsw_wr_res), ndev->name)) {
963 resource_size(priv->cpsw_ss_res), ndev->name)) {
964 dev_err(priv->dev, "failed request i/o region\n"); 1233 dev_err(priv->dev, "failed request i/o region\n");
965 ret = -ENXIO; 1234 ret = -ENXIO;
966 goto clean_clk_ret; 1235 goto clean_iomap_ret;
967 } 1236 }
968 1237 wr_regs = ioremap(priv->cpsw_wr_res->start,
969 regs = ioremap(priv->cpsw_ss_res->start, 1238 resource_size(priv->cpsw_wr_res));
970 resource_size(priv->cpsw_ss_res)); 1239 if (!wr_regs) {
971 if (!regs) {
972 dev_err(priv->dev, "unable to map i/o region\n"); 1240 dev_err(priv->dev, "unable to map i/o region\n");
973 goto clean_cpsw_ss_iores_ret; 1241 goto clean_cpsw_wr_iores_ret;
974 } 1242 }
975 priv->ss_regs = regs; 1243 priv->wr_regs = wr_regs;
976
977 for_each_slave(priv, cpsw_slave_init, priv);
978 1244
979 memset(&dma_params, 0, sizeof(dma_params)); 1245 memset(&dma_params, 0, sizeof(dma_params));
1246 memset(&ale_params, 0, sizeof(ale_params));
1247
1248 switch (priv->version) {
1249 case CPSW_VERSION_1:
1250 priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
1251 priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
1252 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
1253 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
1254 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
1255 slave_offset = CPSW1_SLAVE_OFFSET;
1256 slave_size = CPSW1_SLAVE_SIZE;
1257 sliver_offset = CPSW1_SLIVER_OFFSET;
1258 dma_params.desc_mem_phys = 0;
1259 break;
1260 case CPSW_VERSION_2:
1261 priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
1262 priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
1263 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
1264 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
1265 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
1266 slave_offset = CPSW2_SLAVE_OFFSET;
1267 slave_size = CPSW2_SLAVE_SIZE;
1268 sliver_offset = CPSW2_SLIVER_OFFSET;
1269 dma_params.desc_mem_phys =
1270 (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
1271 break;
1272 default:
1273 dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
1274 ret = -ENODEV;
1275 goto clean_cpsw_wr_iores_ret;
1276 }
1277 for (i = 0; i < priv->data.slaves; i++) {
1278 struct cpsw_slave *slave = &priv->slaves[i];
1279 cpsw_slave_init(slave, priv, slave_offset, sliver_offset);
1280 slave_offset += slave_size;
1281 sliver_offset += SLIVER_SIZE;
1282 }
1283
980 dma_params.dev = &pdev->dev; 1284 dma_params.dev = &pdev->dev;
981 dma_params.dmaregs = cpsw_dma_regs((u32)priv->regs, 1285 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
982 data->cpdma_reg_ofs); 1286 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
983 dma_params.rxthresh = cpsw_dma_rxthresh((u32)priv->regs, 1287 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
984 data->cpdma_reg_ofs); 1288 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
985 dma_params.rxfree = cpsw_dma_rxfree((u32)priv->regs, 1289 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
986 data->cpdma_reg_ofs);
987 dma_params.txhdp = cpsw_dma_txhdp((u32)priv->regs,
988 data->cpdma_sram_ofs);
989 dma_params.rxhdp = cpsw_dma_rxhdp((u32)priv->regs,
990 data->cpdma_sram_ofs);
991 dma_params.txcp = cpsw_dma_txcp((u32)priv->regs,
992 data->cpdma_sram_ofs);
993 dma_params.rxcp = cpsw_dma_rxcp((u32)priv->regs,
994 data->cpdma_sram_ofs);
995 1290
996 dma_params.num_chan = data->channels; 1291 dma_params.num_chan = data->channels;
997 dma_params.has_soft_reset = true; 1292 dma_params.has_soft_reset = true;
@@ -999,16 +1294,13 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
999 dma_params.desc_mem_size = data->bd_ram_size; 1294 dma_params.desc_mem_size = data->bd_ram_size;
1000 dma_params.desc_align = 16; 1295 dma_params.desc_align = 16;
1001 dma_params.has_ext_regs = true; 1296 dma_params.has_ext_regs = true;
1002 dma_params.desc_mem_phys = data->no_bd_ram ? 0 : 1297 dma_params.desc_hw_addr = dma_params.desc_mem_phys;
1003 (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
1004 dma_params.desc_hw_addr = data->hw_ram_addr ?
1005 data->hw_ram_addr : dma_params.desc_mem_phys ;
1006 1298
1007 priv->dma = cpdma_ctlr_create(&dma_params); 1299 priv->dma = cpdma_ctlr_create(&dma_params);
1008 if (!priv->dma) { 1300 if (!priv->dma) {
1009 dev_err(priv->dev, "error initializing dma\n"); 1301 dev_err(priv->dev, "error initializing dma\n");
1010 ret = -ENOMEM; 1302 ret = -ENOMEM;
1011 goto clean_iomap_ret; 1303 goto clean_wr_iomap_ret;
1012 } 1304 }
1013 1305
1014 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0), 1306 priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -1022,10 +1314,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
1022 goto clean_dma_ret; 1314 goto clean_dma_ret;
1023 } 1315 }
1024 1316
1025 memset(&ale_params, 0, sizeof(ale_params));
1026 ale_params.dev = &ndev->dev; 1317 ale_params.dev = &ndev->dev;
1027 ale_params.ale_regs = (void *)((u32)priv->regs) +
1028 ((u32)data->ale_reg_ofs);
1029 ale_params.ale_ageout = ale_ageout; 1318 ale_params.ale_ageout = ale_ageout;
1030 ale_params.ale_entries = data->ale_entries; 1319 ale_params.ale_entries = data->ale_entries;
1031 ale_params.ale_ports = data->slaves; 1320 ale_params.ale_ports = data->slaves;
@@ -1072,6 +1361,10 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
1072 goto clean_irq_ret; 1361 goto clean_irq_ret;
1073 } 1362 }
1074 1363
1364 if (cpts_register(&pdev->dev, &priv->cpts,
1365 data->cpts_clock_mult, data->cpts_clock_shift))
1366 dev_err(priv->dev, "error registering cpts device\n");
1367
1075 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n", 1368 cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
1076 priv->cpsw_res->start, ndev->irq); 1369 priv->cpsw_res->start, ndev->irq);
1077 1370
@@ -1085,11 +1378,13 @@ clean_dma_ret:
1085 cpdma_chan_destroy(priv->txch); 1378 cpdma_chan_destroy(priv->txch);
1086 cpdma_chan_destroy(priv->rxch); 1379 cpdma_chan_destroy(priv->rxch);
1087 cpdma_ctlr_destroy(priv->dma); 1380 cpdma_ctlr_destroy(priv->dma);
1381clean_wr_iomap_ret:
1382 iounmap(priv->wr_regs);
1383clean_cpsw_wr_iores_ret:
1384 release_mem_region(priv->cpsw_wr_res->start,
1385 resource_size(priv->cpsw_wr_res));
1088clean_iomap_ret: 1386clean_iomap_ret:
1089 iounmap(priv->regs); 1387 iounmap(priv->regs);
1090clean_cpsw_ss_iores_ret:
1091 release_mem_region(priv->cpsw_ss_res->start,
1092 resource_size(priv->cpsw_ss_res));
1093clean_cpsw_iores_ret: 1388clean_cpsw_iores_ret:
1094 release_mem_region(priv->cpsw_res->start, 1389 release_mem_region(priv->cpsw_res->start,
1095 resource_size(priv->cpsw_res)); 1390 resource_size(priv->cpsw_res));
@@ -1103,7 +1398,7 @@ clean_ndev_ret:
1103 return ret; 1398 return ret;
1104} 1399}
1105 1400
1106static int __devexit cpsw_remove(struct platform_device *pdev) 1401static int cpsw_remove(struct platform_device *pdev)
1107{ 1402{
1108 struct net_device *ndev = platform_get_drvdata(pdev); 1403 struct net_device *ndev = platform_get_drvdata(pdev);
1109 struct cpsw_priv *priv = netdev_priv(ndev); 1404 struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1111,6 +1406,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
1111 pr_info("removing device"); 1406 pr_info("removing device");
1112 platform_set_drvdata(pdev, NULL); 1407 platform_set_drvdata(pdev, NULL);
1113 1408
1409 cpts_unregister(&priv->cpts);
1114 free_irq(ndev->irq, priv); 1410 free_irq(ndev->irq, priv);
1115 cpsw_ale_destroy(priv->ale); 1411 cpsw_ale_destroy(priv->ale);
1116 cpdma_chan_destroy(priv->txch); 1412 cpdma_chan_destroy(priv->txch);
@@ -1119,8 +1415,9 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
1119 iounmap(priv->regs); 1415 iounmap(priv->regs);
1120 release_mem_region(priv->cpsw_res->start, 1416 release_mem_region(priv->cpsw_res->start,
1121 resource_size(priv->cpsw_res)); 1417 resource_size(priv->cpsw_res));
1122 release_mem_region(priv->cpsw_ss_res->start, 1418 iounmap(priv->wr_regs);
1123 resource_size(priv->cpsw_ss_res)); 1419 release_mem_region(priv->cpsw_wr_res->start,
1420 resource_size(priv->cpsw_wr_res));
1124 pm_runtime_disable(&pdev->dev); 1421 pm_runtime_disable(&pdev->dev);
1125 clk_put(priv->clk); 1422 clk_put(priv->clk);
1126 kfree(priv->slaves); 1423 kfree(priv->slaves);
@@ -1170,7 +1467,7 @@ static struct platform_driver cpsw_driver = {
1170 .of_match_table = of_match_ptr(cpsw_of_mtable), 1467 .of_match_table = of_match_ptr(cpsw_of_mtable),
1171 }, 1468 },
1172 .probe = cpsw_probe, 1469 .probe = cpsw_probe,
1173 .remove = __devexit_p(cpsw_remove), 1470 .remove = cpsw_remove,
1174}; 1471};
1175 1472
1176static int __init cpsw_init(void) 1473static int __init cpsw_init(void)
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index ca0d48a7e508..0e9ccc2cf91f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -20,6 +20,7 @@
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/stat.h> 21#include <linux/stat.h>
22#include <linux/sysfs.h> 22#include <linux/sysfs.h>
23#include <linux/etherdevice.h>
23 24
24#include "cpsw_ale.h" 25#include "cpsw_ale.h"
25 26
@@ -211,10 +212,34 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
211 mask &= ~port_mask; 212 mask &= ~port_mask;
212 213
213 /* free if only remaining port is host port */ 214 /* free if only remaining port is host port */
214 if (mask == BIT(ale->params.ale_ports)) 215 if (mask)
215 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
216 else
217 cpsw_ale_set_port_mask(ale_entry, mask); 216 cpsw_ale_set_port_mask(ale_entry, mask);
217 else
218 cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
219}
220
221int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
222{
223 u32 ale_entry[ALE_ENTRY_WORDS];
224 int ret, idx;
225
226 for (idx = 0; idx < ale->params.ale_entries; idx++) {
227 cpsw_ale_read(ale, idx, ale_entry);
228 ret = cpsw_ale_get_entry_type(ale_entry);
229 if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
230 continue;
231
232 if (cpsw_ale_get_mcast(ale_entry)) {
233 u8 addr[6];
234
235 cpsw_ale_get_addr(ale_entry, addr);
236 if (!is_broadcast_ether_addr(addr))
237 cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
238 }
239
240 cpsw_ale_write(ale, idx, ale_entry);
241 }
242 return 0;
218} 243}
219 244
220static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry, 245static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index a95b37beb02d..2bd09cbce522 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -80,6 +80,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
80 80
81int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); 81int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
82int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); 82int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
83int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
83int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags); 84int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
84int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port); 85int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
85int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask, 86int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 000000000000..337766738eca
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,427 @@
1/*
2 * TI Common Platform Time Sync
3 *
4 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/err.h>
21#include <linux/if.h>
22#include <linux/hrtimer.h>
23#include <linux/module.h>
24#include <linux/net_tstamp.h>
25#include <linux/ptp_classify.h>
26#include <linux/time.h>
27#include <linux/uaccess.h>
28#include <linux/workqueue.h>
29
30#include <plat/clock.h>
31
32#include "cpts.h"
33
34#ifdef CONFIG_TI_CPTS
35
36static struct sock_filter ptp_filter[] = {
37 PTP_FILTER
38};
39
40#define cpts_read32(c, r) __raw_readl(&c->reg->r)
41#define cpts_write32(c, v, r) __raw_writel(v, &c->reg->r)
42
43static int event_expired(struct cpts_event *event)
44{
45 return time_after(jiffies, event->tmo);
46}
47
48static int event_type(struct cpts_event *event)
49{
50 return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
51}
52
53static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
54{
55 u32 r = cpts_read32(cpts, intstat_raw);
56
57 if (r & TS_PEND_RAW) {
58 *high = cpts_read32(cpts, event_high);
59 *low = cpts_read32(cpts, event_low);
60 cpts_write32(cpts, EVENT_POP, event_pop);
61 return 0;
62 }
63 return -1;
64}
65
66/*
67 * Returns zero if matching event type was found.
68 */
69static int cpts_fifo_read(struct cpts *cpts, int match)
70{
71 int i, type = -1;
72 u32 hi, lo;
73 struct cpts_event *event;
74
75 for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
76 if (cpts_fifo_pop(cpts, &hi, &lo))
77 break;
78 if (list_empty(&cpts->pool)) {
79 pr_err("cpts: event pool is empty\n");
80 return -1;
81 }
82 event = list_first_entry(&cpts->pool, struct cpts_event, list);
83 event->tmo = jiffies + 2;
84 event->high = hi;
85 event->low = lo;
86 type = event_type(event);
87 switch (type) {
88 case CPTS_EV_PUSH:
89 case CPTS_EV_RX:
90 case CPTS_EV_TX:
91 list_del_init(&event->list);
92 list_add_tail(&event->list, &cpts->events);
93 break;
94 case CPTS_EV_ROLL:
95 case CPTS_EV_HALF:
96 case CPTS_EV_HW:
97 break;
98 default:
99 pr_err("cpts: unkown event type\n");
100 break;
101 }
102 if (type == match)
103 break;
104 }
105 return type == match ? 0 : -1;
106}
107
108static cycle_t cpts_systim_read(const struct cyclecounter *cc)
109{
110 u64 val = 0;
111 struct cpts_event *event;
112 struct list_head *this, *next;
113 struct cpts *cpts = container_of(cc, struct cpts, cc);
114
115 cpts_write32(cpts, TS_PUSH, ts_push);
116 if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
117 pr_err("cpts: unable to obtain a time stamp\n");
118
119 list_for_each_safe(this, next, &cpts->events) {
120 event = list_entry(this, struct cpts_event, list);
121 if (event_type(event) == CPTS_EV_PUSH) {
122 list_del_init(&event->list);
123 list_add(&event->list, &cpts->pool);
124 val = event->low;
125 break;
126 }
127 }
128
129 return val;
130}
131
132/* PTP clock operations */
133
134static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
135{
136 u64 adj;
137 u32 diff, mult;
138 int neg_adj = 0;
139 unsigned long flags;
140 struct cpts *cpts = container_of(ptp, struct cpts, info);
141
142 if (ppb < 0) {
143 neg_adj = 1;
144 ppb = -ppb;
145 }
146 mult = cpts->cc_mult;
147 adj = mult;
148 adj *= ppb;
149 diff = div_u64(adj, 1000000000ULL);
150
151 spin_lock_irqsave(&cpts->lock, flags);
152
153 timecounter_read(&cpts->tc);
154
155 cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
156
157 spin_unlock_irqrestore(&cpts->lock, flags);
158
159 return 0;
160}
161
162static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
163{
164 s64 now;
165 unsigned long flags;
166 struct cpts *cpts = container_of(ptp, struct cpts, info);
167
168 spin_lock_irqsave(&cpts->lock, flags);
169 now = timecounter_read(&cpts->tc);
170 now += delta;
171 timecounter_init(&cpts->tc, &cpts->cc, now);
172 spin_unlock_irqrestore(&cpts->lock, flags);
173
174 return 0;
175}
176
177static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
178{
179 u64 ns;
180 u32 remainder;
181 unsigned long flags;
182 struct cpts *cpts = container_of(ptp, struct cpts, info);
183
184 spin_lock_irqsave(&cpts->lock, flags);
185 ns = timecounter_read(&cpts->tc);
186 spin_unlock_irqrestore(&cpts->lock, flags);
187
188 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
189 ts->tv_nsec = remainder;
190
191 return 0;
192}
193
194static int cpts_ptp_settime(struct ptp_clock_info *ptp,
195 const struct timespec *ts)
196{
197 u64 ns;
198 unsigned long flags;
199 struct cpts *cpts = container_of(ptp, struct cpts, info);
200
201 ns = ts->tv_sec * 1000000000ULL;
202 ns += ts->tv_nsec;
203
204 spin_lock_irqsave(&cpts->lock, flags);
205 timecounter_init(&cpts->tc, &cpts->cc, ns);
206 spin_unlock_irqrestore(&cpts->lock, flags);
207
208 return 0;
209}
210
211static int cpts_ptp_enable(struct ptp_clock_info *ptp,
212 struct ptp_clock_request *rq, int on)
213{
214 return -EOPNOTSUPP;
215}
216
217static struct ptp_clock_info cpts_info = {
218 .owner = THIS_MODULE,
219 .name = "CTPS timer",
220 .max_adj = 1000000,
221 .n_ext_ts = 0,
222 .pps = 0,
223 .adjfreq = cpts_ptp_adjfreq,
224 .adjtime = cpts_ptp_adjtime,
225 .gettime = cpts_ptp_gettime,
226 .settime = cpts_ptp_settime,
227 .enable = cpts_ptp_enable,
228};
229
230static void cpts_overflow_check(struct work_struct *work)
231{
232 struct timespec ts;
233 struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
234
235 cpts_write32(cpts, CPTS_EN, control);
236 cpts_write32(cpts, TS_PEND_EN, int_enable);
237 cpts_ptp_gettime(&cpts->info, &ts);
238 pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
239 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
240}
241
242#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
243
244static void cpts_clk_init(struct cpts *cpts)
245{
246 cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
247 if (IS_ERR(cpts->refclk)) {
248 pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
249 cpts->refclk = NULL;
250 return;
251 }
252 clk_enable(cpts->refclk);
253 cpts->freq = cpts->refclk->recalc(cpts->refclk);
254}
255
256static void cpts_clk_release(struct cpts *cpts)
257{
258 clk_disable(cpts->refclk);
259 clk_put(cpts->refclk);
260}
261
262static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
263 u16 ts_seqid, u8 ts_msgtype)
264{
265 u16 *seqid;
266 unsigned int offset;
267 u8 *msgtype, *data = skb->data;
268
269 switch (ptp_class) {
270 case PTP_CLASS_V1_IPV4:
271 case PTP_CLASS_V2_IPV4:
272 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
273 break;
274 case PTP_CLASS_V1_IPV6:
275 case PTP_CLASS_V2_IPV6:
276 offset = OFF_PTP6;
277 break;
278 case PTP_CLASS_V2_L2:
279 offset = ETH_HLEN;
280 break;
281 case PTP_CLASS_V2_VLAN:
282 offset = ETH_HLEN + VLAN_HLEN;
283 break;
284 default:
285 return 0;
286 }
287
288 if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
289 return 0;
290
291 if (unlikely(ptp_class & PTP_CLASS_V1))
292 msgtype = data + offset + OFF_PTP_CONTROL;
293 else
294 msgtype = data + offset;
295
296 seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
297
298 return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
299}
300
301static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
302{
303 u64 ns = 0;
304 struct cpts_event *event;
305 struct list_head *this, *next;
306 unsigned int class = sk_run_filter(skb, ptp_filter);
307 unsigned long flags;
308 u16 seqid;
309 u8 mtype;
310
311 if (class == PTP_CLASS_NONE)
312 return 0;
313
314 spin_lock_irqsave(&cpts->lock, flags);
315 cpts_fifo_read(cpts, CPTS_EV_PUSH);
316 list_for_each_safe(this, next, &cpts->events) {
317 event = list_entry(this, struct cpts_event, list);
318 if (event_expired(event)) {
319 list_del_init(&event->list);
320 list_add(&event->list, &cpts->pool);
321 continue;
322 }
323 mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
324 seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
325 if (ev_type == event_type(event) &&
326 cpts_match(skb, class, seqid, mtype)) {
327 ns = timecounter_cyc2time(&cpts->tc, event->low);
328 list_del_init(&event->list);
329 list_add(&event->list, &cpts->pool);
330 break;
331 }
332 }
333 spin_unlock_irqrestore(&cpts->lock, flags);
334
335 return ns;
336}
337
338void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
339{
340 u64 ns;
341 struct skb_shared_hwtstamps *ssh;
342
343 if (!cpts->rx_enable)
344 return;
345 ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
346 if (!ns)
347 return;
348 ssh = skb_hwtstamps(skb);
349 memset(ssh, 0, sizeof(*ssh));
350 ssh->hwtstamp = ns_to_ktime(ns);
351}
352
353void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
354{
355 u64 ns;
356 struct skb_shared_hwtstamps ssh;
357
358 if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
359 return;
360 ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
361 if (!ns)
362 return;
363 memset(&ssh, 0, sizeof(ssh));
364 ssh.hwtstamp = ns_to_ktime(ns);
365 skb_tstamp_tx(skb, &ssh);
366}
367
368#endif /*CONFIG_TI_CPTS*/
369
370int cpts_register(struct device *dev, struct cpts *cpts,
371 u32 mult, u32 shift)
372{
373#ifdef CONFIG_TI_CPTS
374 int err, i;
375 unsigned long flags;
376
377 if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
378 pr_err("cpts: bad ptp filter\n");
379 return -EINVAL;
380 }
381 cpts->info = cpts_info;
382 cpts->clock = ptp_clock_register(&cpts->info, dev);
383 if (IS_ERR(cpts->clock)) {
384 err = PTR_ERR(cpts->clock);
385 cpts->clock = NULL;
386 return err;
387 }
388 spin_lock_init(&cpts->lock);
389
390 cpts->cc.read = cpts_systim_read;
391 cpts->cc.mask = CLOCKSOURCE_MASK(32);
392 cpts->cc_mult = mult;
393 cpts->cc.mult = mult;
394 cpts->cc.shift = shift;
395
396 INIT_LIST_HEAD(&cpts->events);
397 INIT_LIST_HEAD(&cpts->pool);
398 for (i = 0; i < CPTS_MAX_EVENTS; i++)
399 list_add(&cpts->pool_data[i].list, &cpts->pool);
400
401 cpts_clk_init(cpts);
402 cpts_write32(cpts, CPTS_EN, control);
403 cpts_write32(cpts, TS_PEND_EN, int_enable);
404
405 spin_lock_irqsave(&cpts->lock, flags);
406 timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
407 spin_unlock_irqrestore(&cpts->lock, flags);
408
409 INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
410 schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
411
412 cpts->phc_index = ptp_clock_index(cpts->clock);
413#endif
414 return 0;
415}
416
417void cpts_unregister(struct cpts *cpts)
418{
419#ifdef CONFIG_TI_CPTS
420 if (cpts->clock) {
421 ptp_clock_unregister(cpts->clock);
422 cancel_delayed_work_sync(&cpts->overflow_work);
423 }
424 if (cpts->refclk)
425 cpts_clk_release(cpts);
426#endif
427}
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 000000000000..e1bba3a496b2
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,146 @@
1/*
2 * TI Common Platform Time Sync
3 *
4 * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#ifndef _TI_CPTS_H_
21#define _TI_CPTS_H_
22
23#include <linux/clk.h>
24#include <linux/clkdev.h>
25#include <linux/clocksource.h>
26#include <linux/device.h>
27#include <linux/list.h>
28#include <linux/ptp_clock_kernel.h>
29#include <linux/skbuff.h>
30
31struct cpsw_cpts {
32 u32 idver; /* Identification and version */
33 u32 control; /* Time sync control */
34 u32 res1;
35 u32 ts_push; /* Time stamp event push */
36 u32 ts_load_val; /* Time stamp load value */
37 u32 ts_load_en; /* Time stamp load enable */
38 u32 res2[2];
39 u32 intstat_raw; /* Time sync interrupt status raw */
40 u32 intstat_masked; /* Time sync interrupt status masked */
41 u32 int_enable; /* Time sync interrupt enable */
42 u32 res3;
43 u32 event_pop; /* Event interrupt pop */
44 u32 event_low; /* 32 Bit Event Time Stamp */
45 u32 event_high; /* Event Type Fields */
46};
47
48/* Bit definitions for the IDVER register */
49#define TX_IDENT_SHIFT (16) /* TX Identification Value */
50#define TX_IDENT_MASK (0xffff)
51#define RTL_VER_SHIFT (11) /* RTL Version Value */
52#define RTL_VER_MASK (0x1f)
53#define MAJOR_VER_SHIFT (8) /* Major Version Value */
54#define MAJOR_VER_MASK (0x7)
55#define MINOR_VER_SHIFT (0) /* Minor Version Value */
56#define MINOR_VER_MASK (0xff)
57
58/* Bit definitions for the CONTROL register */
59#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */
60#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */
61#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */
62#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */
63#define INT_TEST (1<<1) /* Interrupt Test */
64#define CPTS_EN (1<<0) /* Time Sync Enable */
65
66/*
67 * Definitions for the single bit registers:
68 * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
69 */
70#define TS_PUSH (1<<0) /* Time stamp event push */
71#define TS_LOAD_EN (1<<0) /* Time Stamp Load */
72#define TS_PEND_RAW (1<<0) /* int read (before enable) */
73#define TS_PEND (1<<0) /* masked interrupt read (after enable) */
74#define TS_PEND_EN (1<<0) /* masked interrupt enable */
75#define EVENT_POP (1<<0) /* writing discards one event */
76
77/* Bit definitions for the EVENT_HIGH register */
78#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */
79#define PORT_NUMBER_MASK (0x1f)
80#define EVENT_TYPE_SHIFT (20) /* Time sync event type */
81#define EVENT_TYPE_MASK (0xf)
82#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */
83#define MESSAGE_TYPE_MASK (0xf)
84#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */
85#define SEQUENCE_ID_MASK (0xffff)
86
87enum {
88 CPTS_EV_PUSH, /* Time Stamp Push Event */
89 CPTS_EV_ROLL, /* Time Stamp Rollover Event */
90 CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
91 CPTS_EV_HW, /* Hardware Time Stamp Push Event */
92 CPTS_EV_RX, /* Ethernet Receive Event */
93 CPTS_EV_TX, /* Ethernet Transmit Event */
94};
95
96/* This covers any input clock up to about 500 MHz. */
97#define CPTS_OVERFLOW_PERIOD (HZ * 8)
98
99#define CPTS_FIFO_DEPTH 16
100#define CPTS_MAX_EVENTS 32
101
102struct cpts_event {
103 struct list_head list;
104 unsigned long tmo;
105 u32 high;
106 u32 low;
107};
108
109struct cpts {
110 struct cpsw_cpts __iomem *reg;
111 int tx_enable;
112 int rx_enable;
113#ifdef CONFIG_TI_CPTS
114 struct ptp_clock_info info;
115 struct ptp_clock *clock;
116 spinlock_t lock; /* protects time registers */
117 u32 cc_mult; /* for the nominal frequency */
118 struct cyclecounter cc;
119 struct timecounter tc;
120 struct delayed_work overflow_work;
121 int phc_index;
122 struct clk *refclk;
123 unsigned long freq;
124 struct list_head events;
125 struct list_head pool;
126 struct cpts_event pool_data[CPTS_MAX_EVENTS];
127#endif
128};
129
130#ifdef CONFIG_TI_CPTS
131extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
132extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
133#else
134static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
135{
136}
137static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
138{
139}
140#endif
141
142extern int cpts_register(struct device *dev, struct cpts *cpts,
143 u32 mult, u32 shift);
144extern void cpts_unregister(struct cpts *cpts);
145
146#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index fce89a0ab06e..2a3e2c56bc60 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1850,7 +1850,7 @@ static struct emac_platform_data
1850 * resource information from platform init and register a network device 1850 * resource information from platform init and register a network device
1851 * and allocate resources necessary for driver to perform 1851 * and allocate resources necessary for driver to perform
1852 */ 1852 */
1853static int __devinit davinci_emac_probe(struct platform_device *pdev) 1853static int davinci_emac_probe(struct platform_device *pdev)
1854{ 1854{
1855 int rc = 0; 1855 int rc = 0;
1856 struct resource *res; 1856 struct resource *res;
@@ -2039,7 +2039,7 @@ no_ndev:
2039 * Called when removing the device driver. We disable clock usage and release 2039 * Called when removing the device driver. We disable clock usage and release
2040 * the resources taken up by the driver and unregister network device 2040 * the resources taken up by the driver and unregister network device
2041 */ 2041 */
2042static int __devexit davinci_emac_remove(struct platform_device *pdev) 2042static int davinci_emac_remove(struct platform_device *pdev)
2043{ 2043{
2044 struct resource *res; 2044 struct resource *res;
2045 struct net_device *ndev = platform_get_drvdata(pdev); 2045 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -2107,7 +2107,7 @@ static struct platform_driver davinci_emac_driver = {
2107 .of_match_table = of_match_ptr(davinci_emac_of_match), 2107 .of_match_table = of_match_ptr(davinci_emac_of_match),
2108 }, 2108 },
2109 .probe = davinci_emac_probe, 2109 .probe = davinci_emac_probe,
2110 .remove = __devexit_p(davinci_emac_remove), 2110 .remove = davinci_emac_remove,
2111}; 2111};
2112 2112
2113/** 2113/**
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 51a96dbee9ac..cca25509b039 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -310,7 +310,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
310} 310}
311 311
312 312
313static int __devinit davinci_mdio_probe(struct platform_device *pdev) 313static int davinci_mdio_probe(struct platform_device *pdev)
314{ 314{
315 struct mdio_platform_data *pdata = pdev->dev.platform_data; 315 struct mdio_platform_data *pdata = pdev->dev.platform_data;
316 struct device *dev = &pdev->dev; 316 struct device *dev = &pdev->dev;
@@ -416,7 +416,7 @@ bail_out:
416 return ret; 416 return ret;
417} 417}
418 418
419static int __devexit davinci_mdio_remove(struct platform_device *pdev) 419static int davinci_mdio_remove(struct platform_device *pdev)
420{ 420{
421 struct device *dev = &pdev->dev; 421 struct device *dev = &pdev->dev;
422 struct davinci_mdio_data *data = dev_get_drvdata(dev); 422 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -465,7 +465,7 @@ static int davinci_mdio_resume(struct device *dev)
465 u32 ctrl; 465 u32 ctrl;
466 466
467 spin_lock(&data->lock); 467 spin_lock(&data->lock);
468 pm_runtime_put_sync(data->dev); 468 pm_runtime_get_sync(data->dev);
469 469
470 /* restart the scan state machine */ 470 /* restart the scan state machine */
471 ctrl = __raw_readl(&data->regs->control); 471 ctrl = __raw_readl(&data->regs->control);
@@ -496,7 +496,7 @@ static struct platform_driver davinci_mdio_driver = {
496 .of_match_table = of_match_ptr(davinci_mdio_of_mtable), 496 .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
497 }, 497 },
498 .probe = davinci_mdio_probe, 498 .probe = davinci_mdio_probe,
499 .remove = __devexit_p(davinci_mdio_remove), 499 .remove = davinci_mdio_remove,
500}; 500};
501 501
502static int __init davinci_mdio_init(void) 502static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 3e6abf0f2771..22725386c5de 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -300,7 +300,7 @@ these functions are more or less common to all linux network drivers.
300 **************************************************************/ 300 **************************************************************/
301 301
302 302
303static void __devexit tlan_remove_one(struct pci_dev *pdev) 303static void tlan_remove_one(struct pci_dev *pdev)
304{ 304{
305 struct net_device *dev = pci_get_drvdata(pdev); 305 struct net_device *dev = pci_get_drvdata(pdev);
306 struct tlan_priv *priv = netdev_priv(dev); 306 struct tlan_priv *priv = netdev_priv(dev);
@@ -392,7 +392,7 @@ static struct pci_driver tlan_driver = {
392 .name = "tlan", 392 .name = "tlan",
393 .id_table = tlan_pci_tbl, 393 .id_table = tlan_pci_tbl,
394 .probe = tlan_init_one, 394 .probe = tlan_init_one,
395 .remove = __devexit_p(tlan_remove_one), 395 .remove = tlan_remove_one,
396 .suspend = tlan_suspend, 396 .suspend = tlan_suspend,
397 .resume = tlan_resume, 397 .resume = tlan_resume,
398}; 398};
@@ -434,7 +434,7 @@ err_out_pci_free:
434} 434}
435 435
436 436
437static int __devinit tlan_init_one(struct pci_dev *pdev, 437static int tlan_init_one(struct pci_dev *pdev,
438 const struct pci_device_id *ent) 438 const struct pci_device_id *ent)
439{ 439{
440 return tlan_probe1(pdev, -1, -1, 0, ent); 440 return tlan_probe1(pdev, -1, -1, 0, ent);
@@ -460,9 +460,8 @@ static int __devinit tlan_init_one(struct pci_dev *pdev,
460* 460*
461**************************************************************/ 461**************************************************************/
462 462
463static int __devinit tlan_probe1(struct pci_dev *pdev, 463static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
464 long ioaddr, int irq, int rev, 464 const struct pci_device_id *ent)
465 const struct pci_device_id *ent)
466{ 465{
467 466
468 struct net_device *dev; 467 struct net_device *dev;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 5ee82a77723b..e321d0b6fc88 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -234,10 +234,9 @@ static void gelic_card_free_chain(struct gelic_card *card,
234 * 234 *
235 * returns 0 on success, <0 on failure 235 * returns 0 on success, <0 on failure
236 */ 236 */
237static int __devinit gelic_card_init_chain(struct gelic_card *card, 237static int gelic_card_init_chain(struct gelic_card *card,
238 struct gelic_descr_chain *chain, 238 struct gelic_descr_chain *chain,
239 struct gelic_descr *start_descr, 239 struct gelic_descr *start_descr, int no)
240 int no)
241{ 240{
242 int i; 241 int i;
243 struct gelic_descr *descr; 242 struct gelic_descr *descr;
@@ -428,7 +427,7 @@ rewind:
428 * 427 *
429 * returns 0 on success, < 0 on failure 428 * returns 0 on success, < 0 on failure
430 */ 429 */
431static int __devinit gelic_card_alloc_rx_skbs(struct gelic_card *card) 430static int gelic_card_alloc_rx_skbs(struct gelic_card *card)
432{ 431{
433 struct gelic_descr_chain *chain; 432 struct gelic_descr_chain *chain;
434 int ret; 433 int ret;
@@ -1468,8 +1467,8 @@ static const struct net_device_ops gelic_netdevice_ops = {
1468 * 1467 *
1469 * fills out function pointers in the net_device structure 1468 * fills out function pointers in the net_device structure
1470 */ 1469 */
1471static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev, 1470static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
1472 struct napi_struct *napi) 1471 struct napi_struct *napi)
1473{ 1472{
1474 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 1473 netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
1475 /* NAPI */ 1474 /* NAPI */
@@ -1489,8 +1488,7 @@ static void __devinit gelic_ether_setup_netdev_ops(struct net_device *netdev,
1489 * gelic_ether_setup_netdev initializes the net_device structure 1488 * gelic_ether_setup_netdev initializes the net_device structure
1490 * and register it. 1489 * and register it.
1491 **/ 1490 **/
1492int __devinit gelic_net_setup_netdev(struct net_device *netdev, 1491int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
1493 struct gelic_card *card)
1494{ 1492{
1495 int status; 1493 int status;
1496 u64 v1, v2; 1494 u64 v1, v2;
@@ -1542,7 +1540,7 @@ int __devinit gelic_net_setup_netdev(struct net_device *netdev,
1542 * the card and net_device structures are linked to each other 1540 * the card and net_device structures are linked to each other
1543 */ 1541 */
1544#define GELIC_ALIGN (32) 1542#define GELIC_ALIGN (32)
1545static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **netdev) 1543static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
1546{ 1544{
1547 struct gelic_card *card; 1545 struct gelic_card *card;
1548 struct gelic_port *port; 1546 struct gelic_port *port;
@@ -1593,7 +1591,7 @@ static struct gelic_card * __devinit gelic_alloc_card_net(struct net_device **ne
1593 return card; 1591 return card;
1594} 1592}
1595 1593
1596static void __devinit gelic_card_get_vlan_info(struct gelic_card *card) 1594static void gelic_card_get_vlan_info(struct gelic_card *card)
1597{ 1595{
1598 u64 v1, v2; 1596 u64 v1, v2;
1599 int status; 1597 int status;
@@ -1667,7 +1665,7 @@ static void __devinit gelic_card_get_vlan_info(struct gelic_card *card)
1667/** 1665/**
1668 * ps3_gelic_driver_probe - add a device to the control of this driver 1666 * ps3_gelic_driver_probe - add a device to the control of this driver
1669 */ 1667 */
1670static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) 1668static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
1671{ 1669{
1672 struct gelic_card *card; 1670 struct gelic_card *card;
1673 struct net_device *netdev; 1671 struct net_device *netdev;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 961c8321451f..d568af1eb4f4 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -452,7 +452,7 @@ static size_t gelic_wl_synthesize_ie(u8 *buf,
452 if (rsn) 452 if (rsn)
453 *buf++ = WLAN_EID_RSN; 453 *buf++ = WLAN_EID_RSN;
454 else 454 else
455 *buf++ = WLAN_EID_GENERIC; 455 *buf++ = WLAN_EID_VENDOR_SPECIFIC;
456 456
457 /* length filed; set later */ 457 /* length filed; set later */
458 buf++; 458 buf++;
@@ -540,7 +540,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
540 break; 540 break;
541 541
542 switch (item_id) { 542 switch (item_id) {
543 case WLAN_EID_GENERIC: 543 case WLAN_EID_VENDOR_SPECIFIC:
544 if ((OUI_LEN + 1 <= item_len) && 544 if ((OUI_LEN + 1 <= item_len) &&
545 !memcmp(pos, wpa_oui, OUI_LEN) && 545 !memcmp(pos, wpa_oui, OUI_LEN) &&
546 pos[OUI_LEN] == 0x01) { 546 pos[OUI_LEN] == 0x01) {
@@ -2305,7 +2305,7 @@ static const struct iw_handler_def gelic_wl_wext_handler_def = {
2305 .get_wireless_stats = gelic_wl_get_wireless_stats, 2305 .get_wireless_stats = gelic_wl_get_wireless_stats,
2306}; 2306};
2307 2307
2308static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card) 2308static struct net_device *gelic_wl_alloc(struct gelic_card *card)
2309{ 2309{
2310 struct net_device *netdev; 2310 struct net_device *netdev;
2311 struct gelic_port *port; 2311 struct gelic_port *port;
@@ -2582,7 +2582,7 @@ static const struct ethtool_ops gelic_wl_ethtool_ops = {
2582 .get_link = gelic_wl_get_link, 2582 .get_link = gelic_wl_get_link,
2583}; 2583};
2584 2584
2585static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev) 2585static void gelic_wl_setup_netdev_ops(struct net_device *netdev)
2586{ 2586{
2587 struct gelic_wl_info *wl; 2587 struct gelic_wl_info *wl;
2588 wl = port_wl(netdev_priv(netdev)); 2588 wl = port_wl(netdev_priv(netdev));
@@ -2598,7 +2598,7 @@ static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
2598/* 2598/*
2599 * driver probe/remove 2599 * driver probe/remove
2600 */ 2600 */
2601int __devinit gelic_wl_driver_probe(struct gelic_card *card) 2601int gelic_wl_driver_probe(struct gelic_card *card)
2602{ 2602{
2603 int ret; 2603 int ret;
2604 struct net_device *netdev; 2604 struct net_device *netdev;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index c1ebfe9efcb3..f1b91fd7e41c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2492,7 +2492,7 @@ out_disable_dev:
2492 * spider_net_probe initializes pdev and registers a net_device 2492 * spider_net_probe initializes pdev and registers a net_device
2493 * structure for it. After that, the device can be ifconfig'ed up 2493 * structure for it. After that, the device can be ifconfig'ed up
2494 **/ 2494 **/
2495static int __devinit 2495static int
2496spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2496spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2497{ 2497{
2498 int err = -EIO; 2498 int err = -EIO;
@@ -2531,7 +2531,7 @@ out:
2531 * spider_net_remove is called to remove the device and unregisters the 2531 * spider_net_remove is called to remove the device and unregisters the
2532 * net_device 2532 * net_device
2533 **/ 2533 **/
2534static void __devexit 2534static void
2535spider_net_remove(struct pci_dev *pdev) 2535spider_net_remove(struct pci_dev *pdev)
2536{ 2536{
2537 struct net_device *netdev; 2537 struct net_device *netdev;
@@ -2559,7 +2559,7 @@ static struct pci_driver spider_net_driver = {
2559 .name = spider_net_driver_name, 2559 .name = spider_net_driver_name,
2560 .id_table = spider_net_pci_tbl, 2560 .id_table = spider_net_pci_tbl,
2561 .probe = spider_net_probe, 2561 .probe = spider_net_probe,
2562 .remove = __devexit_p(spider_net_remove) 2562 .remove = spider_net_remove
2563}; 2563};
2564 2564
2565/** 2565/**
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 651a70c55e6e..9819349eaa1e 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -60,7 +60,7 @@ enum tc35815_chiptype {
60/* indexed by tc35815_chiptype, above */ 60/* indexed by tc35815_chiptype, above */
61static const struct { 61static const struct {
62 const char *name; 62 const char *name;
63} chip_info[] __devinitdata = { 63} chip_info[] = {
64 { "TOSHIBA TC35815CF 10/100BaseTX" }, 64 { "TOSHIBA TC35815CF 10/100BaseTX" },
65 { "TOSHIBA TC35815 with Wake on LAN" }, 65 { "TOSHIBA TC35815 with Wake on LAN" },
66 { "TOSHIBA TC35815/TX4939" }, 66 { "TOSHIBA TC35815/TX4939" },
@@ -719,7 +719,7 @@ err_out:
719 * should provide a "tc35815-mac" device with a MAC address in its 719 * should provide a "tc35815-mac" device with a MAC address in its
720 * platform_data. 720 * platform_data.
721 */ 721 */
722static int __devinit tc35815_mac_match(struct device *dev, void *data) 722static int tc35815_mac_match(struct device *dev, void *data)
723{ 723{
724 struct platform_device *plat_dev = to_platform_device(dev); 724 struct platform_device *plat_dev = to_platform_device(dev);
725 struct pci_dev *pci_dev = data; 725 struct pci_dev *pci_dev = data;
@@ -727,7 +727,7 @@ static int __devinit tc35815_mac_match(struct device *dev, void *data)
727 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id; 727 return !strcmp(plat_dev->name, "tc35815-mac") && plat_dev->id == id;
728} 728}
729 729
730static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 730static int tc35815_read_plat_dev_addr(struct net_device *dev)
731{ 731{
732 struct tc35815_local *lp = netdev_priv(dev); 732 struct tc35815_local *lp = netdev_priv(dev);
733 struct device *pd = bus_find_device(&platform_bus_type, NULL, 733 struct device *pd = bus_find_device(&platform_bus_type, NULL,
@@ -741,13 +741,13 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
741 return -ENODEV; 741 return -ENODEV;
742} 742}
743#else 743#else
744static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 744static int tc35815_read_plat_dev_addr(struct net_device *dev)
745{ 745{
746 return -ENODEV; 746 return -ENODEV;
747} 747}
748#endif 748#endif
749 749
750static int __devinit tc35815_init_dev_addr(struct net_device *dev) 750static int tc35815_init_dev_addr(struct net_device *dev)
751{ 751{
752 struct tc35815_regs __iomem *tr = 752 struct tc35815_regs __iomem *tr =
753 (struct tc35815_regs __iomem *)dev->base_addr; 753 (struct tc35815_regs __iomem *)dev->base_addr;
@@ -785,8 +785,8 @@ static const struct net_device_ops tc35815_netdev_ops = {
785#endif 785#endif
786}; 786};
787 787
788static int __devinit tc35815_init_one(struct pci_dev *pdev, 788static int tc35815_init_one(struct pci_dev *pdev,
789 const struct pci_device_id *ent) 789 const struct pci_device_id *ent)
790{ 790{
791 void __iomem *ioaddr = NULL; 791 void __iomem *ioaddr = NULL;
792 struct net_device *dev; 792 struct net_device *dev;
@@ -878,7 +878,7 @@ err_out:
878} 878}
879 879
880 880
881static void __devexit tc35815_remove_one(struct pci_dev *pdev) 881static void tc35815_remove_one(struct pci_dev *pdev)
882{ 882{
883 struct net_device *dev = pci_get_drvdata(pdev); 883 struct net_device *dev = pci_get_drvdata(pdev);
884 struct tc35815_local *lp = netdev_priv(dev); 884 struct tc35815_local *lp = netdev_priv(dev);
@@ -2198,7 +2198,7 @@ static struct pci_driver tc35815_pci_driver = {
2198 .name = MODNAME, 2198 .name = MODNAME,
2199 .id_table = tc35815_pci_tbl, 2199 .id_table = tc35815_pci_tbl,
2200 .probe = tc35815_init_one, 2200 .probe = tc35815_init_one,
2201 .remove = __devexit_p(tc35815_remove_one), 2201 .remove = tc35815_remove_one,
2202#ifdef CONFIG_PM 2202#ifdef CONFIG_PM
2203 .suspend = tc35815_suspend, 2203 .suspend = tc35815_suspend,
2204 .resume = tc35815_resume, 2204 .resume = tc35815_resume,
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0459c096629f..7992b3e05d3d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -113,7 +113,7 @@ static const int multicast_filter_limit = 32;
113#include <linux/dmi.h> 113#include <linux/dmi.h>
114 114
115/* These identify the driver base version and may not be removed. */ 115/* These identify the driver base version and may not be removed. */
116static const char version[] __devinitconst = 116static const char version[] =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker"; 117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
118 118
119/* This driver was written to use PCI memory space. Some early versions 119/* This driver was written to use PCI memory space. Some early versions
@@ -657,7 +657,7 @@ static void enable_mmio(long pioaddr, u32 quirks)
657 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM 657 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
658 * (plus 0x6C for Rhine-I/II) 658 * (plus 0x6C for Rhine-I/II)
659 */ 659 */
660static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev) 660static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
661{ 661{
662 struct rhine_private *rp = netdev_priv(dev); 662 struct rhine_private *rp = netdev_priv(dev);
663 void __iomem *ioaddr = rp->base; 663 void __iomem *ioaddr = rp->base;
@@ -823,7 +823,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
823 return work_done; 823 return work_done;
824} 824}
825 825
826static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr) 826static void rhine_hw_init(struct net_device *dev, long pioaddr)
827{ 827{
828 struct rhine_private *rp = netdev_priv(dev); 828 struct rhine_private *rp = netdev_priv(dev);
829 829
@@ -856,8 +856,7 @@ static const struct net_device_ops rhine_netdev_ops = {
856#endif 856#endif
857}; 857};
858 858
859static int __devinit rhine_init_one(struct pci_dev *pdev, 859static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
860 const struct pci_device_id *ent)
861{ 860{
862 struct net_device *dev; 861 struct net_device *dev;
863 struct rhine_private *rp; 862 struct rhine_private *rp;
@@ -2232,7 +2231,7 @@ static int rhine_close(struct net_device *dev)
2232} 2231}
2233 2232
2234 2233
2235static void __devexit rhine_remove_one(struct pci_dev *pdev) 2234static void rhine_remove_one(struct pci_dev *pdev)
2236{ 2235{
2237 struct net_device *dev = pci_get_drvdata(pdev); 2236 struct net_device *dev = pci_get_drvdata(pdev);
2238 struct rhine_private *rp = netdev_priv(dev); 2237 struct rhine_private *rp = netdev_priv(dev);
@@ -2359,7 +2358,7 @@ static struct pci_driver rhine_driver = {
2359 .name = DRV_NAME, 2358 .name = DRV_NAME,
2360 .id_table = rhine_pci_tbl, 2359 .id_table = rhine_pci_tbl,
2361 .probe = rhine_init_one, 2360 .probe = rhine_init_one,
2362 .remove = __devexit_p(rhine_remove_one), 2361 .remove = rhine_remove_one,
2363 .shutdown = rhine_shutdown, 2362 .shutdown = rhine_shutdown,
2364 .driver.pm = RHINE_PM_OPS, 2363 .driver.pm = RHINE_PM_OPS,
2365}; 2364};
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index a46c19859683..1bc7f9fd2583 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -375,7 +375,7 @@ MODULE_DEVICE_TABLE(pci, velocity_id_table);
375 * Given a chip identifier return a suitable description. Returns 375 * Given a chip identifier return a suitable description. Returns
376 * a pointer a static string valid while the driver is loaded. 376 * a pointer a static string valid while the driver is loaded.
377 */ 377 */
378static const char __devinit *get_chip_name(enum chip_type chip_id) 378static const char *get_chip_name(enum chip_type chip_id)
379{ 379{
380 int i; 380 int i;
381 for (i = 0; chip_info_table[i].name != NULL; i++) 381 for (i = 0; chip_info_table[i].name != NULL; i++)
@@ -392,7 +392,7 @@ static const char __devinit *get_chip_name(enum chip_type chip_id)
392 * unload for each active device that is present. Disconnects 392 * unload for each active device that is present. Disconnects
393 * the device from the network layer and frees all the resources 393 * the device from the network layer and frees all the resources
394 */ 394 */
395static void __devexit velocity_remove1(struct pci_dev *pdev) 395static void velocity_remove1(struct pci_dev *pdev)
396{ 396{
397 struct net_device *dev = pci_get_drvdata(pdev); 397 struct net_device *dev = pci_get_drvdata(pdev);
398 struct velocity_info *vptr = netdev_priv(dev); 398 struct velocity_info *vptr = netdev_priv(dev);
@@ -421,7 +421,8 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
421 * all the verification and checking as well as reporting so that 421 * all the verification and checking as well as reporting so that
422 * we don't duplicate code for each option. 422 * we don't duplicate code for each option.
423 */ 423 */
424static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname) 424static void velocity_set_int_opt(int *opt, int val, int min, int max, int def,
425 char *name, const char *devname)
425{ 426{
426 if (val == -1) 427 if (val == -1)
427 *opt = def; 428 *opt = def;
@@ -449,7 +450,8 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
449 * all the verification and checking as well as reporting so that 450 * all the verification and checking as well as reporting so that
450 * we don't duplicate code for each option. 451 * we don't duplicate code for each option.
451 */ 452 */
452static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname) 453static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag,
454 char *name, const char *devname)
453{ 455{
454 (*opt) &= (~flag); 456 (*opt) &= (~flag);
455 if (val == -1) 457 if (val == -1)
@@ -474,7 +476,8 @@ static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag
474 * Turn the module and command options into a single structure 476 * Turn the module and command options into a single structure
475 * for the current device 477 * for the current device
476 */ 478 */
477static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname) 479static void velocity_get_options(struct velocity_opt *opts, int index,
480 const char *devname)
478{ 481{
479 482
480 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); 483 velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
@@ -2627,9 +2630,8 @@ static const struct net_device_ops velocity_netdev_ops = {
2627 * Set up the initial velocity_info struct for the device that has been 2630 * Set up the initial velocity_info struct for the device that has been
2628 * discovered. 2631 * discovered.
2629 */ 2632 */
2630static void __devinit velocity_init_info(struct pci_dev *pdev, 2633static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr,
2631 struct velocity_info *vptr, 2634 const struct velocity_info_tbl *info)
2632 const struct velocity_info_tbl *info)
2633{ 2635{
2634 memset(vptr, 0, sizeof(struct velocity_info)); 2636 memset(vptr, 0, sizeof(struct velocity_info));
2635 2637
@@ -2648,7 +2650,8 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
2648 * Retrieve the PCI configuration space data that interests us from 2650 * Retrieve the PCI configuration space data that interests us from
2649 * the kernel PCI layer 2651 * the kernel PCI layer
2650 */ 2652 */
2651static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) 2653static int velocity_get_pci_info(struct velocity_info *vptr,
2654 struct pci_dev *pdev)
2652{ 2655{
2653 vptr->rev_id = pdev->revision; 2656 vptr->rev_id = pdev->revision;
2654 2657
@@ -2685,7 +2688,7 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc
2685 * Print per driver data as the kernel driver finds Velocity 2688 * Print per driver data as the kernel driver finds Velocity
2686 * hardware 2689 * hardware
2687 */ 2690 */
2688static void __devinit velocity_print_info(struct velocity_info *vptr) 2691static void velocity_print_info(struct velocity_info *vptr)
2689{ 2692{
2690 struct net_device *dev = vptr->dev; 2693 struct net_device *dev = vptr->dev;
2691 2694
@@ -2709,7 +2712,8 @@ static u32 velocity_get_link(struct net_device *dev)
2709 * Configure a discovered adapter from scratch. Return a negative 2712 * Configure a discovered adapter from scratch. Return a negative
2710 * errno error code on failure paths. 2713 * errno error code on failure paths.
2711 */ 2714 */
2712static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent) 2715static int velocity_found1(struct pci_dev *pdev,
2716 const struct pci_device_id *ent)
2713{ 2717{
2714 static int first = 1; 2718 static int first = 1;
2715 struct net_device *dev; 2719 struct net_device *dev;
@@ -3108,7 +3112,7 @@ static struct pci_driver velocity_driver = {
3108 .name = VELOCITY_NAME, 3112 .name = VELOCITY_NAME,
3109 .id_table = velocity_id_table, 3113 .id_table = velocity_id_table,
3110 .probe = velocity_found1, 3114 .probe = velocity_found1,
3111 .remove = __devexit_p(velocity_remove1), 3115 .remove = velocity_remove1,
3112#ifdef CONFIG_PM 3116#ifdef CONFIG_PM
3113 .suspend = velocity_suspend, 3117 .suspend = velocity_suspend,
3114 .resume = velocity_resume, 3118 .resume = velocity_resume,
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2c08bf6e7bf3..352383890326 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -580,8 +580,6 @@ static int w5100_open(struct net_device *ndev)
580 struct w5100_priv *priv = netdev_priv(ndev); 580 struct w5100_priv *priv = netdev_priv(ndev);
581 581
582 netif_info(priv, ifup, ndev, "enabling\n"); 582 netif_info(priv, ifup, ndev, "enabling\n");
583 if (!is_valid_ether_addr(ndev->dev_addr))
584 return -EINVAL;
585 w5100_hw_start(priv); 583 w5100_hw_start(priv);
586 napi_enable(&priv->napi); 584 napi_enable(&priv->napi);
587 netif_start_queue(ndev); 585 netif_start_queue(ndev);
@@ -623,7 +621,7 @@ static const struct net_device_ops w5100_netdev_ops = {
623 .ndo_change_mtu = eth_change_mtu, 621 .ndo_change_mtu = eth_change_mtu,
624}; 622};
625 623
626static int __devinit w5100_hw_probe(struct platform_device *pdev) 624static int w5100_hw_probe(struct platform_device *pdev)
627{ 625{
628 struct wiznet_platform_data *data = pdev->dev.platform_data; 626 struct wiznet_platform_data *data = pdev->dev.platform_data;
629 struct net_device *ndev = platform_get_drvdata(pdev); 627 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -698,7 +696,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
698 return 0; 696 return 0;
699} 697}
700 698
701static int __devinit w5100_probe(struct platform_device *pdev) 699static int w5100_probe(struct platform_device *pdev)
702{ 700{
703 struct w5100_priv *priv; 701 struct w5100_priv *priv;
704 struct net_device *ndev; 702 struct net_device *ndev;
@@ -741,7 +739,7 @@ err_register:
741 return err; 739 return err;
742} 740}
743 741
744static int __devexit w5100_remove(struct platform_device *pdev) 742static int w5100_remove(struct platform_device *pdev)
745{ 743{
746 struct net_device *ndev = platform_get_drvdata(pdev); 744 struct net_device *ndev = platform_get_drvdata(pdev);
747 struct w5100_priv *priv = netdev_priv(ndev); 745 struct w5100_priv *priv = netdev_priv(ndev);
@@ -801,7 +799,7 @@ static struct platform_driver w5100_driver = {
801 .pm = &w5100_pm_ops, 799 .pm = &w5100_pm_ops,
802 }, 800 },
803 .probe = w5100_probe, 801 .probe = w5100_probe,
804 .remove = __devexit_p(w5100_remove), 802 .remove = w5100_remove,
805}; 803};
806 804
807module_platform_driver(w5100_driver); 805module_platform_driver(w5100_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 88943d90c765..9d1d986f8d40 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -500,8 +500,6 @@ static int w5300_open(struct net_device *ndev)
500 struct w5300_priv *priv = netdev_priv(ndev); 500 struct w5300_priv *priv = netdev_priv(ndev);
501 501
502 netif_info(priv, ifup, ndev, "enabling\n"); 502 netif_info(priv, ifup, ndev, "enabling\n");
503 if (!is_valid_ether_addr(ndev->dev_addr))
504 return -EINVAL;
505 w5300_hw_start(priv); 503 w5300_hw_start(priv);
506 napi_enable(&priv->napi); 504 napi_enable(&priv->napi);
507 netif_start_queue(ndev); 505 netif_start_queue(ndev);
@@ -543,7 +541,7 @@ static const struct net_device_ops w5300_netdev_ops = {
543 .ndo_change_mtu = eth_change_mtu, 541 .ndo_change_mtu = eth_change_mtu,
544}; 542};
545 543
546static int __devinit w5300_hw_probe(struct platform_device *pdev) 544static int w5300_hw_probe(struct platform_device *pdev)
547{ 545{
548 struct wiznet_platform_data *data = pdev->dev.platform_data; 546 struct wiznet_platform_data *data = pdev->dev.platform_data;
549 struct net_device *ndev = platform_get_drvdata(pdev); 547 struct net_device *ndev = platform_get_drvdata(pdev);
@@ -610,7 +608,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
610 return 0; 608 return 0;
611} 609}
612 610
613static int __devinit w5300_probe(struct platform_device *pdev) 611static int w5300_probe(struct platform_device *pdev)
614{ 612{
615 struct w5300_priv *priv; 613 struct w5300_priv *priv;
616 struct net_device *ndev; 614 struct net_device *ndev;
@@ -653,7 +651,7 @@ err_register:
653 return err; 651 return err;
654} 652}
655 653
656static int __devexit w5300_remove(struct platform_device *pdev) 654static int w5300_remove(struct platform_device *pdev)
657{ 655{
658 struct net_device *ndev = platform_get_drvdata(pdev); 656 struct net_device *ndev = platform_get_drvdata(pdev);
659 struct w5300_priv *priv = netdev_priv(ndev); 657 struct w5300_priv *priv = netdev_priv(ndev);
@@ -713,7 +711,7 @@ static struct platform_driver w5300_driver = {
713 .pm = &w5300_pm_ops, 711 .pm = &w5300_pm_ops,
714 }, 712 },
715 .probe = w5300_probe, 713 .probe = w5300_probe,
716 .remove = __devexit_p(w5300_remove), 714 .remove = w5300_remove,
717}; 715};
718 716
719module_platform_driver(w5300_driver); 717module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index f8e351880119..aad909d793d7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1002,7 +1002,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
1002 .get_ts_info = ethtool_op_get_ts_info, 1002 .get_ts_info = ethtool_op_get_ts_info,
1003}; 1003};
1004 1004
1005static int __devinit temac_of_probe(struct platform_device *op) 1005static int temac_of_probe(struct platform_device *op)
1006{ 1006{
1007 struct device_node *np; 1007 struct device_node *np;
1008 struct temac_local *lp; 1008 struct temac_local *lp;
@@ -1144,7 +1144,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
1144 return rc; 1144 return rc;
1145} 1145}
1146 1146
1147static int __devexit temac_of_remove(struct platform_device *op) 1147static int temac_of_remove(struct platform_device *op)
1148{ 1148{
1149 struct net_device *ndev = dev_get_drvdata(&op->dev); 1149 struct net_device *ndev = dev_get_drvdata(&op->dev);
1150 struct temac_local *lp = netdev_priv(ndev); 1150 struct temac_local *lp = netdev_priv(ndev);
@@ -1163,7 +1163,7 @@ static int __devexit temac_of_remove(struct platform_device *op)
1163 return 0; 1163 return 0;
1164} 1164}
1165 1165
1166static struct of_device_id temac_of_match[] __devinitdata = { 1166static struct of_device_id temac_of_match[] = {
1167 { .compatible = "xlnx,xps-ll-temac-1.01.b", }, 1167 { .compatible = "xlnx,xps-ll-temac-1.01.b", },
1168 { .compatible = "xlnx,xps-ll-temac-2.00.a", }, 1168 { .compatible = "xlnx,xps-ll-temac-2.00.a", },
1169 { .compatible = "xlnx,xps-ll-temac-2.02.a", }, 1169 { .compatible = "xlnx,xps-ll-temac-2.02.a", },
@@ -1174,7 +1174,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
1174 1174
1175static struct platform_driver temac_of_driver = { 1175static struct platform_driver temac_of_driver = {
1176 .probe = temac_of_probe, 1176 .probe = temac_of_probe,
1177 .remove = __devexit_p(temac_of_remove), 1177 .remove = temac_of_remove,
1178 .driver = { 1178 .driver = {
1179 .owner = THIS_MODULE, 1179 .owner = THIS_MODULE,
1180 .name = "xilinx_temac", 1180 .name = "xilinx_temac",
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index a788501e978e..d9f69b82cc4f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -48,7 +48,7 @@
48#define AXIENET_REGS_N 32 48#define AXIENET_REGS_N 32
49 49
50/* Match table for of_platform binding */ 50/* Match table for of_platform binding */
51static struct of_device_id axienet_of_match[] __devinitdata = { 51static struct of_device_id axienet_of_match[] = {
52 { .compatible = "xlnx,axi-ethernet-1.00.a", }, 52 { .compatible = "xlnx,axi-ethernet-1.00.a", },
53 { .compatible = "xlnx,axi-ethernet-1.01.a", }, 53 { .compatible = "xlnx,axi-ethernet-1.01.a", },
54 { .compatible = "xlnx,axi-ethernet-2.01.a", }, 54 { .compatible = "xlnx,axi-ethernet-2.01.a", },
@@ -1482,7 +1482,7 @@ static void axienet_dma_err_handler(unsigned long data)
1482 * device. Parses through device tree and populates fields of 1482 * device. Parses through device tree and populates fields of
1483 * axienet_local. It registers the Ethernet device. 1483 * axienet_local. It registers the Ethernet device.
1484 */ 1484 */
1485static int __devinit axienet_of_probe(struct platform_device *op) 1485static int axienet_of_probe(struct platform_device *op)
1486{ 1486{
1487 __be32 *p; 1487 __be32 *p;
1488 int size, ret = 0; 1488 int size, ret = 0;
@@ -1632,7 +1632,7 @@ nodev:
1632 return ret; 1632 return ret;
1633} 1633}
1634 1634
1635static int __devexit axienet_of_remove(struct platform_device *op) 1635static int axienet_of_remove(struct platform_device *op)
1636{ 1636{
1637 struct net_device *ndev = dev_get_drvdata(&op->dev); 1637 struct net_device *ndev = dev_get_drvdata(&op->dev);
1638 struct axienet_local *lp = netdev_priv(ndev); 1638 struct axienet_local *lp = netdev_priv(ndev);
@@ -1656,7 +1656,7 @@ static int __devexit axienet_of_remove(struct platform_device *op)
1656 1656
1657static struct platform_driver axienet_of_driver = { 1657static struct platform_driver axienet_of_driver = {
1658 .probe = axienet_of_probe, 1658 .probe = axienet_of_probe,
1659 .remove = __devexit_p(axienet_of_remove), 1659 .remove = axienet_of_remove,
1660 .driver = { 1660 .driver = {
1661 .owner = THIS_MODULE, 1661 .owner = THIS_MODULE,
1662 .name = "xilinx_axienet", 1662 .name = "xilinx_axienet",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 77cfe5110318..919b983114e9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1107,7 +1107,7 @@ static struct net_device_ops xemaclite_netdev_ops;
1107 * Return: 0, if the driver is bound to the Emaclite device, or 1107 * Return: 0, if the driver is bound to the Emaclite device, or
1108 * a negative error if there is failure. 1108 * a negative error if there is failure.
1109 */ 1109 */
1110static int __devinit xemaclite_of_probe(struct platform_device *ofdev) 1110static int xemaclite_of_probe(struct platform_device *ofdev)
1111{ 1111{
1112 struct resource r_irq; /* Interrupt resources */ 1112 struct resource r_irq; /* Interrupt resources */
1113 struct resource r_mem; /* IO mem resources */ 1113 struct resource r_mem; /* IO mem resources */
@@ -1229,7 +1229,7 @@ error2:
1229 * 1229 *
1230 * Return: 0, always. 1230 * Return: 0, always.
1231 */ 1231 */
1232static int __devexit xemaclite_of_remove(struct platform_device *of_dev) 1232static int xemaclite_of_remove(struct platform_device *of_dev)
1233{ 1233{
1234 struct device *dev = &of_dev->dev; 1234 struct device *dev = &of_dev->dev;
1235 struct net_device *ndev = dev_get_drvdata(dev); 1235 struct net_device *ndev = dev_get_drvdata(dev);
@@ -1280,7 +1280,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
1280}; 1280};
1281 1281
1282/* Match table for OF platform binding */ 1282/* Match table for OF platform binding */
1283static struct of_device_id xemaclite_of_match[] __devinitdata = { 1283static struct of_device_id xemaclite_of_match[] = {
1284 { .compatible = "xlnx,opb-ethernetlite-1.01.a", }, 1284 { .compatible = "xlnx,opb-ethernetlite-1.01.a", },
1285 { .compatible = "xlnx,opb-ethernetlite-1.01.b", }, 1285 { .compatible = "xlnx,opb-ethernetlite-1.01.b", },
1286 { .compatible = "xlnx,xps-ethernetlite-1.00.a", }, 1286 { .compatible = "xlnx,xps-ethernetlite-1.00.a", },
@@ -1298,7 +1298,7 @@ static struct platform_driver xemaclite_of_driver = {
1298 .of_match_table = xemaclite_of_match, 1298 .of_match_table = xemaclite_of_match,
1299 }, 1299 },
1300 .probe = xemaclite_of_probe, 1300 .probe = xemaclite_of_probe,
1301 .remove = __devexit_p(xemaclite_of_remove), 1301 .remove = xemaclite_of_remove,
1302}; 1302};
1303 1303
1304module_platform_driver(xemaclite_of_driver); 1304module_platform_driver(xemaclite_of_driver);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 477d6729b17f..d3ebb73277be 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1379,7 +1379,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = {
1379 .ndo_validate_addr = eth_validate_addr, 1379 .ndo_validate_addr = eth_validate_addr,
1380}; 1380};
1381 1381
1382static int __devinit eth_init_one(struct platform_device *pdev) 1382static int eth_init_one(struct platform_device *pdev)
1383{ 1383{
1384 struct port *port; 1384 struct port *port;
1385 struct net_device *dev; 1385 struct net_device *dev;
@@ -1480,7 +1480,7 @@ err_free:
1480 return err; 1480 return err;
1481} 1481}
1482 1482
1483static int __devexit eth_remove_one(struct platform_device *pdev) 1483static int eth_remove_one(struct platform_device *pdev)
1484{ 1484{
1485 struct net_device *dev = platform_get_drvdata(pdev); 1485 struct net_device *dev = platform_get_drvdata(pdev);
1486 struct port *port = netdev_priv(dev); 1486 struct port *port = netdev_priv(dev);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6695a1dadf4e..502c8ff1d985 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -228,7 +228,7 @@
228#define DRV_VERSION "v1.10" 228#define DRV_VERSION "v1.10"
229#define DRV_RELDATE "2006/12/14" 229#define DRV_RELDATE "2006/12/14"
230 230
231static char version[] __devinitdata = 231static char version[] =
232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE 232 DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
233 " Lawrence V. Stefani and others\n"; 233 " Lawrence V. Stefani and others\n";
234 234
@@ -515,7 +515,7 @@ static const struct net_device_ops dfx_netdev_ops = {
515 * initialized and the board resources are read and stored in 515 * initialized and the board resources are read and stored in
516 * the device structure. 516 * the device structure.
517 */ 517 */
518static int __devinit dfx_register(struct device *bdev) 518static int dfx_register(struct device *bdev)
519{ 519{
520 static int version_disp; 520 static int version_disp;
521 int dfx_bus_pci = DFX_BUS_PCI(bdev); 521 int dfx_bus_pci = DFX_BUS_PCI(bdev);
@@ -663,7 +663,7 @@ err_out:
663 * enabled yet. 663 * enabled yet.
664 */ 664 */
665 665
666static void __devinit dfx_bus_init(struct net_device *dev) 666static void dfx_bus_init(struct net_device *dev)
667{ 667{
668 DFX_board_t *bp = netdev_priv(dev); 668 DFX_board_t *bp = netdev_priv(dev);
669 struct device *bdev = bp->bus_dev; 669 struct device *bdev = bp->bus_dev;
@@ -809,7 +809,7 @@ static void __devinit dfx_bus_init(struct net_device *dev)
809 * Interrupts are disabled at the adapter bus-specific logic. 809 * Interrupts are disabled at the adapter bus-specific logic.
810 */ 810 */
811 811
812static void __devexit dfx_bus_uninit(struct net_device *dev) 812static void dfx_bus_uninit(struct net_device *dev)
813{ 813{
814 DFX_board_t *bp = netdev_priv(dev); 814 DFX_board_t *bp = netdev_priv(dev);
815 struct device *bdev = bp->bus_dev; 815 struct device *bdev = bp->bus_dev;
@@ -866,7 +866,7 @@ static void __devexit dfx_bus_uninit(struct net_device *dev)
866 * None 866 * None
867 */ 867 */
868 868
869static void __devinit dfx_bus_config_check(DFX_board_t *bp) 869static void dfx_bus_config_check(DFX_board_t *bp)
870{ 870{
871 struct device __maybe_unused *bdev = bp->bus_dev; 871 struct device __maybe_unused *bdev = bp->bus_dev;
872 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 872 int dfx_bus_eisa = DFX_BUS_EISA(bdev);
@@ -962,9 +962,8 @@ static void __devinit dfx_bus_config_check(DFX_board_t *bp)
962 * returning from this routine. 962 * returning from this routine.
963 */ 963 */
964 964
965static int __devinit dfx_driver_init(struct net_device *dev, 965static int dfx_driver_init(struct net_device *dev, const char *print_name,
966 const char *print_name, 966 resource_size_t bar_start)
967 resource_size_t bar_start)
968{ 967{
969 DFX_board_t *bp = netdev_priv(dev); 968 DFX_board_t *bp = netdev_priv(dev);
970 struct device *bdev = bp->bus_dev; 969 struct device *bdev = bp->bus_dev;
@@ -3579,7 +3578,7 @@ static void dfx_xmt_flush( DFX_board_t *bp )
3579 * Device structures for FDDI adapters (fddi0, fddi1, etc) are 3578 * Device structures for FDDI adapters (fddi0, fddi1, etc) are
3580 * freed. 3579 * freed.
3581 */ 3580 */
3582static void __devexit dfx_unregister(struct device *bdev) 3581static void dfx_unregister(struct device *bdev)
3583{ 3582{
3584 struct net_device *dev = dev_get_drvdata(bdev); 3583 struct net_device *dev = dev_get_drvdata(bdev);
3585 DFX_board_t *bp = netdev_priv(dev); 3584 DFX_board_t *bp = netdev_priv(dev);
@@ -3619,13 +3618,12 @@ static void __devexit dfx_unregister(struct device *bdev)
3619} 3618}
3620 3619
3621 3620
3622static int __devinit __maybe_unused dfx_dev_register(struct device *); 3621static int __maybe_unused dfx_dev_register(struct device *);
3623static int __devexit __maybe_unused dfx_dev_unregister(struct device *); 3622static int __maybe_unused dfx_dev_unregister(struct device *);
3624 3623
3625#ifdef CONFIG_PCI 3624#ifdef CONFIG_PCI
3626static int __devinit dfx_pci_register(struct pci_dev *, 3625static int dfx_pci_register(struct pci_dev *, const struct pci_device_id *);
3627 const struct pci_device_id *); 3626static void dfx_pci_unregister(struct pci_dev *);
3628static void __devexit dfx_pci_unregister(struct pci_dev *);
3629 3627
3630static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = { 3628static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
3631 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, 3629 { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
@@ -3637,16 +3635,16 @@ static struct pci_driver dfx_pci_driver = {
3637 .name = "defxx", 3635 .name = "defxx",
3638 .id_table = dfx_pci_table, 3636 .id_table = dfx_pci_table,
3639 .probe = dfx_pci_register, 3637 .probe = dfx_pci_register,
3640 .remove = __devexit_p(dfx_pci_unregister), 3638 .remove = dfx_pci_unregister,
3641}; 3639};
3642 3640
3643static __devinit int dfx_pci_register(struct pci_dev *pdev, 3641static int dfx_pci_register(struct pci_dev *pdev,
3644 const struct pci_device_id *ent) 3642 const struct pci_device_id *ent)
3645{ 3643{
3646 return dfx_register(&pdev->dev); 3644 return dfx_register(&pdev->dev);
3647} 3645}
3648 3646
3649static void __devexit dfx_pci_unregister(struct pci_dev *pdev) 3647static void dfx_pci_unregister(struct pci_dev *pdev)
3650{ 3648{
3651 dfx_unregister(&pdev->dev); 3649 dfx_unregister(&pdev->dev);
3652} 3650}
@@ -3668,7 +3666,7 @@ static struct eisa_driver dfx_eisa_driver = {
3668 .name = "defxx", 3666 .name = "defxx",
3669 .bus = &eisa_bus_type, 3667 .bus = &eisa_bus_type,
3670 .probe = dfx_dev_register, 3668 .probe = dfx_dev_register,
3671 .remove = __devexit_p(dfx_dev_unregister), 3669 .remove = dfx_dev_unregister,
3672 }, 3670 },
3673}; 3671};
3674#endif /* CONFIG_EISA */ 3672#endif /* CONFIG_EISA */
@@ -3689,12 +3687,12 @@ static struct tc_driver dfx_tc_driver = {
3689 .name = "defxx", 3687 .name = "defxx",
3690 .bus = &tc_bus_type, 3688 .bus = &tc_bus_type,
3691 .probe = dfx_dev_register, 3689 .probe = dfx_dev_register,
3692 .remove = __devexit_p(dfx_dev_unregister), 3690 .remove = dfx_dev_unregister,
3693 }, 3691 },
3694}; 3692};
3695#endif /* CONFIG_TC */ 3693#endif /* CONFIG_TC */
3696 3694
3697static int __devinit __maybe_unused dfx_dev_register(struct device *dev) 3695static int __maybe_unused dfx_dev_register(struct device *dev)
3698{ 3696{
3699 int status; 3697 int status;
3700 3698
@@ -3704,7 +3702,7 @@ static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
3704 return status; 3702 return status;
3705} 3703}
3706 3704
3707static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev) 3705static int __maybe_unused dfx_dev_unregister(struct device *dev)
3708{ 3706{
3709 put_device(dev); 3707 put_device(dev);
3710 dfx_unregister(dev); 3708 dfx_unregister(dev);
@@ -3712,7 +3710,7 @@ static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
3712} 3710}
3713 3711
3714 3712
3715static int __devinit dfx_init(void) 3713static int dfx_init(void)
3716{ 3714{
3717 int status; 3715 int status;
3718 3716
@@ -3724,7 +3722,7 @@ static int __devinit dfx_init(void)
3724 return status; 3722 return status;
3725} 3723}
3726 3724
3727static void __devexit dfx_cleanup(void) 3725static void dfx_cleanup(void)
3728{ 3726{
3729 tc_unregister_driver(&dfx_tc_driver); 3727 tc_unregister_driver(&dfx_tc_driver);
3730 eisa_driver_unregister(&dfx_eisa_driver); 3728 eisa_driver_unregister(&dfx_eisa_driver);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index 3d9a4596a423..d5bd563ac131 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -321,7 +321,7 @@ err_out1:
321/* 321/*
322 * Called for each adapter board from pci_unregister_driver 322 * Called for each adapter board from pci_unregister_driver
323 */ 323 */
324static void __devexit skfp_remove_one(struct pci_dev *pdev) 324static void skfp_remove_one(struct pci_dev *pdev)
325{ 325{
326 struct net_device *p = pci_get_drvdata(pdev); 326 struct net_device *p = pci_get_drvdata(pdev);
327 struct s_smc *lp = netdev_priv(p); 327 struct s_smc *lp = netdev_priv(p);
@@ -2243,7 +2243,7 @@ static struct pci_driver skfddi_pci_driver = {
2243 .name = "skfddi", 2243 .name = "skfddi",
2244 .id_table = skfddi_pci_tbl, 2244 .id_table = skfddi_pci_tbl,
2245 .probe = skfp_init_one, 2245 .probe = skfp_init_one,
2246 .remove = __devexit_p(skfp_remove_one), 2246 .remove = skfp_remove_one,
2247}; 2247};
2248 2248
2249static int __init skfd_init(void) 2249static int __init skfd_init(void)
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index d4719632ffc6..e5b19b056909 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -61,7 +61,7 @@ MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
61MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver"); 61MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n"; 64static char version[] = "rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
65 65
66 66
67static const struct net_device_ops rr_netdev_ops = { 67static const struct net_device_ops rr_netdev_ops = {
@@ -88,8 +88,7 @@ static const struct net_device_ops rr_netdev_ops = {
88 * stack will need to know about I/O vectors or something similar. 88 * stack will need to know about I/O vectors or something similar.
89 */ 89 */
90 90
91static int __devinit rr_init_one(struct pci_dev *pdev, 91static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
92 const struct pci_device_id *ent)
93{ 92{
94 struct net_device *dev; 93 struct net_device *dev;
95 static int version_disp; 94 static int version_disp;
@@ -221,7 +220,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
221 return ret; 220 return ret;
222} 221}
223 222
224static void __devexit rr_remove_one (struct pci_dev *pdev) 223static void rr_remove_one(struct pci_dev *pdev)
225{ 224{
226 struct net_device *dev = pci_get_drvdata(pdev); 225 struct net_device *dev = pci_get_drvdata(pdev);
227 struct rr_private *rr = netdev_priv(dev); 226 struct rr_private *rr = netdev_priv(dev);
@@ -503,7 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv,
503} 502}
504 503
505 504
506static int __devinit rr_init(struct net_device *dev) 505static int rr_init(struct net_device *dev)
507{ 506{
508 struct rr_private *rrpriv; 507 struct rr_private *rrpriv;
509 struct rr_regs __iomem *regs; 508 struct rr_regs __iomem *regs;
@@ -1681,7 +1680,7 @@ static struct pci_driver rr_driver = {
1681 .name = "rrunner", 1680 .name = "rrunner",
1682 .id_table = rr_pci_tbl, 1681 .id_table = rr_pci_tbl,
1683 .probe = rr_init_one, 1682 .probe = rr_init_one,
1684 .remove = __devexit_p(rr_remove_one), 1683 .remove = rr_remove_one,
1685}; 1684};
1686 1685
1687static int __init rr_init_module(void) 1686static int __init rr_init_module(void)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 928148cc3220..2b657d4d63a8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -363,11 +363,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
363 363
364 rndis_pkt = &msg->msg.pkt; 364 rndis_pkt = &msg->msg.pkt;
365 365
366 /*
367 * FIXME: Handle multiple rndis pkt msgs that maybe enclosed in this
368 * netvsc packet (ie TotalDataBufferLength != MessageLength)
369 */
370
371 /* Remove the rndis header and pass it back up the stack */ 366 /* Remove the rndis header and pass it back up the stack */
372 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; 367 data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
373 368
@@ -610,8 +605,11 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
610 return -EBUSY; 605 return -EBUSY;
611 } else { 606 } else {
612 set_complete = &request->response_msg.msg.set_complete; 607 set_complete = &request->response_msg.msg.set_complete;
613 if (set_complete->status != RNDIS_STATUS_SUCCESS) 608 if (set_complete->status != RNDIS_STATUS_SUCCESS) {
609 netdev_err(ndev, "Fail to set MAC on host side:0x%x\n",
610 set_complete->status);
614 ret = -EINVAL; 611 ret = -EINVAL;
612 }
615 } 613 }
616 614
617cleanup: 615cleanup:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ba753d87a32f..a4a62e170ec0 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -778,7 +778,7 @@ static int at86rf230_fill_data(struct spi_device *spi)
778 return 0; 778 return 0;
779} 779}
780 780
781static int __devinit at86rf230_probe(struct spi_device *spi) 781static int at86rf230_probe(struct spi_device *spi)
782{ 782{
783 struct ieee802154_dev *dev; 783 struct ieee802154_dev *dev;
784 struct at86rf230_local *lp; 784 struct at86rf230_local *lp;
@@ -920,7 +920,7 @@ err_fill:
920 return rc; 920 return rc;
921} 921}
922 922
923static int __devexit at86rf230_remove(struct spi_device *spi) 923static int at86rf230_remove(struct spi_device *spi)
924{ 924{
925 struct at86rf230_local *lp = spi_get_drvdata(spi); 925 struct at86rf230_local *lp = spi_get_drvdata(spi);
926 926
@@ -947,7 +947,7 @@ static struct spi_driver at86rf230_driver = {
947 .owner = THIS_MODULE, 947 .owner = THIS_MODULE,
948 }, 948 },
949 .probe = at86rf230_probe, 949 .probe = at86rf230_probe,
950 .remove = __devexit_p(at86rf230_remove), 950 .remove = at86rf230_remove,
951 .suspend = at86rf230_suspend, 951 .suspend = at86rf230_suspend,
952 .resume = at86rf230_resume, 952 .resume = at86rf230_resume,
953}; 953};
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 7d39add7d467..1e9cb0bbf62c 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -354,7 +354,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
354} 354}
355 355
356 356
357static int __devinit ieee802154fake_probe(struct platform_device *pdev) 357static int ieee802154fake_probe(struct platform_device *pdev)
358{ 358{
359 struct net_device *dev; 359 struct net_device *dev;
360 struct fakehard_priv *priv; 360 struct fakehard_priv *priv;
@@ -412,7 +412,7 @@ out:
412 return err; 412 return err;
413} 413}
414 414
415static int __devexit ieee802154fake_remove(struct platform_device *pdev) 415static int ieee802154fake_remove(struct platform_device *pdev)
416{ 416{
417 struct net_device *dev = platform_get_drvdata(pdev); 417 struct net_device *dev = platform_get_drvdata(pdev);
418 unregister_netdev(dev); 418 unregister_netdev(dev);
@@ -423,7 +423,7 @@ static struct platform_device *ieee802154fake_dev;
423 423
424static struct platform_driver ieee802154fake_driver = { 424static struct platform_driver ieee802154fake_driver = {
425 .probe = ieee802154fake_probe, 425 .probe = ieee802154fake_probe,
426 .remove = __devexit_p(ieee802154fake_remove), 426 .remove = ieee802154fake_remove,
427 .driver = { 427 .driver = {
428 .name = "ieee802154hardmac", 428 .name = "ieee802154hardmac",
429 .owner = THIS_MODULE, 429 .owner = THIS_MODULE,
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index e7456fcd0913..b8d22173925d 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -221,7 +221,7 @@ static void fakelb_del(struct fakelb_dev_priv *priv)
221 ieee802154_free_device(priv->dev); 221 ieee802154_free_device(priv->dev);
222} 222}
223 223
224static int __devinit fakelb_probe(struct platform_device *pdev) 224static int fakelb_probe(struct platform_device *pdev)
225{ 225{
226 struct fakelb_priv *priv; 226 struct fakelb_priv *priv;
227 struct fakelb_dev_priv *dp; 227 struct fakelb_dev_priv *dp;
@@ -253,7 +253,7 @@ err_alloc:
253 return err; 253 return err;
254} 254}
255 255
256static int __devexit fakelb_remove(struct platform_device *pdev) 256static int fakelb_remove(struct platform_device *pdev)
257{ 257{
258 struct fakelb_priv *priv = platform_get_drvdata(pdev); 258 struct fakelb_priv *priv = platform_get_drvdata(pdev);
259 struct fakelb_dev_priv *dp, *temp; 259 struct fakelb_dev_priv *dp, *temp;
@@ -269,7 +269,7 @@ static struct platform_device *ieee802154fake_dev;
269 269
270static struct platform_driver ieee802154fake_driver = { 270static struct platform_driver ieee802154fake_driver = {
271 .probe = fakelb_probe, 271 .probe = fakelb_probe,
272 .remove = __devexit_p(fakelb_remove), 272 .remove = fakelb_remove,
273 .driver = { 273 .driver = {
274 .name = "ieee802154fakelb", 274 .name = "ieee802154fakelb",
275 .owner = THIS_MODULE, 275 .owner = THIS_MODULE,
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index ed7521693980..3f2c7aaf28c4 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -618,7 +618,7 @@ out:
618 enable_irq(devrec->spi->irq); 618 enable_irq(devrec->spi->irq);
619} 619}
620 620
621static int __devinit mrf24j40_probe(struct spi_device *spi) 621static int mrf24j40_probe(struct spi_device *spi)
622{ 622{
623 int ret = -ENOMEM; 623 int ret = -ENOMEM;
624 u8 val; 624 u8 val;
@@ -711,7 +711,7 @@ err_devrec:
711 return ret; 711 return ret;
712} 712}
713 713
714static int __devexit mrf24j40_remove(struct spi_device *spi) 714static int mrf24j40_remove(struct spi_device *spi)
715{ 715{
716 struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev); 716 struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev);
717 717
@@ -746,7 +746,7 @@ static struct spi_driver mrf24j40_driver = {
746 }, 746 },
747 .id_table = mrf24j40_ids, 747 .id_table = mrf24j40_ids,
748 .probe = mrf24j40_probe, 748 .probe = mrf24j40_probe,
749 .remove = __devexit_p(mrf24j40_remove), 749 .remove = mrf24j40_remove,
750}; 750};
751 751
752static int __init mrf24j40_init(void) 752static int __init mrf24j40_init(void)
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index e09417df8f39..b5151e4ced61 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -760,7 +760,7 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
760 .ndo_do_ioctl = au1k_irda_ioctl, 760 .ndo_do_ioctl = au1k_irda_ioctl,
761}; 761};
762 762
763static int __devinit au1k_irda_net_init(struct net_device *dev) 763static int au1k_irda_net_init(struct net_device *dev)
764{ 764{
765 struct au1k_private *aup = netdev_priv(dev); 765 struct au1k_private *aup = netdev_priv(dev);
766 struct db_dest *pDB, *pDBfree; 766 struct db_dest *pDB, *pDBfree;
@@ -849,7 +849,7 @@ out1:
849 return retval; 849 return retval;
850} 850}
851 851
852static int __devinit au1k_irda_probe(struct platform_device *pdev) 852static int au1k_irda_probe(struct platform_device *pdev)
853{ 853{
854 struct au1k_private *aup; 854 struct au1k_private *aup;
855 struct net_device *dev; 855 struct net_device *dev;
@@ -921,7 +921,7 @@ out:
921 return err; 921 return err;
922} 922}
923 923
924static int __devexit au1k_irda_remove(struct platform_device *pdev) 924static int au1k_irda_remove(struct platform_device *pdev)
925{ 925{
926 struct net_device *dev = platform_get_drvdata(pdev); 926 struct net_device *dev = platform_get_drvdata(pdev);
927 struct au1k_private *aup = netdev_priv(dev); 927 struct au1k_private *aup = netdev_priv(dev);
@@ -949,7 +949,7 @@ static struct platform_driver au1k_irda_driver = {
949 .owner = THIS_MODULE, 949 .owner = THIS_MODULE,
950 }, 950 },
951 .probe = au1k_irda_probe, 951 .probe = au1k_irda_probe,
952 .remove = __devexit_p(au1k_irda_remove), 952 .remove = au1k_irda_remove,
953}; 953};
954 954
955static int __init au1k_irda_load(void) 955static int __init au1k_irda_load(void)
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index c6a0299aa9f9..fed4a05d55c7 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -31,7 +31,7 @@ static void turnaround_delay(unsigned long last_jif, int mtt)
31 schedule_timeout_uninterruptible(ticks); 31 schedule_timeout_uninterruptible(ticks);
32} 32}
33 33
34static void __devinit bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev) 34static void bfin_sir_init_ports(struct bfin_sir_port *sp, struct platform_device *pdev)
35{ 35{
36 int i; 36 int i;
37 struct resource *res; 37 struct resource *res;
@@ -688,7 +688,7 @@ static const struct net_device_ops bfin_sir_ndo = {
688 .ndo_get_stats = bfin_sir_stats, 688 .ndo_get_stats = bfin_sir_stats,
689}; 689};
690 690
691static int __devinit bfin_sir_probe(struct platform_device *pdev) 691static int bfin_sir_probe(struct platform_device *pdev)
692{ 692{
693 struct net_device *dev; 693 struct net_device *dev;
694 struct bfin_sir_self *self; 694 struct bfin_sir_self *self;
@@ -775,7 +775,7 @@ err_mem_0:
775 return err; 775 return err;
776} 776}
777 777
778static int __devexit bfin_sir_remove(struct platform_device *pdev) 778static int bfin_sir_remove(struct platform_device *pdev)
779{ 779{
780 struct bfin_sir_port *sir_port; 780 struct bfin_sir_port *sir_port;
781 struct net_device *dev = NULL; 781 struct net_device *dev = NULL;
@@ -798,7 +798,7 @@ static int __devexit bfin_sir_remove(struct platform_device *pdev)
798 798
799static struct platform_driver bfin_ir_driver = { 799static struct platform_driver bfin_ir_driver = {
800 .probe = bfin_sir_probe, 800 .probe = bfin_sir_probe,
801 .remove = __devexit_p(bfin_sir_remove), 801 .remove = bfin_sir_remove,
802 .suspend = bfin_sir_suspend, 802 .suspend = bfin_sir_suspend,
803 .resume = bfin_sir_resume, 803 .resume = bfin_sir_resume,
804 .driver = { 804 .driver = {
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
index f83c5b881d2d..5fe1f4dd3369 100644
--- a/drivers/net/irda/ep7211-sir.c
+++ b/drivers/net/irda/ep7211-sir.c
@@ -1,52 +1,18 @@
1/* 1/*
2 * IR port driver for the Cirrus Logic EP7211 processor. 2 * IR port driver for the Cirrus Logic CLPS711X processors
3 * 3 *
4 * Copyright 2001, Blue Mug Inc. All rights reserved. 4 * Copyright 2001, Blue Mug Inc. All rights reserved.
5 * Copyright 2007, Samuel Ortiz <samuel@sortiz.org> 5 * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
6 */ 6 */
7#include <linux/module.h>
8#include <linux/delay.h>
9#include <linux/tty.h>
10#include <linux/init.h>
11#include <linux/spinlock.h>
12 7
13#include <net/irda/irda.h> 8#include <linux/module.h>
14#include <net/irda/irda_device.h> 9#include <linux/platform_device.h>
15 10
16#include <asm/io.h>
17#include <mach/hardware.h> 11#include <mach/hardware.h>
18 12
19#include "sir-dev.h" 13#include "sir-dev.h"
20 14
21#define MIN_DELAY 25 /* 15 us, but wait a little more to be sure */ 15static int clps711x_dongle_open(struct sir_dev *dev)
22#define MAX_DELAY 10000 /* 1 ms */
23
24static int ep7211_open(struct sir_dev *dev);
25static int ep7211_close(struct sir_dev *dev);
26static int ep7211_change_speed(struct sir_dev *dev, unsigned speed);
27static int ep7211_reset(struct sir_dev *dev);
28
29static struct dongle_driver ep7211 = {
30 .owner = THIS_MODULE,
31 .driver_name = "EP7211 IR driver",
32 .type = IRDA_EP7211_DONGLE,
33 .open = ep7211_open,
34 .close = ep7211_close,
35 .reset = ep7211_reset,
36 .set_speed = ep7211_change_speed,
37};
38
39static int __init ep7211_sir_init(void)
40{
41 return irda_register_dongle(&ep7211);
42}
43
44static void __exit ep7211_sir_cleanup(void)
45{
46 irda_unregister_dongle(&ep7211);
47}
48
49static int ep7211_open(struct sir_dev *dev)
50{ 16{
51 unsigned int syscon; 17 unsigned int syscon;
52 18
@@ -58,7 +24,7 @@ static int ep7211_open(struct sir_dev *dev)
58 return 0; 24 return 0;
59} 25}
60 26
61static int ep7211_close(struct sir_dev *dev) 27static int clps711x_dongle_close(struct sir_dev *dev)
62{ 28{
63 unsigned int syscon; 29 unsigned int syscon;
64 30
@@ -70,20 +36,35 @@ static int ep7211_close(struct sir_dev *dev)
70 return 0; 36 return 0;
71} 37}
72 38
73static int ep7211_change_speed(struct sir_dev *dev, unsigned speed) 39static struct dongle_driver clps711x_dongle = {
40 .owner = THIS_MODULE,
41 .driver_name = "EP7211 IR driver",
42 .type = IRDA_EP7211_DONGLE,
43 .open = clps711x_dongle_open,
44 .close = clps711x_dongle_close,
45};
46
47static int clps711x_sir_probe(struct platform_device *pdev)
74{ 48{
75 return 0; 49 return irda_register_dongle(&clps711x_dongle);
76} 50}
77 51
78static int ep7211_reset(struct sir_dev *dev) 52static int clps711x_sir_remove(struct platform_device *pdev)
79{ 53{
80 return 0; 54 return irda_unregister_dongle(&clps711x_dongle);
81} 55}
82 56
57static struct platform_driver clps711x_sir_driver = {
58 .driver = {
59 .name = "sir-clps711x",
60 .owner = THIS_MODULE,
61 },
62 .probe = clps711x_sir_probe,
63 .remove = clps711x_sir_remove,
64};
65module_platform_driver(clps711x_sir_driver);
66
83MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>"); 67MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
84MODULE_DESCRIPTION("EP7211 IR dongle driver"); 68MODULE_DESCRIPTION("EP7211 IR dongle driver");
85MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
86MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */ 70MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
87
88module_init(ep7211_sir_init);
89module_exit(ep7211_sir_cleanup);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 4b746d9bd8e7..9448587de453 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -33,11 +33,7 @@
33 33
34#define DRIVER_NAME "sh_irda" 34#define DRIVER_NAME "sh_irda"
35 35
36#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
37#define __IRDARAM_LEN 0x13FF
38#else
39#define __IRDARAM_LEN 0x1039 36#define __IRDARAM_LEN 0x1039
40#endif
41 37
42#define IRTMR 0x1F00 /* Transfer mode */ 38#define IRTMR 0x1F00 /* Transfer mode */
43#define IRCFR 0x1F02 /* Configuration */ 39#define IRCFR 0x1F02 /* Configuration */
@@ -757,7 +753,7 @@ static const struct net_device_ops sh_irda_ndo = {
757 753
758 754
759************************************************************************/ 755************************************************************************/
760static int __devinit sh_irda_probe(struct platform_device *pdev) 756static int sh_irda_probe(struct platform_device *pdev)
761{ 757{
762 struct net_device *ndev; 758 struct net_device *ndev;
763 struct sh_irda_self *self; 759 struct sh_irda_self *self;
@@ -829,7 +825,7 @@ exit:
829 return err; 825 return err;
830} 826}
831 827
832static int __devexit sh_irda_remove(struct platform_device *pdev) 828static int sh_irda_remove(struct platform_device *pdev)
833{ 829{
834 struct net_device *ndev = platform_get_drvdata(pdev); 830 struct net_device *ndev = platform_get_drvdata(pdev);
835 struct sh_irda_self *self = netdev_priv(ndev); 831 struct sh_irda_self *self = netdev_priv(ndev);
@@ -866,7 +862,7 @@ static const struct dev_pm_ops sh_irda_pm_ops = {
866 862
867static struct platform_driver sh_irda_driver = { 863static struct platform_driver sh_irda_driver = {
868 .probe = sh_irda_probe, 864 .probe = sh_irda_probe,
869 .remove = __devexit_p(sh_irda_remove), 865 .remove = sh_irda_remove,
870 .driver = { 866 .driver = {
871 .name = DRIVER_NAME, 867 .name = DRIVER_NAME,
872 .pm = &sh_irda_pm_ops, 868 .pm = &sh_irda_pm_ops,
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 624ac1939e85..24aefcd84065 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -705,7 +705,7 @@ static const struct net_device_ops sh_sir_ndo = {
705 705
706 706
707************************************************************************/ 707************************************************************************/
708static int __devinit sh_sir_probe(struct platform_device *pdev) 708static int sh_sir_probe(struct platform_device *pdev)
709{ 709{
710 struct net_device *ndev; 710 struct net_device *ndev;
711 struct sh_sir_self *self; 711 struct sh_sir_self *self;
@@ -783,7 +783,7 @@ exit:
783 return err; 783 return err;
784} 784}
785 785
786static int __devexit sh_sir_remove(struct platform_device *pdev) 786static int sh_sir_remove(struct platform_device *pdev)
787{ 787{
788 struct net_device *ndev = platform_get_drvdata(pdev); 788 struct net_device *ndev = platform_get_drvdata(pdev);
789 struct sh_sir_self *self = netdev_priv(ndev); 789 struct sh_sir_self *self = netdev_priv(ndev);
@@ -803,7 +803,7 @@ static int __devexit sh_sir_remove(struct platform_device *pdev)
803 803
804static struct platform_driver sh_sir_driver = { 804static struct platform_driver sh_sir_driver = {
805 .probe = sh_sir_probe, 805 .probe = sh_sir_probe,
806 .remove = __devexit_p(sh_sir_remove), 806 .remove = sh_sir_remove,
807 .driver = { 807 .driver = {
808 .name = DRIVER_NAME, 808 .name = DRIVER_NAME,
809 }, 809 },
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index a926813ee91d..5290952b60c2 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,8 +376,8 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
376static int pnp_driver_registered; 376static int pnp_driver_registered;
377 377
378#ifdef CONFIG_PNP 378#ifdef CONFIG_PNP
379static int __devinit smsc_ircc_pnp_probe(struct pnp_dev *dev, 379static int smsc_ircc_pnp_probe(struct pnp_dev *dev,
380 const struct pnp_device_id *dev_id) 380 const struct pnp_device_id *dev_id)
381{ 381{
382 unsigned int firbase, sirbase; 382 unsigned int firbase, sirbase;
383 u8 dma, irq; 383 u8 dma, irq;
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
515 * Try to open driver instance 515 * Try to open driver instance
516 * 516 *
517 */ 517 */
518static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq) 518static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
519{ 519{
520 struct smsc_ircc_cb *self; 520 struct smsc_ircc_cb *self;
521 struct net_device *dev; 521 struct net_device *dev;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 1a89fd459dd5..f9033c6a888c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -77,7 +77,7 @@ static int dongle_id = 0; /* default: probe */
77module_param(dongle_id, int, 0); 77module_param(dongle_id, int, 0);
78 78
79/* Some prototypes */ 79/* Some prototypes */
80static int via_ircc_open(struct pci_dev *pdev, chipio_t * info, 80static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
81 unsigned int id); 81 unsigned int id);
82static int via_ircc_dma_receive(struct via_ircc_cb *self); 82static int via_ircc_dma_receive(struct via_ircc_cb *self);
83static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, 83static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
@@ -102,8 +102,8 @@ static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
102static void hwreset(struct via_ircc_cb *self); 102static void hwreset(struct via_ircc_cb *self);
103static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase); 103static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
104static int upload_rxdata(struct via_ircc_cb *self, int iobase); 104static int upload_rxdata(struct via_ircc_cb *self, int iobase);
105static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id); 105static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
106static void __devexit via_remove_one (struct pci_dev *pdev); 106static void via_remove_one(struct pci_dev *pdev);
107 107
108/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */ 108/* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
109static void iodelay(int udelay) 109static void iodelay(int udelay)
@@ -132,7 +132,7 @@ static struct pci_driver via_driver = {
132 .name = VIA_MODULE_NAME, 132 .name = VIA_MODULE_NAME,
133 .id_table = via_pci_tbl, 133 .id_table = via_pci_tbl,
134 .probe = via_init_one, 134 .probe = via_init_one,
135 .remove = __devexit_p(via_remove_one), 135 .remove = via_remove_one,
136}; 136};
137 137
138 138
@@ -156,7 +156,7 @@ static int __init via_ircc_init(void)
156 return 0; 156 return 0;
157} 157}
158 158
159static int __devinit via_init_one (struct pci_dev *pcidev, const struct pci_device_id *id) 159static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
160{ 160{
161 int rc; 161 int rc;
162 u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1; 162 u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
@@ -286,8 +286,7 @@ static const struct net_device_ops via_ircc_fir_ops = {
286 * Open driver instance 286 * Open driver instance
287 * 287 *
288 */ 288 */
289static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info, 289static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
290 unsigned int id)
291{ 290{
292 struct net_device *dev; 291 struct net_device *dev;
293 struct via_ircc_cb *self; 292 struct via_ircc_cb *self;
@@ -424,7 +423,7 @@ static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
424 * Close driver instance 423 * Close driver instance
425 * 424 *
426 */ 425 */
427static void __devexit via_remove_one(struct pci_dev *pdev) 426static void via_remove_one(struct pci_dev *pdev)
428{ 427{
429 struct via_ircc_cb *self = pci_get_drvdata(pdev); 428 struct via_ircc_cb *self = pci_get_drvdata(pdev);
430 int iobase; 429 int iobase;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 9021d0131727..2f99f8881dfc 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1627,7 +1627,7 @@ static int vlsi_irda_init(struct net_device *ndev)
1627 1627
1628/**************************************************************/ 1628/**************************************************************/
1629 1629
1630static int __devinit 1630static int
1631vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1631vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1632{ 1632{
1633 struct net_device *ndev; 1633 struct net_device *ndev;
@@ -1699,7 +1699,7 @@ out:
1699 return -ENODEV; 1699 return -ENODEV;
1700} 1700}
1701 1701
1702static void __devexit vlsi_irda_remove(struct pci_dev *pdev) 1702static void vlsi_irda_remove(struct pci_dev *pdev)
1703{ 1703{
1704 struct net_device *ndev = pci_get_drvdata(pdev); 1704 struct net_device *ndev = pci_get_drvdata(pdev);
1705 vlsi_irda_dev_t *idev; 1705 vlsi_irda_dev_t *idev;
@@ -1832,7 +1832,7 @@ static struct pci_driver vlsi_irda_driver = {
1832 .name = drivername, 1832 .name = drivername,
1833 .id_table = vlsi_irda_table, 1833 .id_table = vlsi_irda_table,
1834 .probe = vlsi_irda_probe, 1834 .probe = vlsi_irda_probe,
1835 .remove = __devexit_p(vlsi_irda_remove), 1835 .remove = vlsi_irda_remove,
1836#ifdef CONFIG_PM 1836#ifdef CONFIG_PM
1837 .suspend = vlsi_irda_suspend, 1837 .suspend = vlsi_irda_suspend,
1838 .resume = vlsi_irda_resume, 1838 .resume = vlsi_irda_resume,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index b3321129a83c..6989ebe2bc79 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -56,6 +56,10 @@ static char config[MAX_PARAM_LENGTH];
56module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0); 56module_param_string(netconsole, config, MAX_PARAM_LENGTH, 0);
57MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]"); 57MODULE_PARM_DESC(netconsole, " netconsole=[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]");
58 58
59static bool oops_only = false;
60module_param(oops_only, bool, 0600);
61MODULE_PARM_DESC(oops_only, "Only log oops messages");
62
59#ifndef MODULE 63#ifndef MODULE
60static int __init option_setup(char *opt) 64static int __init option_setup(char *opt)
61{ 65{
@@ -683,6 +687,8 @@ static void write_msg(struct console *con, const char *msg, unsigned int len)
683 struct netconsole_target *nt; 687 struct netconsole_target *nt;
684 const char *tmp; 688 const char *tmp;
685 689
690 if (oops_only && !oops_in_progress)
691 return;
686 /* Avoid taking lock and disabling interrupts unnecessarily */ 692 /* Avoid taking lock and disabling interrupts unnecessarily */
687 if (list_empty(&target_list)) 693 if (list_empty(&target_list))
688 return; 694 return;
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 81c7bc010dd8..383e8338ad86 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,18 +150,24 @@ static struct phy_driver dm91xx_driver[] = {
150 .name = "Davicom DM9161E", 150 .name = "Davicom DM9161E",
151 .phy_id_mask = 0x0ffffff0, 151 .phy_id_mask = 0x0ffffff0,
152 .features = PHY_BASIC_FEATURES, 152 .features = PHY_BASIC_FEATURES,
153 .flags = PHY_HAS_INTERRUPT,
153 .config_init = dm9161_config_init, 154 .config_init = dm9161_config_init,
154 .config_aneg = dm9161_config_aneg, 155 .config_aneg = dm9161_config_aneg,
155 .read_status = genphy_read_status, 156 .read_status = genphy_read_status,
157 .ack_interrupt = dm9161_ack_interrupt,
158 .config_intr = dm9161_config_intr,
156 .driver = { .owner = THIS_MODULE,}, 159 .driver = { .owner = THIS_MODULE,},
157}, { 160}, {
158 .phy_id = 0x0181b8a0, 161 .phy_id = 0x0181b8a0,
159 .name = "Davicom DM9161A", 162 .name = "Davicom DM9161A",
160 .phy_id_mask = 0x0ffffff0, 163 .phy_id_mask = 0x0ffffff0,
161 .features = PHY_BASIC_FEATURES, 164 .features = PHY_BASIC_FEATURES,
165 .flags = PHY_HAS_INTERRUPT,
162 .config_init = dm9161_config_init, 166 .config_init = dm9161_config_init,
163 .config_aneg = dm9161_config_aneg, 167 .config_aneg = dm9161_config_aneg,
164 .read_status = genphy_read_status, 168 .read_status = genphy_read_status,
169 .ack_interrupt = dm9161_ack_interrupt,
170 .config_intr = dm9161_config_intr,
165 .driver = { .owner = THIS_MODULE,}, 171 .driver = { .owner = THIS_MODULE,},
166}, { 172}, {
167 .phy_id = 0x00181b80, 173 .phy_id = 0x00181b80,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 24e05c43bff8..7490b6c866e6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -48,6 +48,21 @@
48#define CAL_TRIGGER 7 48#define CAL_TRIGGER 7
49#define PER_TRIGGER 6 49#define PER_TRIGGER 6
50 50
51#define MII_DP83640_MICR 0x11
52#define MII_DP83640_MISR 0x12
53
54#define MII_DP83640_MICR_OE 0x1
55#define MII_DP83640_MICR_IE 0x2
56
57#define MII_DP83640_MISR_RHF_INT_EN 0x01
58#define MII_DP83640_MISR_FHF_INT_EN 0x02
59#define MII_DP83640_MISR_ANC_INT_EN 0x04
60#define MII_DP83640_MISR_DUP_INT_EN 0x08
61#define MII_DP83640_MISR_SPD_INT_EN 0x10
62#define MII_DP83640_MISR_LINK_INT_EN 0x20
63#define MII_DP83640_MISR_ED_INT_EN 0x40
64#define MII_DP83640_MISR_LQ_INT_EN 0x80
65
51/* phyter seems to miss the mark by 16 ns */ 66/* phyter seems to miss the mark by 16 ns */
52#define ADJTIME_FIX 16 67#define ADJTIME_FIX 16
53 68
@@ -1043,6 +1058,65 @@ static void dp83640_remove(struct phy_device *phydev)
1043 kfree(dp83640); 1058 kfree(dp83640);
1044} 1059}
1045 1060
1061static int dp83640_ack_interrupt(struct phy_device *phydev)
1062{
1063 int err = phy_read(phydev, MII_DP83640_MISR);
1064
1065 if (err < 0)
1066 return err;
1067
1068 return 0;
1069}
1070
1071static int dp83640_config_intr(struct phy_device *phydev)
1072{
1073 int micr;
1074 int misr;
1075 int err;
1076
1077 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1078 misr = phy_read(phydev, MII_DP83640_MISR);
1079 if (misr < 0)
1080 return misr;
1081 misr |=
1082 (MII_DP83640_MISR_ANC_INT_EN |
1083 MII_DP83640_MISR_DUP_INT_EN |
1084 MII_DP83640_MISR_SPD_INT_EN |
1085 MII_DP83640_MISR_LINK_INT_EN);
1086 err = phy_write(phydev, MII_DP83640_MISR, misr);
1087 if (err < 0)
1088 return err;
1089
1090 micr = phy_read(phydev, MII_DP83640_MICR);
1091 if (micr < 0)
1092 return micr;
1093 micr |=
1094 (MII_DP83640_MICR_OE |
1095 MII_DP83640_MICR_IE);
1096 return phy_write(phydev, MII_DP83640_MICR, micr);
1097 } else {
1098 micr = phy_read(phydev, MII_DP83640_MICR);
1099 if (micr < 0)
1100 return micr;
1101 micr &=
1102 ~(MII_DP83640_MICR_OE |
1103 MII_DP83640_MICR_IE);
1104 err = phy_write(phydev, MII_DP83640_MICR, micr);
1105 if (err < 0)
1106 return err;
1107
1108 misr = phy_read(phydev, MII_DP83640_MISR);
1109 if (misr < 0)
1110 return misr;
1111 misr &=
1112 ~(MII_DP83640_MISR_ANC_INT_EN |
1113 MII_DP83640_MISR_DUP_INT_EN |
1114 MII_DP83640_MISR_SPD_INT_EN |
1115 MII_DP83640_MISR_LINK_INT_EN);
1116 return phy_write(phydev, MII_DP83640_MISR, misr);
1117 }
1118}
1119
1046static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) 1120static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
1047{ 1121{
1048 struct dp83640_private *dp83640 = phydev->priv; 1122 struct dp83640_private *dp83640 = phydev->priv;
@@ -1253,11 +1327,13 @@ static struct phy_driver dp83640_driver = {
1253 .phy_id_mask = 0xfffffff0, 1327 .phy_id_mask = 0xfffffff0,
1254 .name = "NatSemi DP83640", 1328 .name = "NatSemi DP83640",
1255 .features = PHY_BASIC_FEATURES, 1329 .features = PHY_BASIC_FEATURES,
1256 .flags = 0, 1330 .flags = PHY_HAS_INTERRUPT,
1257 .probe = dp83640_probe, 1331 .probe = dp83640_probe,
1258 .remove = dp83640_remove, 1332 .remove = dp83640_remove,
1259 .config_aneg = genphy_config_aneg, 1333 .config_aneg = genphy_config_aneg,
1260 .read_status = genphy_read_status, 1334 .read_status = genphy_read_status,
1335 .ack_interrupt = dp83640_ack_interrupt,
1336 .config_intr = dp83640_config_intr,
1261 .ts_info = dp83640_ts_info, 1337 .ts_info = dp83640_ts_info,
1262 .hwtstamp = dp83640_hwtstamp, 1338 .hwtstamp = dp83640_hwtstamp,
1263 .rxtstamp = dp83640_rxtstamp, 1339 .rxtstamp = dp83640_rxtstamp,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2ed1140df3e9..27274986ab56 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -103,9 +103,9 @@ static struct mdiobb_ops mdio_gpio_ops = {
103 .get_mdio_data = mdio_get, 103 .get_mdio_data = mdio_get,
104}; 104};
105 105
106static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev, 106static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
107 struct mdio_gpio_platform_data *pdata, 107 struct mdio_gpio_platform_data *pdata,
108 int bus_id) 108 int bus_id)
109{ 109{
110 struct mii_bus *new_bus; 110 struct mii_bus *new_bus;
111 struct mdio_gpio_info *bitbang; 111 struct mdio_gpio_info *bitbang;
@@ -173,7 +173,7 @@ static void mdio_gpio_bus_deinit(struct device *dev)
173 kfree(bitbang); 173 kfree(bitbang);
174} 174}
175 175
176static void __devexit mdio_gpio_bus_destroy(struct device *dev) 176static void mdio_gpio_bus_destroy(struct device *dev)
177{ 177{
178 struct mii_bus *bus = dev_get_drvdata(dev); 178 struct mii_bus *bus = dev_get_drvdata(dev);
179 179
@@ -181,7 +181,7 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev)
181 mdio_gpio_bus_deinit(dev); 181 mdio_gpio_bus_deinit(dev);
182} 182}
183 183
184static int __devinit mdio_gpio_probe(struct platform_device *pdev) 184static int mdio_gpio_probe(struct platform_device *pdev)
185{ 185{
186 struct mdio_gpio_platform_data *pdata; 186 struct mdio_gpio_platform_data *pdata;
187 struct mii_bus *new_bus; 187 struct mii_bus *new_bus;
@@ -213,7 +213,7 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
213 return ret; 213 return ret;
214} 214}
215 215
216static int __devexit mdio_gpio_remove(struct platform_device *pdev) 216static int mdio_gpio_remove(struct platform_device *pdev)
217{ 217{
218 mdio_gpio_bus_destroy(&pdev->dev); 218 mdio_gpio_bus_destroy(&pdev->dev);
219 219
@@ -227,7 +227,7 @@ static struct of_device_id mdio_gpio_of_match[] = {
227 227
228static struct platform_driver mdio_gpio_driver = { 228static struct platform_driver mdio_gpio_driver = {
229 .probe = mdio_gpio_probe, 229 .probe = mdio_gpio_probe,
230 .remove = __devexit_p(mdio_gpio_remove), 230 .remove = mdio_gpio_remove,
231 .driver = { 231 .driver = {
232 .name = "mdio-gpio", 232 .name = "mdio-gpio",
233 .owner = THIS_MODULE, 233 .owner = THIS_MODULE,
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index eefe49e8713c..0c9accb1c14f 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -49,7 +49,7 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
49 return 0; 49 return 0;
50} 50}
51 51
52static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev) 52static int mdio_mux_gpio_probe(struct platform_device *pdev)
53{ 53{
54 enum of_gpio_flags f; 54 enum of_gpio_flags f;
55 struct mdio_mux_gpio_state *s; 55 struct mdio_mux_gpio_state *s;
@@ -104,7 +104,7 @@ err:
104 return r; 104 return r;
105} 105}
106 106
107static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev) 107static int mdio_mux_gpio_remove(struct platform_device *pdev)
108{ 108{
109 struct mdio_mux_gpio_state *s = pdev->dev.platform_data; 109 struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
110 mdio_mux_uninit(s->mux_handle); 110 mdio_mux_uninit(s->mux_handle);
@@ -130,7 +130,7 @@ static struct platform_driver mdio_mux_gpio_driver = {
130 .of_match_table = mdio_mux_gpio_match, 130 .of_match_table = mdio_mux_gpio_match,
131 }, 131 },
132 .probe = mdio_mux_gpio_probe, 132 .probe = mdio_mux_gpio_probe,
133 .remove = __devexit_p(mdio_mux_gpio_remove), 133 .remove = mdio_mux_gpio_remove,
134}; 134};
135 135
136module_platform_driver(mdio_mux_gpio_driver); 136module_platform_driver(mdio_mux_gpio_driver);
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 9061ba622ac4..9733bd239a86 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -67,7 +67,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
67 return 0; 67 return 0;
68} 68}
69 69
70static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev) 70static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
71{ 71{
72 struct device_node *np2, *np = pdev->dev.of_node; 72 struct device_node *np2, *np = pdev->dev.of_node;
73 struct mdio_mux_mmioreg_state *s; 73 struct mdio_mux_mmioreg_state *s;
@@ -137,7 +137,7 @@ static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev)
137 return 0; 137 return 0;
138} 138}
139 139
140static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev) 140static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
141{ 141{
142 struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev); 142 struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);
143 143
@@ -161,7 +161,7 @@ static struct platform_driver mdio_mux_mmioreg_driver = {
161 .of_match_table = mdio_mux_mmioreg_match, 161 .of_match_table = mdio_mux_mmioreg_match,
162 }, 162 },
163 .probe = mdio_mux_mmioreg_probe, 163 .probe = mdio_mux_mmioreg_probe,
164 .remove = __devexit_p(mdio_mux_mmioreg_remove), 164 .remove = mdio_mux_mmioreg_remove,
165}; 165};
166 166
167module_platform_driver(mdio_mux_mmioreg_driver); 167module_platform_driver(mdio_mux_mmioreg_driver);
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index d4015aa663e6..09297fe05ae5 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -96,7 +96,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
96 return 0; 96 return 0;
97} 97}
98 98
99static int __devinit octeon_mdiobus_probe(struct platform_device *pdev) 99static int octeon_mdiobus_probe(struct platform_device *pdev)
100{ 100{
101 struct octeon_mdiobus *bus; 101 struct octeon_mdiobus *bus;
102 struct resource *res_mem; 102 struct resource *res_mem;
@@ -159,7 +159,7 @@ fail:
159 return err; 159 return err;
160} 160}
161 161
162static int __devexit octeon_mdiobus_remove(struct platform_device *pdev) 162static int octeon_mdiobus_remove(struct platform_device *pdev)
163{ 163{
164 struct octeon_mdiobus *bus; 164 struct octeon_mdiobus *bus;
165 union cvmx_smix_en smi_en; 165 union cvmx_smix_en smi_en;
@@ -188,7 +188,7 @@ static struct platform_driver octeon_mdiobus_driver = {
188 .of_match_table = octeon_mdiobus_match, 188 .of_match_table = octeon_mdiobus_match,
189 }, 189 },
190 .probe = octeon_mdiobus_probe, 190 .probe = octeon_mdiobus_probe,
191 .remove = __devexit_p(octeon_mdiobus_remove), 191 .remove = octeon_mdiobus_remove,
192}; 192};
193 193
194void octeon_mdiobus_force_mod_depencency(void) 194void octeon_mdiobus_force_mod_depencency(void)
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index c1ef3000ea60..044b5326459f 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -431,10 +431,24 @@ static struct dev_pm_ops mdio_bus_pm_ops = {
431 431
432#endif /* CONFIG_PM */ 432#endif /* CONFIG_PM */
433 433
434static ssize_t
435phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
436{
437 struct phy_device *phydev = to_phy_device(dev);
438
439 return sprintf(buf, "0x%.8lx\n", (unsigned long)phydev->phy_id);
440}
441
442static struct device_attribute mdio_dev_attrs[] = {
443 __ATTR_RO(phy_id),
444 __ATTR_NULL
445};
446
434struct bus_type mdio_bus_type = { 447struct bus_type mdio_bus_type = {
435 .name = "mdio_bus", 448 .name = "mdio_bus",
436 .match = mdio_bus_match, 449 .match = mdio_bus_match,
437 .pm = MDIO_BUS_PM_OPS, 450 .pm = MDIO_BUS_PM_OPS,
451 .dev_attrs = mdio_dev_attrs,
438}; 452};
439EXPORT_SYMBOL(mdio_bus_type); 453EXPORT_SYMBOL(mdio_bus_type);
440 454
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2165d5fdb8c0..b983596abcbb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -127,6 +127,39 @@ static int ks8051_config_init(struct phy_device *phydev)
127 return 0; 127 return 0;
128} 128}
129 129
130#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
131#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
132#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
133int ksz8873mll_read_status(struct phy_device *phydev)
134{
135 int regval;
136
137 /* dummy read */
138 regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
139
140 regval = phy_read(phydev, KSZ8873MLL_GLOBAL_CONTROL_4);
141
142 if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX)
143 phydev->duplex = DUPLEX_HALF;
144 else
145 phydev->duplex = DUPLEX_FULL;
146
147 if (regval & KSZ8873MLL_GLOBAL_CONTROL_4_SPEED)
148 phydev->speed = SPEED_10;
149 else
150 phydev->speed = SPEED_100;
151
152 phydev->link = 1;
153 phydev->pause = phydev->asym_pause = 0;
154
155 return 0;
156}
157
158static int ksz8873mll_config_aneg(struct phy_device *phydev)
159{
160 return 0;
161}
162
130static struct phy_driver ksphy_driver[] = { 163static struct phy_driver ksphy_driver[] = {
131{ 164{
132 .phy_id = PHY_ID_KS8737, 165 .phy_id = PHY_ID_KS8737,
@@ -204,6 +237,16 @@ static struct phy_driver ksphy_driver[] = {
204 .ack_interrupt = kszphy_ack_interrupt, 237 .ack_interrupt = kszphy_ack_interrupt,
205 .config_intr = ksz9021_config_intr, 238 .config_intr = ksz9021_config_intr,
206 .driver = { .owner = THIS_MODULE, }, 239 .driver = { .owner = THIS_MODULE, },
240}, {
241 .phy_id = PHY_ID_KSZ8873MLL,
242 .phy_id_mask = 0x00fffff0,
243 .name = "Micrel KSZ8873MLL Switch",
244 .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
245 .flags = PHY_HAS_MAGICANEG,
246 .config_init = kszphy_config_init,
247 .config_aneg = ksz8873mll_config_aneg,
248 .read_status = ksz8873mll_read_status,
249 .driver = { .owner = THIS_MODULE, },
207} }; 250} };
208 251
209static int __init ksphy_init(void) 252static int __init ksphy_init(void)
@@ -232,6 +275,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
232 { PHY_ID_KSZ8021, 0x00ffffff }, 275 { PHY_ID_KSZ8021, 0x00ffffff },
233 { PHY_ID_KSZ8041, 0x00fffff0 }, 276 { PHY_ID_KSZ8041, 0x00fffff0 },
234 { PHY_ID_KSZ8051, 0x00fffff0 }, 277 { PHY_ID_KSZ8051, 0x00fffff0 },
278 { PHY_ID_KSZ8873MLL, 0x00fffff0 },
235 { } 279 { }
236}; 280};
237 281
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 88e3991464e7..11f34813e23f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,7 +43,31 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
43 43
44static int smsc_phy_config_init(struct phy_device *phydev) 44static int smsc_phy_config_init(struct phy_device *phydev)
45{ 45{
46 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); 46 int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
47 if (rc < 0)
48 return rc;
49
50 /* If the SMSC PHY is in power down mode, then set it
51 * in all capable mode before using it.
52 */
53 if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
54 int timeout = 50000;
55
56 /* set "all capable" mode and reset the phy */
57 rc |= MII_LAN83C185_MODE_ALL;
58 phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
59 phy_write(phydev, MII_BMCR, BMCR_RESET);
60
61 /* wait end of reset (max 500 ms) */
62 do {
63 udelay(10);
64 if (timeout-- == 0)
65 return -1;
66 rc = phy_read(phydev, MII_BMCR);
67 } while (rc & BMCR_RESET);
68 }
69
70 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
47 if (rc < 0) 71 if (rc < 0)
48 return rc; 72 return rc;
49 73
@@ -56,35 +80,52 @@ static int smsc_phy_config_init(struct phy_device *phydev)
56 return smsc_phy_ack_interrupt (phydev); 80 return smsc_phy_ack_interrupt (phydev);
57} 81}
58 82
59static int lan87xx_config_init(struct phy_device *phydev) 83static int lan911x_config_init(struct phy_device *phydev)
60{ 84{
61 /*
62 * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
63 * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
64 * to a bug on the chip.
65 *
66 * When the system is powered on with the network cable being
67 * disconnected all the way until after ifconfig ethX up is
68 * issued for the LAN port with this PHY, connecting the cable
69 * afterwards does not cause LINK change detection, while the
70 * expected behavior is the Link UP being detected.
71 */
72 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
73 if (rc < 0)
74 return rc;
75
76 rc &= ~MII_LAN83C185_EDPWRDOWN;
77
78 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
79 if (rc < 0)
80 return rc;
81
82 return smsc_phy_ack_interrupt(phydev); 85 return smsc_phy_ack_interrupt(phydev);
83} 86}
84 87
85static int lan911x_config_init(struct phy_device *phydev) 88/*
89 * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
90 * other in order to set the ENERGYON bit and exit EDPD mode. If a link partner
91 * does send the pulses within this interval, the PHY will remained powered
92 * down.
93 *
94 * This workaround will manually toggle the PHY on/off upon calls to read_status
95 * in order to generate link test pulses if the link is down. If a link partner
96 * is present, it will respond to the pulses, which will cause the ENERGYON bit
97 * to be set and will cause the EDPD mode to be exited.
98 */
99static int lan87xx_read_status(struct phy_device *phydev)
86{ 100{
87 return smsc_phy_ack_interrupt(phydev); 101 int err = genphy_read_status(phydev);
102
103 if (!phydev->link) {
104 /* Disable EDPD to wake up PHY */
105 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
106 if (rc < 0)
107 return rc;
108
109 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
110 rc & ~MII_LAN83C185_EDPWRDOWN);
111 if (rc < 0)
112 return rc;
113
114 /* Sleep 64 ms to allow ~5 link test pulses to be sent */
115 msleep(64);
116
117 /* Re-enable EDPD */
118 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
119 if (rc < 0)
120 return rc;
121
122 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
123 rc | MII_LAN83C185_EDPWRDOWN);
124 if (rc < 0)
125 return rc;
126 }
127
128 return err;
88} 129}
89 130
90static struct phy_driver smsc_phy_driver[] = { 131static struct phy_driver smsc_phy_driver[] = {
@@ -187,8 +228,8 @@ static struct phy_driver smsc_phy_driver[] = {
187 228
188 /* basic functions */ 229 /* basic functions */
189 .config_aneg = genphy_config_aneg, 230 .config_aneg = genphy_config_aneg,
190 .read_status = genphy_read_status, 231 .read_status = lan87xx_read_status,
191 .config_init = lan87xx_config_init, 232 .config_init = smsc_phy_config_init,
192 233
193 /* IRQ related */ 234 /* IRQ related */
194 .ack_interrupt = smsc_phy_ack_interrupt, 235 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 1c3abce78b6a..41eb8ffeb53d 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -264,7 +264,7 @@ static struct bin_attribute ks8995_registers_attr = {
264 264
265/* ------------------------------------------------------------------------ */ 265/* ------------------------------------------------------------------------ */
266 266
267static int __devinit ks8995_probe(struct spi_device *spi) 267static int ks8995_probe(struct spi_device *spi)
268{ 268{
269 struct ks8995_switch *ks; 269 struct ks8995_switch *ks;
270 struct ks8995_pdata *pdata; 270 struct ks8995_pdata *pdata;
@@ -332,7 +332,7 @@ err_drvdata:
332 return err; 332 return err;
333} 333}
334 334
335static int __devexit ks8995_remove(struct spi_device *spi) 335static int ks8995_remove(struct spi_device *spi)
336{ 336{
337 struct ks8995_data *ks8995; 337 struct ks8995_data *ks8995;
338 338
@@ -353,7 +353,7 @@ static struct spi_driver ks8995_driver = {
353 .owner = THIS_MODULE, 353 .owner = THIS_MODULE,
354 }, 354 },
355 .probe = ks8995_probe, 355 .probe = ks8995_probe,
356 .remove = __devexit_p(ks8995_remove), 356 .remove = ks8995_remove,
357}; 357};
358 358
359static int __init ks8995_init(void) 359static int __init ks8995_init(void)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index eb3f5cefeba3..0b2706abe3e3 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1034,7 +1034,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1034 return err; 1034 return err;
1035} 1035}
1036 1036
1037struct rtnl_link_stats64* 1037static struct rtnl_link_stats64*
1038ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) 1038ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
1039{ 1039{
1040 struct ppp *ppp = netdev_priv(dev); 1040 struct ppp *ppp = netdev_priv(dev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0873cdcf39be..2ac2164a1e39 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -68,7 +68,6 @@
68#include <net/netns/generic.h> 68#include <net/netns/generic.h>
69#include <net/rtnetlink.h> 69#include <net/rtnetlink.h>
70#include <net/sock.h> 70#include <net/sock.h>
71#include <net/cls_cgroup.h>
72 71
73#include <asm/uaccess.h> 72#include <asm/uaccess.h>
74 73
@@ -110,16 +109,56 @@ struct tap_filter {
110 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; 109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
111}; 110};
112 111
112/* 1024 is probably a high enough limit: modern hypervisors seem to support on
113 * the order of 100-200 CPUs so this leaves us some breathing space if we want
114 * to match a queue per guest CPU.
115 */
116#define MAX_TAP_QUEUES 1024
117
118#define TUN_FLOW_EXPIRE (3 * HZ)
119
120/* A tun_file connects an open character device to a tuntap netdevice. It
121 * also contains all socket related strctures (except sock_fprog and tap_filter)
122 * to serve as one transmit queue for tuntap device. The sock_fprog and
123 * tap_filter were kept in tun_struct since they were used for filtering for the
124 * netdevice not for a specific queue (at least I didn't see the requirement for
125 * this).
126 *
127 * RCU usage:
128 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
129 * other can only be read while rcu_read_lock or rtnl_lock is held.
130 */
113struct tun_file { 131struct tun_file {
114 atomic_t count; 132 struct sock sk;
115 struct tun_struct *tun; 133 struct socket socket;
134 struct socket_wq wq;
135 struct tun_struct __rcu *tun;
116 struct net *net; 136 struct net *net;
137 struct fasync_struct *fasync;
138 /* only used for fasnyc */
139 unsigned int flags;
140 u16 queue_index;
141};
142
143struct tun_flow_entry {
144 struct hlist_node hash_link;
145 struct rcu_head rcu;
146 struct tun_struct *tun;
147
148 u32 rxhash;
149 int queue_index;
150 unsigned long updated;
117}; 151};
118 152
119struct tun_sock; 153#define TUN_NUM_FLOW_ENTRIES 1024
120 154
155/* Since the socket were moved to tun_file, to preserve the behavior of persist
156 * device, socket filter, sndbuf and vnet header size were restore when the
157 * file were attached to a persist device.
158 */
121struct tun_struct { 159struct tun_struct {
122 struct tun_file *tfile; 160 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
161 unsigned int numqueues;
123 unsigned int flags; 162 unsigned int flags;
124 kuid_t owner; 163 kuid_t owner;
125 kgid_t group; 164 kgid_t group;
@@ -128,88 +167,349 @@ struct tun_struct {
128 netdev_features_t set_features; 167 netdev_features_t set_features;
129#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 168#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
130 NETIF_F_TSO6|NETIF_F_UFO) 169 NETIF_F_TSO6|NETIF_F_UFO)
131 struct fasync_struct *fasync;
132
133 struct tap_filter txflt;
134 struct socket socket;
135 struct socket_wq wq;
136 170
137 int vnet_hdr_sz; 171 int vnet_hdr_sz;
138 172 int sndbuf;
173 struct tap_filter txflt;
174 struct sock_fprog fprog;
175 /* protected by rtnl lock */
176 bool filter_attached;
139#ifdef TUN_DEBUG 177#ifdef TUN_DEBUG
140 int debug; 178 int debug;
141#endif 179#endif
180 spinlock_t lock;
181 struct kmem_cache *flow_cache;
182 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
183 struct timer_list flow_gc_timer;
184 unsigned long ageing_time;
142}; 185};
143 186
144struct tun_sock { 187static inline u32 tun_hashfn(u32 rxhash)
145 struct sock sk; 188{
146 struct tun_struct *tun; 189 return rxhash & 0x3ff;
147}; 190}
148 191
149static inline struct tun_sock *tun_sk(struct sock *sk) 192static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
150{ 193{
151 return container_of(sk, struct tun_sock, sk); 194 struct tun_flow_entry *e;
195 struct hlist_node *n;
196
197 hlist_for_each_entry_rcu(e, n, head, hash_link) {
198 if (e->rxhash == rxhash)
199 return e;
200 }
201 return NULL;
152} 202}
153 203
154static int tun_attach(struct tun_struct *tun, struct file *file) 204static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
205 struct hlist_head *head,
206 u32 rxhash, u16 queue_index)
155{ 207{
156 struct tun_file *tfile = file->private_data; 208 struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
157 int err; 209 GFP_ATOMIC);
210 if (e) {
211 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
212 rxhash, queue_index);
213 e->updated = jiffies;
214 e->rxhash = rxhash;
215 e->queue_index = queue_index;
216 e->tun = tun;
217 hlist_add_head_rcu(&e->hash_link, head);
218 }
219 return e;
220}
158 221
159 ASSERT_RTNL(); 222static void tun_flow_free(struct rcu_head *head)
223{
224 struct tun_flow_entry *e
225 = container_of(head, struct tun_flow_entry, rcu);
226 kmem_cache_free(e->tun->flow_cache, e);
227}
160 228
161 netif_tx_lock_bh(tun->dev); 229static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
230{
231 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
232 e->rxhash, e->queue_index);
233 hlist_del_rcu(&e->hash_link);
234 call_rcu(&e->rcu, tun_flow_free);
235}
162 236
163 err = -EINVAL; 237static void tun_flow_flush(struct tun_struct *tun)
164 if (tfile->tun) 238{
165 goto out; 239 int i;
166 240
167 err = -EBUSY; 241 spin_lock_bh(&tun->lock);
168 if (tun->tfile) 242 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
169 goto out; 243 struct tun_flow_entry *e;
244 struct hlist_node *h, *n;
170 245
171 err = 0; 246 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
172 tfile->tun = tun; 247 tun_flow_delete(tun, e);
173 tun->tfile = tfile; 248 }
174 tun->socket.file = file; 249 spin_unlock_bh(&tun->lock);
175 netif_carrier_on(tun->dev); 250}
176 dev_hold(tun->dev);
177 sock_hold(tun->socket.sk);
178 atomic_inc(&tfile->count);
179 251
180out: 252static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
181 netif_tx_unlock_bh(tun->dev); 253{
182 return err; 254 int i;
255
256 spin_lock_bh(&tun->lock);
257 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
258 struct tun_flow_entry *e;
259 struct hlist_node *h, *n;
260
261 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
262 if (e->queue_index == queue_index)
263 tun_flow_delete(tun, e);
264 }
265 }
266 spin_unlock_bh(&tun->lock);
183} 267}
184 268
185static void __tun_detach(struct tun_struct *tun) 269static void tun_flow_cleanup(unsigned long data)
186{ 270{
187 /* Detach from net device */ 271 struct tun_struct *tun = (struct tun_struct *)data;
188 netif_tx_lock_bh(tun->dev); 272 unsigned long delay = tun->ageing_time;
189 netif_carrier_off(tun->dev); 273 unsigned long next_timer = jiffies + delay;
190 tun->tfile = NULL; 274 unsigned long count = 0;
191 netif_tx_unlock_bh(tun->dev); 275 int i;
192 276
193 /* Drop read queue */ 277 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
194 skb_queue_purge(&tun->socket.sk->sk_receive_queue); 278
279 spin_lock_bh(&tun->lock);
280 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
281 struct tun_flow_entry *e;
282 struct hlist_node *h, *n;
283
284 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
285 unsigned long this_timer;
286 count++;
287 this_timer = e->updated + delay;
288 if (time_before_eq(this_timer, jiffies))
289 tun_flow_delete(tun, e);
290 else if (time_before(this_timer, next_timer))
291 next_timer = this_timer;
292 }
293 }
195 294
196 /* Drop the extra count on the net device */ 295 if (count)
197 dev_put(tun->dev); 296 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
297 spin_unlock_bh(&tun->lock);
198} 298}
199 299
200static void tun_detach(struct tun_struct *tun) 300static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
301 u16 queue_index)
302{
303 struct hlist_head *head;
304 struct tun_flow_entry *e;
305 unsigned long delay = tun->ageing_time;
306 u32 rxhash = skb_get_rxhash(skb);
307
308 if (!rxhash)
309 return;
310 else
311 head = &tun->flows[tun_hashfn(rxhash)];
312
313 rcu_read_lock();
314
315 if (tun->numqueues == 1)
316 goto unlock;
317
318 e = tun_flow_find(head, rxhash);
319 if (likely(e)) {
320 /* TODO: keep queueing to old queue until it's empty? */
321 e->queue_index = queue_index;
322 e->updated = jiffies;
323 } else {
324 spin_lock_bh(&tun->lock);
325 if (!tun_flow_find(head, rxhash))
326 tun_flow_create(tun, head, rxhash, queue_index);
327
328 if (!timer_pending(&tun->flow_gc_timer))
329 mod_timer(&tun->flow_gc_timer,
330 round_jiffies_up(jiffies + delay));
331 spin_unlock_bh(&tun->lock);
332 }
333
334unlock:
335 rcu_read_unlock();
336}
337
338/* We try to identify a flow through its rxhash first. The reason that
339 * we do not check rxq no. is becuase some cards(e.g 82599), chooses
340 * the rxq based on the txq where the last packet of the flow comes. As
341 * the userspace application move between processors, we may get a
342 * different rxq no. here. If we could not get rxhash, then we would
343 * hope the rxq no. may help here.
344 */
345static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
346{
347 struct tun_struct *tun = netdev_priv(dev);
348 struct tun_flow_entry *e;
349 u32 txq = 0;
350 u32 numqueues = 0;
351
352 rcu_read_lock();
353 numqueues = tun->numqueues;
354
355 txq = skb_get_rxhash(skb);
356 if (txq) {
357 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
358 if (e)
359 txq = e->queue_index;
360 else
361 /* use multiply and shift instead of expensive divide */
362 txq = ((u64)txq * numqueues) >> 32;
363 } else if (likely(skb_rx_queue_recorded(skb))) {
364 txq = skb_get_rx_queue(skb);
365 while (unlikely(txq >= numqueues))
366 txq -= numqueues;
367 }
368
369 rcu_read_unlock();
370 return txq;
371}
372
373static inline bool tun_not_capable(struct tun_struct *tun)
374{
375 const struct cred *cred = current_cred();
376 struct net *net = dev_net(tun->dev);
377
378 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
379 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
380 !ns_capable(net->user_ns, CAP_NET_ADMIN);
381}
382
383static void tun_set_real_num_queues(struct tun_struct *tun)
384{
385 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
386 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
387}
388
389static void __tun_detach(struct tun_file *tfile, bool clean)
390{
391 struct tun_file *ntfile;
392 struct tun_struct *tun;
393 struct net_device *dev;
394
395 tun = rcu_dereference_protected(tfile->tun,
396 lockdep_rtnl_is_held());
397 if (tun) {
398 u16 index = tfile->queue_index;
399 BUG_ON(index >= tun->numqueues);
400 dev = tun->dev;
401
402 rcu_assign_pointer(tun->tfiles[index],
403 tun->tfiles[tun->numqueues - 1]);
404 rcu_assign_pointer(tfile->tun, NULL);
405 ntfile = rcu_dereference_protected(tun->tfiles[index],
406 lockdep_rtnl_is_held());
407 ntfile->queue_index = index;
408
409 --tun->numqueues;
410 sock_put(&tfile->sk);
411
412 synchronize_net();
413 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
414 /* Drop read queue */
415 skb_queue_purge(&tfile->sk.sk_receive_queue);
416 tun_set_real_num_queues(tun);
417
418 if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
419 if (dev->reg_state == NETREG_REGISTERED)
420 unregister_netdevice(dev);
421 }
422
423 if (clean) {
424 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
425 &tfile->socket.flags));
426 sk_release_kernel(&tfile->sk);
427 }
428}
429
430static void tun_detach(struct tun_file *tfile, bool clean)
201{ 431{
202 rtnl_lock(); 432 rtnl_lock();
203 __tun_detach(tun); 433 __tun_detach(tfile, clean);
204 rtnl_unlock(); 434 rtnl_unlock();
205} 435}
206 436
437static void tun_detach_all(struct net_device *dev)
438{
439 struct tun_struct *tun = netdev_priv(dev);
440 struct tun_file *tfile;
441 int i, n = tun->numqueues;
442
443 for (i = 0; i < n; i++) {
444 tfile = rcu_dereference_protected(tun->tfiles[i],
445 lockdep_rtnl_is_held());
446 BUG_ON(!tfile);
447 wake_up_all(&tfile->wq.wait);
448 rcu_assign_pointer(tfile->tun, NULL);
449 --tun->numqueues;
450 }
451 BUG_ON(tun->numqueues != 0);
452
453 synchronize_net();
454 for (i = 0; i < n; i++) {
455 tfile = rcu_dereference_protected(tun->tfiles[i],
456 lockdep_rtnl_is_held());
457 /* Drop read queue */
458 skb_queue_purge(&tfile->sk.sk_receive_queue);
459 sock_put(&tfile->sk);
460 }
461}
462
463static int tun_attach(struct tun_struct *tun, struct file *file)
464{
465 struct tun_file *tfile = file->private_data;
466 int err;
467
468 err = -EINVAL;
469 if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
470 goto out;
471
472 err = -EBUSY;
473 if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
474 goto out;
475
476 err = -E2BIG;
477 if (tun->numqueues == MAX_TAP_QUEUES)
478 goto out;
479
480 err = 0;
481
482 /* Re-attach the filter to presist device */
483 if (tun->filter_attached == true) {
484 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
485 if (!err)
486 goto out;
487 }
488 tfile->queue_index = tun->numqueues;
489 rcu_assign_pointer(tfile->tun, tun);
490 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
491 sock_hold(&tfile->sk);
492 tun->numqueues++;
493
494 tun_set_real_num_queues(tun);
495
496 /* device is allowed to go away first, so no need to hold extra
497 * refcnt.
498 */
499
500out:
501 return err;
502}
503
207static struct tun_struct *__tun_get(struct tun_file *tfile) 504static struct tun_struct *__tun_get(struct tun_file *tfile)
208{ 505{
209 struct tun_struct *tun = NULL; 506 struct tun_struct *tun;
210 507
211 if (atomic_inc_not_zero(&tfile->count)) 508 rcu_read_lock();
212 tun = tfile->tun; 509 tun = rcu_dereference(tfile->tun);
510 if (tun)
511 dev_hold(tun->dev);
512 rcu_read_unlock();
213 513
214 return tun; 514 return tun;
215} 515}
@@ -221,10 +521,7 @@ static struct tun_struct *tun_get(struct file *file)
221 521
222static void tun_put(struct tun_struct *tun) 522static void tun_put(struct tun_struct *tun)
223{ 523{
224 struct tun_file *tfile = tun->tfile; 524 dev_put(tun->dev);
225
226 if (atomic_dec_and_test(&tfile->count))
227 tun_detach(tfile->tun);
228} 525}
229 526
230/* TAP filtering */ 527/* TAP filtering */
@@ -344,38 +641,20 @@ static const struct ethtool_ops tun_ethtool_ops;
344/* Net device detach from fd. */ 641/* Net device detach from fd. */
345static void tun_net_uninit(struct net_device *dev) 642static void tun_net_uninit(struct net_device *dev)
346{ 643{
347 struct tun_struct *tun = netdev_priv(dev); 644 tun_detach_all(dev);
348 struct tun_file *tfile = tun->tfile;
349
350 /* Inform the methods they need to stop using the dev.
351 */
352 if (tfile) {
353 wake_up_all(&tun->wq.wait);
354 if (atomic_dec_and_test(&tfile->count))
355 __tun_detach(tun);
356 }
357}
358
359static void tun_free_netdev(struct net_device *dev)
360{
361 struct tun_struct *tun = netdev_priv(dev);
362
363 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
364
365 sk_release_kernel(tun->socket.sk);
366} 645}
367 646
368/* Net device open. */ 647/* Net device open. */
369static int tun_net_open(struct net_device *dev) 648static int tun_net_open(struct net_device *dev)
370{ 649{
371 netif_start_queue(dev); 650 netif_tx_start_all_queues(dev);
372 return 0; 651 return 0;
373} 652}
374 653
375/* Net device close. */ 654/* Net device close. */
376static int tun_net_close(struct net_device *dev) 655static int tun_net_close(struct net_device *dev)
377{ 656{
378 netif_stop_queue(dev); 657 netif_tx_stop_all_queues(dev);
379 return 0; 658 return 0;
380} 659}
381 660
@@ -383,38 +662,36 @@ static int tun_net_close(struct net_device *dev)
383static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) 662static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
384{ 663{
385 struct tun_struct *tun = netdev_priv(dev); 664 struct tun_struct *tun = netdev_priv(dev);
665 int txq = skb->queue_mapping;
666 struct tun_file *tfile;
386 667
387 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); 668 rcu_read_lock();
669 tfile = rcu_dereference(tun->tfiles[txq]);
388 670
389 /* Drop packet if interface is not attached */ 671 /* Drop packet if interface is not attached */
390 if (!tun->tfile) 672 if (txq >= tun->numqueues)
391 goto drop; 673 goto drop;
392 674
675 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
676
677 BUG_ON(!tfile);
678
393 /* Drop if the filter does not like it. 679 /* Drop if the filter does not like it.
394 * This is a noop if the filter is disabled. 680 * This is a noop if the filter is disabled.
395 * Filter can be enabled only for the TAP devices. */ 681 * Filter can be enabled only for the TAP devices. */
396 if (!check_filter(&tun->txflt, skb)) 682 if (!check_filter(&tun->txflt, skb))
397 goto drop; 683 goto drop;
398 684
399 if (tun->socket.sk->sk_filter && 685 if (tfile->socket.sk->sk_filter &&
400 sk_filter(tun->socket.sk, skb)) 686 sk_filter(tfile->socket.sk, skb))
401 goto drop; 687 goto drop;
402 688
403 if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) { 689 /* Limit the number of packets queued by dividing txq length with the
404 if (!(tun->flags & TUN_ONE_QUEUE)) { 690 * number of queues.
405 /* Normal queueing mode. */ 691 */
406 /* Packet scheduler handles dropping of further packets. */ 692 if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
407 netif_stop_queue(dev); 693 >= dev->tx_queue_len / tun->numqueues)
408 694 goto drop;
409 /* We won't see all dropped packets individually, so overrun
410 * error is more appropriate. */
411 dev->stats.tx_fifo_errors++;
412 } else {
413 /* Single queue mode.
414 * Driver handles dropping of all packets itself. */
415 goto drop;
416 }
417 }
418 695
419 /* Orphan the skb - required as we might hang on to it 696 /* Orphan the skb - required as we might hang on to it
420 * for indefinite time. */ 697 * for indefinite time. */
@@ -423,18 +700,22 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
423 skb_orphan(skb); 700 skb_orphan(skb);
424 701
425 /* Enqueue packet */ 702 /* Enqueue packet */
426 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); 703 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
427 704
428 /* Notify and wake up reader process */ 705 /* Notify and wake up reader process */
429 if (tun->flags & TUN_FASYNC) 706 if (tfile->flags & TUN_FASYNC)
430 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 707 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
431 wake_up_interruptible_poll(&tun->wq.wait, POLLIN | 708 wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
432 POLLRDNORM | POLLRDBAND); 709 POLLRDNORM | POLLRDBAND);
710
711 rcu_read_unlock();
433 return NETDEV_TX_OK; 712 return NETDEV_TX_OK;
434 713
435drop: 714drop:
436 dev->stats.tx_dropped++; 715 dev->stats.tx_dropped++;
716 skb_tx_error(skb);
437 kfree_skb(skb); 717 kfree_skb(skb);
718 rcu_read_unlock();
438 return NETDEV_TX_OK; 719 return NETDEV_TX_OK;
439} 720}
440 721
@@ -490,6 +771,7 @@ static const struct net_device_ops tun_netdev_ops = {
490 .ndo_start_xmit = tun_net_xmit, 771 .ndo_start_xmit = tun_net_xmit,
491 .ndo_change_mtu = tun_net_change_mtu, 772 .ndo_change_mtu = tun_net_change_mtu,
492 .ndo_fix_features = tun_net_fix_features, 773 .ndo_fix_features = tun_net_fix_features,
774 .ndo_select_queue = tun_select_queue,
493#ifdef CONFIG_NET_POLL_CONTROLLER 775#ifdef CONFIG_NET_POLL_CONTROLLER
494 .ndo_poll_controller = tun_poll_controller, 776 .ndo_poll_controller = tun_poll_controller,
495#endif 777#endif
@@ -505,11 +787,43 @@ static const struct net_device_ops tap_netdev_ops = {
505 .ndo_set_rx_mode = tun_net_mclist, 787 .ndo_set_rx_mode = tun_net_mclist,
506 .ndo_set_mac_address = eth_mac_addr, 788 .ndo_set_mac_address = eth_mac_addr,
507 .ndo_validate_addr = eth_validate_addr, 789 .ndo_validate_addr = eth_validate_addr,
790 .ndo_select_queue = tun_select_queue,
508#ifdef CONFIG_NET_POLL_CONTROLLER 791#ifdef CONFIG_NET_POLL_CONTROLLER
509 .ndo_poll_controller = tun_poll_controller, 792 .ndo_poll_controller = tun_poll_controller,
510#endif 793#endif
511}; 794};
512 795
796static int tun_flow_init(struct tun_struct *tun)
797{
798 int i;
799
800 tun->flow_cache = kmem_cache_create("tun_flow_cache",
801 sizeof(struct tun_flow_entry), 0, 0,
802 NULL);
803 if (!tun->flow_cache)
804 return -ENOMEM;
805
806 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
807 INIT_HLIST_HEAD(&tun->flows[i]);
808
809 tun->ageing_time = TUN_FLOW_EXPIRE;
810 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
811 mod_timer(&tun->flow_gc_timer,
812 round_jiffies_up(jiffies + tun->ageing_time));
813
814 return 0;
815}
816
817static void tun_flow_uninit(struct tun_struct *tun)
818{
819 del_timer_sync(&tun->flow_gc_timer);
820 tun_flow_flush(tun);
821
822 /* Wait for completion of call_rcu()'s */
823 rcu_barrier();
824 kmem_cache_destroy(tun->flow_cache);
825}
826
513/* Initialize net device. */ 827/* Initialize net device. */
514static void tun_net_init(struct net_device *dev) 828static void tun_net_init(struct net_device *dev)
515{ 829{
@@ -535,6 +849,7 @@ static void tun_net_init(struct net_device *dev)
535 /* Ethernet TAP Device */ 849 /* Ethernet TAP Device */
536 ether_setup(dev); 850 ether_setup(dev);
537 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 851 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
852 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
538 853
539 eth_hw_addr_random(dev); 854 eth_hw_addr_random(dev);
540 855
@@ -546,7 +861,7 @@ static void tun_net_init(struct net_device *dev)
546/* Character device part */ 861/* Character device part */
547 862
548/* Poll */ 863/* Poll */
549static unsigned int tun_chr_poll(struct file *file, poll_table * wait) 864static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
550{ 865{
551 struct tun_file *tfile = file->private_data; 866 struct tun_file *tfile = file->private_data;
552 struct tun_struct *tun = __tun_get(tfile); 867 struct tun_struct *tun = __tun_get(tfile);
@@ -556,11 +871,11 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
556 if (!tun) 871 if (!tun)
557 return POLLERR; 872 return POLLERR;
558 873
559 sk = tun->socket.sk; 874 sk = tfile->socket.sk;
560 875
561 tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); 876 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
562 877
563 poll_wait(file, &tun->wq.wait, wait); 878 poll_wait(file, &tfile->wq.wait, wait);
564 879
565 if (!skb_queue_empty(&sk->sk_receive_queue)) 880 if (!skb_queue_empty(&sk->sk_receive_queue))
566 mask |= POLLIN | POLLRDNORM; 881 mask |= POLLIN | POLLRDNORM;
@@ -579,16 +894,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
579 894
580/* prepad is the amount to reserve at front. len is length after that. 895/* prepad is the amount to reserve at front. len is length after that.
581 * linear is a hint as to how much to copy (usually headers). */ 896 * linear is a hint as to how much to copy (usually headers). */
582static struct sk_buff *tun_alloc_skb(struct tun_struct *tun, 897static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
583 size_t prepad, size_t len, 898 size_t prepad, size_t len,
584 size_t linear, int noblock) 899 size_t linear, int noblock)
585{ 900{
586 struct sock *sk = tun->socket.sk; 901 struct sock *sk = tfile->socket.sk;
587 struct sk_buff *skb; 902 struct sk_buff *skb;
588 int err; 903 int err;
589 904
590 sock_update_classid(sk);
591
592 /* Under a page? Don't bother with paged skb. */ 905 /* Under a page? Don't bother with paged skb. */
593 if (prepad + len < PAGE_SIZE || !linear) 906 if (prepad + len < PAGE_SIZE || !linear)
594 linear = len; 907 linear = len;
@@ -685,9 +998,9 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
685} 998}
686 999
687/* Get packet from user space buffer */ 1000/* Get packet from user space buffer */
688static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control, 1001static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
689 const struct iovec *iv, size_t total_len, 1002 void *msg_control, const struct iovec *iv,
690 size_t count, int noblock) 1003 size_t total_len, size_t count, int noblock)
691{ 1004{
692 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; 1005 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
693 struct sk_buff *skb; 1006 struct sk_buff *skb;
@@ -757,7 +1070,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
757 } else 1070 } else
758 copylen = len; 1071 copylen = len;
759 1072
760 skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock); 1073 skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
761 if (IS_ERR(skb)) { 1074 if (IS_ERR(skb)) {
762 if (PTR_ERR(skb) != -EAGAIN) 1075 if (PTR_ERR(skb) != -EAGAIN)
763 tun->dev->stats.rx_dropped++; 1076 tun->dev->stats.rx_dropped++;
@@ -854,6 +1167,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
854 tun->dev->stats.rx_packets++; 1167 tun->dev->stats.rx_packets++;
855 tun->dev->stats.rx_bytes += len; 1168 tun->dev->stats.rx_bytes += len;
856 1169
1170 tun_flow_update(tun, skb, tfile->queue_index);
857 return total_len; 1171 return total_len;
858} 1172}
859 1173
@@ -862,6 +1176,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
862{ 1176{
863 struct file *file = iocb->ki_filp; 1177 struct file *file = iocb->ki_filp;
864 struct tun_struct *tun = tun_get(file); 1178 struct tun_struct *tun = tun_get(file);
1179 struct tun_file *tfile = file->private_data;
865 ssize_t result; 1180 ssize_t result;
866 1181
867 if (!tun) 1182 if (!tun)
@@ -869,8 +1184,8 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
869 1184
870 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); 1185 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
871 1186
872 result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count, 1187 result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
873 file->f_flags & O_NONBLOCK); 1188 count, file->f_flags & O_NONBLOCK);
874 1189
875 tun_put(tun); 1190 tun_put(tun);
876 return result; 1191 return result;
@@ -878,6 +1193,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
878 1193
879/* Put packet to the user space buffer */ 1194/* Put packet to the user space buffer */
880static ssize_t tun_put_user(struct tun_struct *tun, 1195static ssize_t tun_put_user(struct tun_struct *tun,
1196 struct tun_file *tfile,
881 struct sk_buff *skb, 1197 struct sk_buff *skb,
882 const struct iovec *iv, int len) 1198 const struct iovec *iv, int len)
883{ 1199{
@@ -957,7 +1273,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
957 return total; 1273 return total;
958} 1274}
959 1275
960static ssize_t tun_do_read(struct tun_struct *tun, 1276static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
961 struct kiocb *iocb, const struct iovec *iv, 1277 struct kiocb *iocb, const struct iovec *iv,
962 ssize_t len, int noblock) 1278 ssize_t len, int noblock)
963{ 1279{
@@ -965,15 +1281,15 @@ static ssize_t tun_do_read(struct tun_struct *tun,
965 struct sk_buff *skb; 1281 struct sk_buff *skb;
966 ssize_t ret = 0; 1282 ssize_t ret = 0;
967 1283
968 tun_debug(KERN_INFO, tun, "tun_chr_read\n"); 1284 tun_debug(KERN_INFO, tun, "tun_do_read\n");
969 1285
970 if (unlikely(!noblock)) 1286 if (unlikely(!noblock))
971 add_wait_queue(&tun->wq.wait, &wait); 1287 add_wait_queue(&tfile->wq.wait, &wait);
972 while (len) { 1288 while (len) {
973 current->state = TASK_INTERRUPTIBLE; 1289 current->state = TASK_INTERRUPTIBLE;
974 1290
975 /* Read frames from the queue */ 1291 /* Read frames from the queue */
976 if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) { 1292 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
977 if (noblock) { 1293 if (noblock) {
978 ret = -EAGAIN; 1294 ret = -EAGAIN;
979 break; 1295 break;
@@ -991,16 +1307,15 @@ static ssize_t tun_do_read(struct tun_struct *tun,
991 schedule(); 1307 schedule();
992 continue; 1308 continue;
993 } 1309 }
994 netif_wake_queue(tun->dev);
995 1310
996 ret = tun_put_user(tun, skb, iv, len); 1311 ret = tun_put_user(tun, tfile, skb, iv, len);
997 kfree_skb(skb); 1312 kfree_skb(skb);
998 break; 1313 break;
999 } 1314 }
1000 1315
1001 current->state = TASK_RUNNING; 1316 current->state = TASK_RUNNING;
1002 if (unlikely(!noblock)) 1317 if (unlikely(!noblock))
1003 remove_wait_queue(&tun->wq.wait, &wait); 1318 remove_wait_queue(&tfile->wq.wait, &wait);
1004 1319
1005 return ret; 1320 return ret;
1006} 1321}
@@ -1021,13 +1336,22 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1021 goto out; 1336 goto out;
1022 } 1337 }
1023 1338
1024 ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK); 1339 ret = tun_do_read(tun, tfile, iocb, iv, len,
1340 file->f_flags & O_NONBLOCK);
1025 ret = min_t(ssize_t, ret, len); 1341 ret = min_t(ssize_t, ret, len);
1026out: 1342out:
1027 tun_put(tun); 1343 tun_put(tun);
1028 return ret; 1344 return ret;
1029} 1345}
1030 1346
1347static void tun_free_netdev(struct net_device *dev)
1348{
1349 struct tun_struct *tun = netdev_priv(dev);
1350
1351 tun_flow_uninit(tun);
1352 free_netdev(dev);
1353}
1354
1031static void tun_setup(struct net_device *dev) 1355static void tun_setup(struct net_device *dev)
1032{ 1356{
1033 struct tun_struct *tun = netdev_priv(dev); 1357 struct tun_struct *tun = netdev_priv(dev);
@@ -1056,7 +1380,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
1056 1380
1057static void tun_sock_write_space(struct sock *sk) 1381static void tun_sock_write_space(struct sock *sk)
1058{ 1382{
1059 struct tun_struct *tun; 1383 struct tun_file *tfile;
1060 wait_queue_head_t *wqueue; 1384 wait_queue_head_t *wqueue;
1061 1385
1062 if (!sock_writeable(sk)) 1386 if (!sock_writeable(sk))
@@ -1070,37 +1394,46 @@ static void tun_sock_write_space(struct sock *sk)
1070 wake_up_interruptible_sync_poll(wqueue, POLLOUT | 1394 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1071 POLLWRNORM | POLLWRBAND); 1395 POLLWRNORM | POLLWRBAND);
1072 1396
1073 tun = tun_sk(sk)->tun; 1397 tfile = container_of(sk, struct tun_file, sk);
1074 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 1398 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1075}
1076
1077static void tun_sock_destruct(struct sock *sk)
1078{
1079 free_netdev(tun_sk(sk)->tun->dev);
1080} 1399}
1081 1400
1082static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, 1401static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1083 struct msghdr *m, size_t total_len) 1402 struct msghdr *m, size_t total_len)
1084{ 1403{
1085 struct tun_struct *tun = container_of(sock, struct tun_struct, socket); 1404 int ret;
1086 return tun_get_user(tun, m->msg_control, m->msg_iov, total_len, 1405 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1087 m->msg_iovlen, m->msg_flags & MSG_DONTWAIT); 1406 struct tun_struct *tun = __tun_get(tfile);
1407
1408 if (!tun)
1409 return -EBADFD;
1410 ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1411 m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1412 tun_put(tun);
1413 return ret;
1088} 1414}
1089 1415
1416
1090static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, 1417static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1091 struct msghdr *m, size_t total_len, 1418 struct msghdr *m, size_t total_len,
1092 int flags) 1419 int flags)
1093{ 1420{
1094 struct tun_struct *tun = container_of(sock, struct tun_struct, socket); 1421 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1422 struct tun_struct *tun = __tun_get(tfile);
1095 int ret; 1423 int ret;
1424
1425 if (!tun)
1426 return -EBADFD;
1427
1096 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) 1428 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
1097 return -EINVAL; 1429 return -EINVAL;
1098 ret = tun_do_read(tun, iocb, m->msg_iov, total_len, 1430 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
1099 flags & MSG_DONTWAIT); 1431 flags & MSG_DONTWAIT);
1100 if (ret > total_len) { 1432 if (ret > total_len) {
1101 m->msg_flags |= MSG_TRUNC; 1433 m->msg_flags |= MSG_TRUNC;
1102 ret = flags & MSG_TRUNC ? ret : total_len; 1434 ret = flags & MSG_TRUNC ? ret : total_len;
1103 } 1435 }
1436 tun_put(tun);
1104 return ret; 1437 return ret;
1105} 1438}
1106 1439
@@ -1121,7 +1454,7 @@ static const struct proto_ops tun_socket_ops = {
1121static struct proto tun_proto = { 1454static struct proto tun_proto = {
1122 .name = "tun", 1455 .name = "tun",
1123 .owner = THIS_MODULE, 1456 .owner = THIS_MODULE,
1124 .obj_size = sizeof(struct tun_sock), 1457 .obj_size = sizeof(struct tun_file),
1125}; 1458};
1126 1459
1127static int tun_flags(struct tun_struct *tun) 1460static int tun_flags(struct tun_struct *tun)
@@ -1136,12 +1469,18 @@ static int tun_flags(struct tun_struct *tun)
1136 if (tun->flags & TUN_NO_PI) 1469 if (tun->flags & TUN_NO_PI)
1137 flags |= IFF_NO_PI; 1470 flags |= IFF_NO_PI;
1138 1471
1472 /* This flag has no real effect. We track the value for backwards
1473 * compatibility.
1474 */
1139 if (tun->flags & TUN_ONE_QUEUE) 1475 if (tun->flags & TUN_ONE_QUEUE)
1140 flags |= IFF_ONE_QUEUE; 1476 flags |= IFF_ONE_QUEUE;
1141 1477
1142 if (tun->flags & TUN_VNET_HDR) 1478 if (tun->flags & TUN_VNET_HDR)
1143 flags |= IFF_VNET_HDR; 1479 flags |= IFF_VNET_HDR;
1144 1480
1481 if (tun->flags & TUN_TAP_MQ)
1482 flags |= IFF_MULTI_QUEUE;
1483
1145 return flags; 1484 return flags;
1146} 1485}
1147 1486
@@ -1178,15 +1517,13 @@ static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1178 1517
1179static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) 1518static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1180{ 1519{
1181 struct sock *sk;
1182 struct tun_struct *tun; 1520 struct tun_struct *tun;
1521 struct tun_file *tfile = file->private_data;
1183 struct net_device *dev; 1522 struct net_device *dev;
1184 int err; 1523 int err;
1185 1524
1186 dev = __dev_get_by_name(net, ifr->ifr_name); 1525 dev = __dev_get_by_name(net, ifr->ifr_name);
1187 if (dev) { 1526 if (dev) {
1188 const struct cred *cred = current_cred();
1189
1190 if (ifr->ifr_flags & IFF_TUN_EXCL) 1527 if (ifr->ifr_flags & IFF_TUN_EXCL)
1191 return -EBUSY; 1528 return -EBUSY;
1192 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) 1529 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -1196,11 +1533,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1196 else 1533 else
1197 return -EINVAL; 1534 return -EINVAL;
1198 1535
1199 if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || 1536 if (tun_not_capable(tun))
1200 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
1201 !capable(CAP_NET_ADMIN))
1202 return -EPERM; 1537 return -EPERM;
1203 err = security_tun_dev_attach(tun->socket.sk); 1538 err = security_tun_dev_attach(tfile->socket.sk);
1204 if (err < 0) 1539 if (err < 0)
1205 return err; 1540 return err;
1206 1541
@@ -1212,7 +1547,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1212 char *name; 1547 char *name;
1213 unsigned long flags = 0; 1548 unsigned long flags = 0;
1214 1549
1215 if (!capable(CAP_NET_ADMIN)) 1550 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1216 return -EPERM; 1551 return -EPERM;
1217 err = security_tun_dev_create(); 1552 err = security_tun_dev_create();
1218 if (err < 0) 1553 if (err < 0)
@@ -1233,8 +1568,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1233 if (*ifr->ifr_name) 1568 if (*ifr->ifr_name)
1234 name = ifr->ifr_name; 1569 name = ifr->ifr_name;
1235 1570
1236 dev = alloc_netdev(sizeof(struct tun_struct), name, 1571 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1237 tun_setup); 1572 tun_setup,
1573 MAX_TAP_QUEUES, MAX_TAP_QUEUES);
1238 if (!dev) 1574 if (!dev)
1239 return -ENOMEM; 1575 return -ENOMEM;
1240 1576
@@ -1246,46 +1582,38 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1246 tun->flags = flags; 1582 tun->flags = flags;
1247 tun->txflt.count = 0; 1583 tun->txflt.count = 0;
1248 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); 1584 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1249 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
1250
1251 err = -ENOMEM;
1252 sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
1253 if (!sk)
1254 goto err_free_dev;
1255 1585
1256 sk_change_net(sk, net); 1586 tun->filter_attached = false;
1257 tun->socket.wq = &tun->wq; 1587 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1258 init_waitqueue_head(&tun->wq.wait);
1259 tun->socket.ops = &tun_socket_ops;
1260 sock_init_data(&tun->socket, sk);
1261 sk->sk_write_space = tun_sock_write_space;
1262 sk->sk_sndbuf = INT_MAX;
1263 sock_set_flag(sk, SOCK_ZEROCOPY);
1264 1588
1265 tun_sk(sk)->tun = tun; 1589 spin_lock_init(&tun->lock);
1266 1590
1267 security_tun_dev_post_create(sk); 1591 security_tun_dev_post_create(&tfile->sk);
1268 1592
1269 tun_net_init(dev); 1593 tun_net_init(dev);
1270 1594
1595 err = tun_flow_init(tun);
1596 if (err < 0)
1597 goto err_free_dev;
1598
1271 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | 1599 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1272 TUN_USER_FEATURES; 1600 TUN_USER_FEATURES;
1273 dev->features = dev->hw_features; 1601 dev->features = dev->hw_features;
1274 1602
1603 err = tun_attach(tun, file);
1604 if (err < 0)
1605 goto err_free_dev;
1606
1275 err = register_netdevice(tun->dev); 1607 err = register_netdevice(tun->dev);
1276 if (err < 0) 1608 if (err < 0)
1277 goto err_free_sk; 1609 goto err_free_dev;
1278 1610
1279 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || 1611 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1280 device_create_file(&tun->dev->dev, &dev_attr_owner) || 1612 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1281 device_create_file(&tun->dev->dev, &dev_attr_group)) 1613 device_create_file(&tun->dev->dev, &dev_attr_group))
1282 pr_err("Failed to create tun sysfs files\n"); 1614 pr_err("Failed to create tun sysfs files\n");
1283 1615
1284 sk->sk_destruct = tun_sock_destruct; 1616 netif_carrier_on(tun->dev);
1285
1286 err = tun_attach(tun, file);
1287 if (err < 0)
1288 goto failed;
1289 } 1617 }
1290 1618
1291 tun_debug(KERN_INFO, tun, "tun_set_iff\n"); 1619 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
@@ -1295,6 +1623,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1295 else 1623 else
1296 tun->flags &= ~TUN_NO_PI; 1624 tun->flags &= ~TUN_NO_PI;
1297 1625
1626 /* This flag has no real effect. We track the value for backwards
1627 * compatibility.
1628 */
1298 if (ifr->ifr_flags & IFF_ONE_QUEUE) 1629 if (ifr->ifr_flags & IFF_ONE_QUEUE)
1299 tun->flags |= TUN_ONE_QUEUE; 1630 tun->flags |= TUN_ONE_QUEUE;
1300 else 1631 else
@@ -1305,24 +1636,26 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1305 else 1636 else
1306 tun->flags &= ~TUN_VNET_HDR; 1637 tun->flags &= ~TUN_VNET_HDR;
1307 1638
1639 if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1640 tun->flags |= TUN_TAP_MQ;
1641 else
1642 tun->flags &= ~TUN_TAP_MQ;
1643
1308 /* Make sure persistent devices do not get stuck in 1644 /* Make sure persistent devices do not get stuck in
1309 * xoff state. 1645 * xoff state.
1310 */ 1646 */
1311 if (netif_running(tun->dev)) 1647 if (netif_running(tun->dev))
1312 netif_wake_queue(tun->dev); 1648 netif_tx_wake_all_queues(tun->dev);
1313 1649
1314 strcpy(ifr->ifr_name, tun->dev->name); 1650 strcpy(ifr->ifr_name, tun->dev->name);
1315 return 0; 1651 return 0;
1316 1652
1317 err_free_sk:
1318 tun_free_netdev(dev);
1319 err_free_dev: 1653 err_free_dev:
1320 free_netdev(dev); 1654 free_netdev(dev);
1321 failed:
1322 return err; 1655 return err;
1323} 1656}
1324 1657
1325static int tun_get_iff(struct net *net, struct tun_struct *tun, 1658static void tun_get_iff(struct net *net, struct tun_struct *tun,
1326 struct ifreq *ifr) 1659 struct ifreq *ifr)
1327{ 1660{
1328 tun_debug(KERN_INFO, tun, "tun_get_iff\n"); 1661 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -1331,7 +1664,6 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
1331 1664
1332 ifr->ifr_flags = tun_flags(tun); 1665 ifr->ifr_flags = tun_flags(tun);
1333 1666
1334 return 0;
1335} 1667}
1336 1668
1337/* This is like a cut-down ethtool ops, except done via tun fd so no 1669/* This is like a cut-down ethtool ops, except done via tun fd so no
@@ -1373,13 +1705,91 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1373 return 0; 1705 return 0;
1374} 1706}
1375 1707
1708static void tun_detach_filter(struct tun_struct *tun, int n)
1709{
1710 int i;
1711 struct tun_file *tfile;
1712
1713 for (i = 0; i < n; i++) {
1714 tfile = rcu_dereference_protected(tun->tfiles[i],
1715 lockdep_rtnl_is_held());
1716 sk_detach_filter(tfile->socket.sk);
1717 }
1718
1719 tun->filter_attached = false;
1720}
1721
1722static int tun_attach_filter(struct tun_struct *tun)
1723{
1724 int i, ret = 0;
1725 struct tun_file *tfile;
1726
1727 for (i = 0; i < tun->numqueues; i++) {
1728 tfile = rcu_dereference_protected(tun->tfiles[i],
1729 lockdep_rtnl_is_held());
1730 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1731 if (ret) {
1732 tun_detach_filter(tun, i);
1733 return ret;
1734 }
1735 }
1736
1737 tun->filter_attached = true;
1738 return ret;
1739}
1740
1741static void tun_set_sndbuf(struct tun_struct *tun)
1742{
1743 struct tun_file *tfile;
1744 int i;
1745
1746 for (i = 0; i < tun->numqueues; i++) {
1747 tfile = rcu_dereference_protected(tun->tfiles[i],
1748 lockdep_rtnl_is_held());
1749 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1750 }
1751}
1752
1753static int tun_set_queue(struct file *file, struct ifreq *ifr)
1754{
1755 struct tun_file *tfile = file->private_data;
1756 struct tun_struct *tun;
1757 struct net_device *dev;
1758 int ret = 0;
1759
1760 rtnl_lock();
1761
1762 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1763 dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
1764 if (!dev) {
1765 ret = -EINVAL;
1766 goto unlock;
1767 }
1768
1769 tun = netdev_priv(dev);
1770 if (dev->netdev_ops != &tap_netdev_ops &&
1771 dev->netdev_ops != &tun_netdev_ops)
1772 ret = -EINVAL;
1773 else if (tun_not_capable(tun))
1774 ret = -EPERM;
1775 else
1776 ret = tun_attach(tun, file);
1777 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
1778 __tun_detach(tfile, false);
1779 else
1780 ret = -EINVAL;
1781
1782unlock:
1783 rtnl_unlock();
1784 return ret;
1785}
1786
1376static long __tun_chr_ioctl(struct file *file, unsigned int cmd, 1787static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1377 unsigned long arg, int ifreq_len) 1788 unsigned long arg, int ifreq_len)
1378{ 1789{
1379 struct tun_file *tfile = file->private_data; 1790 struct tun_file *tfile = file->private_data;
1380 struct tun_struct *tun; 1791 struct tun_struct *tun;
1381 void __user* argp = (void __user*)arg; 1792 void __user* argp = (void __user*)arg;
1382 struct sock_fprog fprog;
1383 struct ifreq ifr; 1793 struct ifreq ifr;
1384 kuid_t owner; 1794 kuid_t owner;
1385 kgid_t group; 1795 kgid_t group;
@@ -1387,7 +1797,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1387 int vnet_hdr_sz; 1797 int vnet_hdr_sz;
1388 int ret; 1798 int ret;
1389 1799
1390 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) { 1800 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1391 if (copy_from_user(&ifr, argp, ifreq_len)) 1801 if (copy_from_user(&ifr, argp, ifreq_len))
1392 return -EFAULT; 1802 return -EFAULT;
1393 } else { 1803 } else {
@@ -1398,10 +1808,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1398 * This is needed because we never checked for invalid flags on 1808 * This is needed because we never checked for invalid flags on
1399 * TUNSETIFF. */ 1809 * TUNSETIFF. */
1400 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | 1810 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1401 IFF_VNET_HDR, 1811 IFF_VNET_HDR | IFF_MULTI_QUEUE,
1402 (unsigned int __user*)argp); 1812 (unsigned int __user*)argp);
1403 } 1813 } else if (cmd == TUNSETQUEUE)
1814 return tun_set_queue(file, &ifr);
1404 1815
1816 ret = 0;
1405 rtnl_lock(); 1817 rtnl_lock();
1406 1818
1407 tun = __tun_get(tfile); 1819 tun = __tun_get(tfile);
@@ -1422,14 +1834,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1422 if (!tun) 1834 if (!tun)
1423 goto unlock; 1835 goto unlock;
1424 1836
1425 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd); 1837 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1426 1838
1427 ret = 0; 1839 ret = 0;
1428 switch (cmd) { 1840 switch (cmd) {
1429 case TUNGETIFF: 1841 case TUNGETIFF:
1430 ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); 1842 tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1431 if (ret)
1432 break;
1433 1843
1434 if (copy_to_user(argp, &ifr, ifreq_len)) 1844 if (copy_to_user(argp, &ifr, ifreq_len))
1435 ret = -EFAULT; 1845 ret = -EFAULT;
@@ -1444,11 +1854,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1444 break; 1854 break;
1445 1855
1446 case TUNSETPERSIST: 1856 case TUNSETPERSIST:
1447 /* Disable/Enable persist mode */ 1857 /* Disable/Enable persist mode. Keep an extra reference to the
1448 if (arg) 1858 * module to prevent the module being unprobed.
1859 */
1860 if (arg) {
1449 tun->flags |= TUN_PERSIST; 1861 tun->flags |= TUN_PERSIST;
1450 else 1862 __module_get(THIS_MODULE);
1863 } else {
1451 tun->flags &= ~TUN_PERSIST; 1864 tun->flags &= ~TUN_PERSIST;
1865 module_put(THIS_MODULE);
1866 }
1452 1867
1453 tun_debug(KERN_INFO, tun, "persist %s\n", 1868 tun_debug(KERN_INFO, tun, "persist %s\n",
1454 arg ? "enabled" : "disabled"); 1869 arg ? "enabled" : "disabled");
@@ -1462,7 +1877,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1462 break; 1877 break;
1463 } 1878 }
1464 tun->owner = owner; 1879 tun->owner = owner;
1465 tun_debug(KERN_INFO, tun, "owner set to %d\n", 1880 tun_debug(KERN_INFO, tun, "owner set to %u\n",
1466 from_kuid(&init_user_ns, tun->owner)); 1881 from_kuid(&init_user_ns, tun->owner));
1467 break; 1882 break;
1468 1883
@@ -1474,7 +1889,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1474 break; 1889 break;
1475 } 1890 }
1476 tun->group = group; 1891 tun->group = group;
1477 tun_debug(KERN_INFO, tun, "group set to %d\n", 1892 tun_debug(KERN_INFO, tun, "group set to %u\n",
1478 from_kgid(&init_user_ns, tun->group)); 1893 from_kgid(&init_user_ns, tun->group));
1479 break; 1894 break;
1480 1895
@@ -1526,7 +1941,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1526 break; 1941 break;
1527 1942
1528 case TUNGETSNDBUF: 1943 case TUNGETSNDBUF:
1529 sndbuf = tun->socket.sk->sk_sndbuf; 1944 sndbuf = tfile->socket.sk->sk_sndbuf;
1530 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) 1945 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1531 ret = -EFAULT; 1946 ret = -EFAULT;
1532 break; 1947 break;
@@ -1537,7 +1952,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1537 break; 1952 break;
1538 } 1953 }
1539 1954
1540 tun->socket.sk->sk_sndbuf = sndbuf; 1955 tun->sndbuf = sndbuf;
1956 tun_set_sndbuf(tun);
1541 break; 1957 break;
1542 1958
1543 case TUNGETVNETHDRSZ: 1959 case TUNGETVNETHDRSZ:
@@ -1565,10 +1981,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1565 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1981 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1566 break; 1982 break;
1567 ret = -EFAULT; 1983 ret = -EFAULT;
1568 if (copy_from_user(&fprog, argp, sizeof(fprog))) 1984 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
1569 break; 1985 break;
1570 1986
1571 ret = sk_attach_filter(&fprog, tun->socket.sk); 1987 ret = tun_attach_filter(tun);
1572 break; 1988 break;
1573 1989
1574 case TUNDETACHFILTER: 1990 case TUNDETACHFILTER:
@@ -1576,7 +1992,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1576 ret = -EINVAL; 1992 ret = -EINVAL;
1577 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) 1993 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1578 break; 1994 break;
1579 ret = sk_detach_filter(tun->socket.sk); 1995 ret = 0;
1996 tun_detach_filter(tun, tun->numqueues);
1580 break; 1997 break;
1581 1998
1582 default: 1999 default:
@@ -1628,27 +2045,21 @@ static long tun_chr_compat_ioctl(struct file *file,
1628 2045
1629static int tun_chr_fasync(int fd, struct file *file, int on) 2046static int tun_chr_fasync(int fd, struct file *file, int on)
1630{ 2047{
1631 struct tun_struct *tun = tun_get(file); 2048 struct tun_file *tfile = file->private_data;
1632 int ret; 2049 int ret;
1633 2050
1634 if (!tun) 2051 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
1635 return -EBADFD;
1636
1637 tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1638
1639 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1640 goto out; 2052 goto out;
1641 2053
1642 if (on) { 2054 if (on) {
1643 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); 2055 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
1644 if (ret) 2056 if (ret)
1645 goto out; 2057 goto out;
1646 tun->flags |= TUN_FASYNC; 2058 tfile->flags |= TUN_FASYNC;
1647 } else 2059 } else
1648 tun->flags &= ~TUN_FASYNC; 2060 tfile->flags &= ~TUN_FASYNC;
1649 ret = 0; 2061 ret = 0;
1650out: 2062out:
1651 tun_put(tun);
1652 return ret; 2063 return ret;
1653} 2064}
1654 2065
@@ -1658,44 +2069,39 @@ static int tun_chr_open(struct inode *inode, struct file * file)
1658 2069
1659 DBG1(KERN_INFO, "tunX: tun_chr_open\n"); 2070 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1660 2071
1661 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 2072 tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2073 &tun_proto);
1662 if (!tfile) 2074 if (!tfile)
1663 return -ENOMEM; 2075 return -ENOMEM;
1664 atomic_set(&tfile->count, 0); 2076 rcu_assign_pointer(tfile->tun, NULL);
1665 tfile->tun = NULL;
1666 tfile->net = get_net(current->nsproxy->net_ns); 2077 tfile->net = get_net(current->nsproxy->net_ns);
2078 tfile->flags = 0;
2079
2080 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2081 init_waitqueue_head(&tfile->wq.wait);
2082
2083 tfile->socket.file = file;
2084 tfile->socket.ops = &tun_socket_ops;
2085
2086 sock_init_data(&tfile->socket, &tfile->sk);
2087 sk_change_net(&tfile->sk, tfile->net);
2088
2089 tfile->sk.sk_write_space = tun_sock_write_space;
2090 tfile->sk.sk_sndbuf = INT_MAX;
2091
1667 file->private_data = tfile; 2092 file->private_data = tfile;
2093 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2094
1668 return 0; 2095 return 0;
1669} 2096}
1670 2097
1671static int tun_chr_close(struct inode *inode, struct file *file) 2098static int tun_chr_close(struct inode *inode, struct file *file)
1672{ 2099{
1673 struct tun_file *tfile = file->private_data; 2100 struct tun_file *tfile = file->private_data;
1674 struct tun_struct *tun; 2101 struct net *net = tfile->net;
1675
1676 tun = __tun_get(tfile);
1677 if (tun) {
1678 struct net_device *dev = tun->dev;
1679
1680 tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1681
1682 __tun_detach(tun);
1683
1684 /* If desirable, unregister the netdevice. */
1685 if (!(tun->flags & TUN_PERSIST)) {
1686 rtnl_lock();
1687 if (dev->reg_state == NETREG_REGISTERED)
1688 unregister_netdevice(dev);
1689 rtnl_unlock();
1690 }
1691 }
1692 2102
1693 tun = tfile->tun; 2103 tun_detach(tfile, true);
1694 if (tun) 2104 put_net(net);
1695 sock_put(tun->socket.sk);
1696
1697 put_net(tfile->net);
1698 kfree(tfile);
1699 2105
1700 return 0; 2106 return 0;
1701} 2107}
@@ -1822,14 +2228,13 @@ static void tun_cleanup(void)
1822 * holding a reference to the file for as long as the socket is in use. */ 2228 * holding a reference to the file for as long as the socket is in use. */
1823struct socket *tun_get_socket(struct file *file) 2229struct socket *tun_get_socket(struct file *file)
1824{ 2230{
1825 struct tun_struct *tun; 2231 struct tun_file *tfile;
1826 if (file->f_op != &tun_fops) 2232 if (file->f_op != &tun_fops)
1827 return ERR_PTR(-EINVAL); 2233 return ERR_PTR(-EINVAL);
1828 tun = tun_get(file); 2234 tfile = file->private_data;
1829 if (!tun) 2235 if (!tfile)
1830 return ERR_PTR(-EBADFD); 2236 return ERR_PTR(-EBADFD);
1831 tun_put(tun); 2237 return &tfile->socket;
1832 return &tun->socket;
1833} 2238}
1834EXPORT_SYMBOL_GPL(tun_get_socket); 2239EXPORT_SYMBOL_GPL(tun_get_socket);
1835 2240
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c1ae76968f47..ef976215b649 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -219,6 +219,24 @@ config USB_NET_CDC_NCM
219 * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design) 219 * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
220 * Ericsson F5521gw Mobile Broadband Module 220 * Ericsson F5521gw Mobile Broadband Module
221 221
222config USB_NET_CDC_MBIM
223 tristate "CDC MBIM support"
224 depends on USB_USBNET
225 select USB_WDM
226 select USB_NET_CDC_NCM
227 help
228 This driver provides support for CDC MBIM (Mobile Broadband
229 Interface Model) devices. The CDC MBIM specification is
230 available from <http://www.usb.org/>.
231
232 MBIM devices require configuration using the management
233 protocol defined by the MBIM specification. This driver
234 provides unfiltered access to the MBIM control channel
235 through the associated /dev/cdc-wdmx character device.
236
237 To compile this driver as a module, choose M here: the
238 module will be called cdc_mbim.
239
222config USB_NET_DM9601 240config USB_NET_DM9601
223 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 241 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
224 depends on USB_USBNET 242 depends on USB_USBNET
@@ -230,6 +248,8 @@ config USB_NET_DM9601
230config USB_NET_SMSC75XX 248config USB_NET_SMSC75XX
231 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices" 249 tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
232 depends on USB_USBNET 250 depends on USB_USBNET
251 select BITREVERSE
252 select CRC16
233 select CRC32 253 select CRC32
234 help 254 help
235 This option adds support for SMSC LAN95XX based USB 2.0 255 This option adds support for SMSC LAN95XX based USB 2.0
@@ -238,6 +258,8 @@ config USB_NET_SMSC75XX
238config USB_NET_SMSC95XX 258config USB_NET_SMSC95XX
239 tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices" 259 tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
240 depends on USB_USBNET 260 depends on USB_USBNET
261 select BITREVERSE
262 select CRC16
241 select CRC32 263 select CRC32
242 help 264 help
243 This option adds support for SMSC LAN95XX based USB 2.0 265 This option adds support for SMSC LAN95XX based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index bf063008c1af..478691326f37 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -31,4 +31,5 @@ obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
31obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o 31obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
32obj-$(CONFIG_USB_VL600) += lg-vl600.o 32obj-$(CONFIG_USB_VL600) += lg-vl600.o
33obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o 33obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
34obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
34 35
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 774d9ce2dafc..50d167330d38 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -25,121 +25,30 @@
25int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, 25int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
26 u16 size, void *data) 26 u16 size, void *data)
27{ 27{
28 void *buf; 28 int ret;
29 int err = -ENOMEM; 29 ret = usbnet_read_cmd(dev, cmd,
30 30 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
31 netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n", 31 value, index, data, size);
32 cmd, value, index, size);
33
34 buf = kmalloc(size, GFP_KERNEL);
35 if (!buf)
36 goto out;
37
38 err = usb_control_msg(
39 dev->udev,
40 usb_rcvctrlpipe(dev->udev, 0),
41 cmd,
42 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
43 value,
44 index,
45 buf,
46 size,
47 USB_CTRL_GET_TIMEOUT);
48 if (err == size)
49 memcpy(data, buf, size);
50 else if (err >= 0)
51 err = -EINVAL;
52 kfree(buf);
53 32
54out: 33 if (ret != size && ret >= 0)
55 return err; 34 return -EINVAL;
35 return ret;
56} 36}
57 37
58int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, 38int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
59 u16 size, void *data) 39 u16 size, void *data)
60{ 40{
61 void *buf = NULL; 41 return usbnet_write_cmd(dev, cmd,
62 int err = -ENOMEM; 42 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
63 43 value, index, data, size);
64 netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
65 cmd, value, index, size);
66
67 if (data) {
68 buf = kmemdup(data, size, GFP_KERNEL);
69 if (!buf)
70 goto out;
71 }
72
73 err = usb_control_msg(
74 dev->udev,
75 usb_sndctrlpipe(dev->udev, 0),
76 cmd,
77 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
78 value,
79 index,
80 buf,
81 size,
82 USB_CTRL_SET_TIMEOUT);
83 kfree(buf);
84
85out:
86 return err;
87}
88
89static void asix_async_cmd_callback(struct urb *urb)
90{
91 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
92 int status = urb->status;
93
94 if (status < 0)
95 printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
96 status);
97
98 kfree(req);
99 usb_free_urb(urb);
100} 44}
101 45
102void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index, 46void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
103 u16 size, void *data) 47 u16 size, void *data)
104{ 48{
105 struct usb_ctrlrequest *req; 49 usbnet_write_cmd_async(dev, cmd,
106 int status; 50 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
107 struct urb *urb; 51 value, index, data, size);
108
109 netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
110 cmd, value, index, size);
111
112 urb = usb_alloc_urb(0, GFP_ATOMIC);
113 if (!urb) {
114 netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
115 return;
116 }
117
118 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
119 if (!req) {
120 netdev_err(dev->net, "Failed to allocate memory for control request\n");
121 usb_free_urb(urb);
122 return;
123 }
124
125 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
126 req->bRequest = cmd;
127 req->wValue = cpu_to_le16(value);
128 req->wIndex = cpu_to_le16(index);
129 req->wLength = cpu_to_le16(size);
130
131 usb_fill_control_urb(urb, dev->udev,
132 usb_sndctrlpipe(dev->udev, 0),
133 (void *)req, data, size,
134 asix_async_cmd_callback, req);
135
136 status = usb_submit_urb(urb, GFP_ATOMIC);
137 if (status < 0) {
138 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
139 status);
140 kfree(req);
141 usb_free_urb(urb);
142 }
143} 52}
144 53
145int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 54int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 33ab824773c5..7a6e758f48e7 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -64,6 +64,16 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
64 } 64 }
65} 65}
66 66
67static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
68{
69 if (is_valid_ether_addr(addr)) {
70 memcpy(dev->net->dev_addr, addr, ETH_ALEN);
71 } else {
72 netdev_info(dev->net, "invalid hw address, using random\n");
73 eth_hw_addr_random(dev->net);
74 }
75}
76
67/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */ 77/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
68static u32 asix_get_phyid(struct usbnet *dev) 78static u32 asix_get_phyid(struct usbnet *dev)
69{ 79{
@@ -225,7 +235,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
225 ret); 235 ret);
226 goto out; 236 goto out;
227 } 237 }
228 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 238
239 asix_set_netdev_dev_addr(dev, buf);
229 240
230 /* Initialize MII structure */ 241 /* Initialize MII structure */
231 dev->mii.dev = dev->net; 242 dev->mii.dev = dev->net;
@@ -423,7 +434,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
423 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); 434 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
424 return ret; 435 return ret;
425 } 436 }
426 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 437
438 asix_set_netdev_dev_addr(dev, buf);
427 439
428 /* Initialize MII structure */ 440 /* Initialize MII structure */
429 dev->mii.dev = dev->net; 441 dev->mii.dev = dev->net;
@@ -777,7 +789,8 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
777 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); 789 netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
778 return ret; 790 return ret;
779 } 791 }
780 memcpy(dev->net->dev_addr, buf, ETH_ALEN); 792
793 asix_set_netdev_dev_addr(dev, buf);
781 794
782 /* Initialize MII structure */ 795 /* Initialize MII structure */
783 dev->mii.dev = dev->net; 796 dev->mii.dev = dev->net;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
new file mode 100644
index 000000000000..42f51c71ec1f
--- /dev/null
+++ b/drivers/net/usb/cdc_mbim.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (c) 2012 Smith Micro Software, Inc.
3 * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
4 *
5 * This driver is based on and reuse most of cdc_ncm, which is
6 * Copyright (C) ST-Ericsson 2010-2012
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 */
12
13#include <linux/module.h>
14#include <linux/netdevice.h>
15#include <linux/ethtool.h>
16#include <linux/if_vlan.h>
17#include <linux/ip.h>
18#include <linux/mii.h>
19#include <linux/usb.h>
20#include <linux/usb/cdc.h>
21#include <linux/usb/usbnet.h>
22#include <linux/usb/cdc-wdm.h>
23#include <linux/usb/cdc_ncm.h>
24
25/* driver specific data - must match cdc_ncm usage */
26struct cdc_mbim_state {
27 struct cdc_ncm_ctx *ctx;
28 atomic_t pmcount;
29 struct usb_driver *subdriver;
30 struct usb_interface *control;
31 struct usb_interface *data;
32};
33
34/* using a counter to merge subdriver requests with our own into a combined state */
35static int cdc_mbim_manage_power(struct usbnet *dev, int on)
36{
37 struct cdc_mbim_state *info = (void *)&dev->data;
38 int rv = 0;
39
40 dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
41
42 if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
43 /* need autopm_get/put here to ensure the usbcore sees the new value */
44 rv = usb_autopm_get_interface(dev->intf);
45 if (rv < 0)
46 goto err;
47 dev->intf->needs_remote_wakeup = on;
48 usb_autopm_put_interface(dev->intf);
49 }
50err:
51 return rv;
52}
53
54static int cdc_mbim_wdm_manage_power(struct usb_interface *intf, int status)
55{
56 struct usbnet *dev = usb_get_intfdata(intf);
57
58 /* can be called while disconnecting */
59 if (!dev)
60 return 0;
61
62 return cdc_mbim_manage_power(dev, status);
63}
64
65
66static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
67{
68 struct cdc_ncm_ctx *ctx;
69 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
70 int ret = -ENODEV;
71 u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
72 struct cdc_mbim_state *info = (void *)&dev->data;
73
74 /* see if interface supports MBIM alternate setting */
75 if (intf->num_altsetting == 2) {
76 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
77 usb_set_interface(dev->udev,
78 intf->cur_altsetting->desc.bInterfaceNumber,
79 CDC_NCM_COMM_ALTSETTING_MBIM);
80 data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
81 }
82
83 /* Probably NCM, defer for cdc_ncm_bind */
84 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
85 goto err;
86
87 ret = cdc_ncm_bind_common(dev, intf, data_altsetting);
88 if (ret)
89 goto err;
90
91 ctx = info->ctx;
92
93 /* The MBIM descriptor and the status endpoint are required */
94 if (ctx->mbim_desc && dev->status)
95 subdriver = usb_cdc_wdm_register(ctx->control,
96 &dev->status->desc,
97 le16_to_cpu(ctx->mbim_desc->wMaxControlMessage),
98 cdc_mbim_wdm_manage_power);
99 if (IS_ERR(subdriver)) {
100 ret = PTR_ERR(subdriver);
101 cdc_ncm_unbind(dev, intf);
102 goto err;
103 }
104
105 /* can't let usbnet use the interrupt endpoint */
106 dev->status = NULL;
107 info->subdriver = subdriver;
108
109 /* MBIM cannot do ARP */
110 dev->net->flags |= IFF_NOARP;
111
112 /* no need to put the VLAN tci in the packet headers */
113 dev->net->features |= NETIF_F_HW_VLAN_TX;
114err:
115 return ret;
116}
117
118static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
119{
120 struct cdc_mbim_state *info = (void *)&dev->data;
121 struct cdc_ncm_ctx *ctx = info->ctx;
122
123 /* disconnect subdriver from control interface */
124 if (info->subdriver && info->subdriver->disconnect)
125 info->subdriver->disconnect(ctx->control);
126 info->subdriver = NULL;
127
128 /* let NCM unbind clean up both control and data interface */
129 cdc_ncm_unbind(dev, intf);
130}
131
132
133static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
134{
135 struct sk_buff *skb_out;
136 struct cdc_mbim_state *info = (void *)&dev->data;
137 struct cdc_ncm_ctx *ctx = info->ctx;
138 __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
139 u16 tci = 0;
140 u8 *c;
141
142 if (!ctx)
143 goto error;
144
145 if (skb) {
146 if (skb->len <= sizeof(ETH_HLEN))
147 goto error;
148
149 /* mapping VLANs to MBIM sessions:
150 * no tag => IPS session <0>
151 * 1 - 255 => IPS session <vlanid>
152 * 256 - 511 => DSS session <vlanid - 256>
153 * 512 - 4095 => unsupported, drop
154 */
155 vlan_get_tag(skb, &tci);
156
157 switch (tci & 0x0f00) {
158 case 0x0000: /* VLAN ID 0 - 255 */
159 /* verify that datagram is IPv4 or IPv6 */
160 skb_reset_mac_header(skb);
161 switch (eth_hdr(skb)->h_proto) {
162 case htons(ETH_P_IP):
163 case htons(ETH_P_IPV6):
164 break;
165 default:
166 goto error;
167 }
168 c = (u8 *)&sign;
169 c[3] = tci;
170 break;
171 case 0x0100: /* VLAN ID 256 - 511 */
172 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
173 c = (u8 *)&sign;
174 c[3] = tci;
175 break;
176 default:
177 netif_err(dev, tx_err, dev->net,
178 "unsupported tci=0x%04x\n", tci);
179 goto error;
180 }
181 skb_pull(skb, ETH_HLEN);
182 }
183
184 spin_lock_bh(&ctx->mtx);
185 skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign);
186 spin_unlock_bh(&ctx->mtx);
187 return skb_out;
188
189error:
190 if (skb)
191 dev_kfree_skb_any(skb);
192
193 return NULL;
194}
195
196static struct sk_buff *cdc_mbim_process_dgram(struct usbnet *dev, u8 *buf, size_t len, u16 tci)
197{
198 __be16 proto = htons(ETH_P_802_3);
199 struct sk_buff *skb = NULL;
200
201 if (tci < 256) { /* IPS session? */
202 if (len < sizeof(struct iphdr))
203 goto err;
204
205 switch (*buf & 0xf0) {
206 case 0x40:
207 proto = htons(ETH_P_IP);
208 break;
209 case 0x60:
210 proto = htons(ETH_P_IPV6);
211 break;
212 default:
213 goto err;
214 }
215 }
216
217 skb = netdev_alloc_skb_ip_align(dev->net, len + ETH_HLEN);
218 if (!skb)
219 goto err;
220
221 /* add an ethernet header */
222 skb_put(skb, ETH_HLEN);
223 skb_reset_mac_header(skb);
224 eth_hdr(skb)->h_proto = proto;
225 memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
226 memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
227
228 /* add datagram */
229 memcpy(skb_put(skb, len), buf, len);
230
231 /* map MBIM session to VLAN */
232 if (tci)
233 vlan_put_tag(skb, tci);
234err:
235 return skb;
236}
237
238static int cdc_mbim_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
239{
240 struct sk_buff *skb;
241 struct cdc_mbim_state *info = (void *)&dev->data;
242 struct cdc_ncm_ctx *ctx = info->ctx;
243 int len;
244 int nframes;
245 int x;
246 int offset;
247 struct usb_cdc_ncm_ndp16 *ndp16;
248 struct usb_cdc_ncm_dpe16 *dpe16;
249 int ndpoffset;
250 int loopcount = 50; /* arbitrary max preventing infinite loop */
251 u8 *c;
252 u16 tci;
253
254 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
255 if (ndpoffset < 0)
256 goto error;
257
258next_ndp:
259 nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
260 if (nframes < 0)
261 goto error;
262
263 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
264
265 switch (ndp16->dwSignature & cpu_to_le32(0x00ffffff)) {
266 case cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN):
267 c = (u8 *)&ndp16->dwSignature;
268 tci = c[3];
269 break;
270 case cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN):
271 c = (u8 *)&ndp16->dwSignature;
272 tci = c[3] + 256;
273 break;
274 default:
275 netif_dbg(dev, rx_err, dev->net,
276 "unsupported NDP signature <0x%08x>\n",
277 le32_to_cpu(ndp16->dwSignature));
278 goto err_ndp;
279
280 }
281
282 dpe16 = ndp16->dpe16;
283 for (x = 0; x < nframes; x++, dpe16++) {
284 offset = le16_to_cpu(dpe16->wDatagramIndex);
285 len = le16_to_cpu(dpe16->wDatagramLength);
286
287 /*
288 * CDC NCM ch. 3.7
289 * All entries after first NULL entry are to be ignored
290 */
291 if ((offset == 0) || (len == 0)) {
292 if (!x)
293 goto err_ndp; /* empty NTB */
294 break;
295 }
296
297 /* sanity checking */
298 if (((offset + len) > skb_in->len) || (len > ctx->rx_max)) {
299 netif_dbg(dev, rx_err, dev->net,
300 "invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
301 x, offset, len, skb_in);
302 if (!x)
303 goto err_ndp;
304 break;
305 } else {
306 skb = cdc_mbim_process_dgram(dev, skb_in->data + offset, len, tci);
307 if (!skb)
308 goto error;
309 usbnet_skb_return(dev, skb);
310 }
311 }
312err_ndp:
313 /* are there more NDPs to process? */
314 ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
315 if (ndpoffset && loopcount--)
316 goto next_ndp;
317
318 return 1;
319error:
320 return 0;
321}
322
323static int cdc_mbim_suspend(struct usb_interface *intf, pm_message_t message)
324{
325 int ret = 0;
326 struct usbnet *dev = usb_get_intfdata(intf);
327 struct cdc_mbim_state *info = (void *)&dev->data;
328 struct cdc_ncm_ctx *ctx = info->ctx;
329
330 if (ctx == NULL) {
331 ret = -1;
332 goto error;
333 }
334
335 ret = usbnet_suspend(intf, message);
336 if (ret < 0)
337 goto error;
338
339 if (intf == ctx->control && info->subdriver && info->subdriver->suspend)
340 ret = info->subdriver->suspend(intf, message);
341 if (ret < 0)
342 usbnet_resume(intf);
343
344error:
345 return ret;
346}
347
348static int cdc_mbim_resume(struct usb_interface *intf)
349{
350 int ret = 0;
351 struct usbnet *dev = usb_get_intfdata(intf);
352 struct cdc_mbim_state *info = (void *)&dev->data;
353 struct cdc_ncm_ctx *ctx = info->ctx;
354 bool callsub = (intf == ctx->control && info->subdriver && info->subdriver->resume);
355
356 if (callsub)
357 ret = info->subdriver->resume(intf);
358 if (ret < 0)
359 goto err;
360 ret = usbnet_resume(intf);
361 if (ret < 0 && callsub && info->subdriver->suspend)
362 info->subdriver->suspend(intf, PMSG_SUSPEND);
363err:
364 return ret;
365}
366
367static const struct driver_info cdc_mbim_info = {
368 .description = "CDC MBIM",
369 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
370 .bind = cdc_mbim_bind,
371 .unbind = cdc_mbim_unbind,
372 .manage_power = cdc_mbim_manage_power,
373 .rx_fixup = cdc_mbim_rx_fixup,
374 .tx_fixup = cdc_mbim_tx_fixup,
375};
376
377static const struct usb_device_id mbim_devs[] = {
378 /* This duplicate NCM entry is intentional. MBIM devices can
379 * be disguised as NCM by default, and this is necessary to
380 * allow us to bind the correct driver_info to such devices.
381 *
382 * bind() will sort out this for us, selecting the correct
383 * entry and reject the other
384 */
385 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
386 .driver_info = (unsigned long)&cdc_mbim_info,
387 },
388 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
389 .driver_info = (unsigned long)&cdc_mbim_info,
390 },
391 {
392 },
393};
394MODULE_DEVICE_TABLE(usb, mbim_devs);
395
396static struct usb_driver cdc_mbim_driver = {
397 .name = "cdc_mbim",
398 .id_table = mbim_devs,
399 .probe = usbnet_probe,
400 .disconnect = usbnet_disconnect,
401 .suspend = cdc_mbim_suspend,
402 .resume = cdc_mbim_resume,
403 .reset_resume = cdc_mbim_resume,
404 .supports_autosuspend = 1,
405 .disable_hub_initiated_lpm = 1,
406};
407module_usb_driver(cdc_mbim_driver);
408
409MODULE_AUTHOR("Greg Suarez <gsuarez@smithmicro.com>");
410MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
411MODULE_DESCRIPTION("USB CDC MBIM host driver");
412MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 74fab1a40156..d38bc20a60e2 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -51,90 +51,10 @@
51#include <linux/atomic.h> 51#include <linux/atomic.h>
52#include <linux/usb/usbnet.h> 52#include <linux/usb/usbnet.h>
53#include <linux/usb/cdc.h> 53#include <linux/usb/cdc.h>
54#include <linux/usb/cdc_ncm.h>
54 55
55#define DRIVER_VERSION "14-Mar-2012" 56#define DRIVER_VERSION "14-Mar-2012"
56 57
57/* CDC NCM subclass 3.2.1 */
58#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
59
60/* Maximum NTB length */
61#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */
62#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
63
64/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
65#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
66
67#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
68
69/* Default value for MaxDatagramSize */
70#define CDC_NCM_MAX_DATAGRAM_SIZE 8192 /* bytes */
71
72/*
73 * Maximum amount of datagrams in NCM Datagram Pointer Table, not counting
74 * the last NULL entry.
75 */
76#define CDC_NCM_DPT_DATAGRAMS_MAX 40
77
78/* Restart the timer, if amount of datagrams is less than given value */
79#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
80#define CDC_NCM_TIMER_PENDING_CNT 2
81#define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC)
82
83/* The following macro defines the minimum header space */
84#define CDC_NCM_MIN_HDR_SIZE \
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87
88struct cdc_ncm_data {
89 struct usb_cdc_ncm_nth16 nth16;
90 struct usb_cdc_ncm_ndp16 ndp16;
91 struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
92};
93
94struct cdc_ncm_ctx {
95 struct cdc_ncm_data tx_ncm;
96 struct usb_cdc_ncm_ntb_parameters ncm_parm;
97 struct hrtimer tx_timer;
98 struct tasklet_struct bh;
99
100 const struct usb_cdc_ncm_desc *func_desc;
101 const struct usb_cdc_header_desc *header_desc;
102 const struct usb_cdc_union_desc *union_desc;
103 const struct usb_cdc_ether_desc *ether_desc;
104
105 struct net_device *netdev;
106 struct usb_device *udev;
107 struct usb_host_endpoint *in_ep;
108 struct usb_host_endpoint *out_ep;
109 struct usb_host_endpoint *status_ep;
110 struct usb_interface *intf;
111 struct usb_interface *control;
112 struct usb_interface *data;
113
114 struct sk_buff *tx_curr_skb;
115 struct sk_buff *tx_rem_skb;
116
117 spinlock_t mtx;
118 atomic_t stop;
119
120 u32 tx_timer_pending;
121 u32 tx_curr_offset;
122 u32 tx_curr_last_offset;
123 u32 tx_curr_frame_num;
124 u32 rx_speed;
125 u32 tx_speed;
126 u32 rx_max;
127 u32 tx_max;
128 u32 max_datagram_size;
129 u16 tx_max_datagrams;
130 u16 tx_remainder;
131 u16 tx_modulus;
132 u16 tx_ndp_modulus;
133 u16 tx_seq;
134 u16 rx_seq;
135 u16 connected;
136};
137
138static void cdc_ncm_txpath_bh(unsigned long param); 58static void cdc_ncm_txpath_bh(unsigned long param);
139static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); 59static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
140static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 60static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -158,17 +78,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
158 u8 flags; 78 u8 flags;
159 u8 iface_no; 79 u8 iface_no;
160 int err; 80 int err;
81 int eth_hlen;
161 u16 ntb_fmt_supported; 82 u16 ntb_fmt_supported;
83 u32 min_dgram_size;
84 u32 min_hdr_size;
85 struct usbnet *dev = netdev_priv(ctx->netdev);
162 86
163 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; 87 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
164 88
165 err = usb_control_msg(ctx->udev, 89 err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
166 usb_rcvctrlpipe(ctx->udev, 0), 90 USB_TYPE_CLASS | USB_DIR_IN
167 USB_CDC_GET_NTB_PARAMETERS, 91 |USB_RECIP_INTERFACE,
168 USB_TYPE_CLASS | USB_DIR_IN 92 0, iface_no, &ctx->ncm_parm,
169 | USB_RECIP_INTERFACE, 93 sizeof(ctx->ncm_parm));
170 0, iface_no, &ctx->ncm_parm,
171 sizeof(ctx->ncm_parm), 10000);
172 if (err < 0) { 94 if (err < 0) {
173 pr_debug("failed GET_NTB_PARAMETERS\n"); 95 pr_debug("failed GET_NTB_PARAMETERS\n");
174 return 1; 96 return 1;
@@ -184,10 +106,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
184 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); 106 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
185 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); 107 ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
186 108
187 if (ctx->func_desc != NULL) 109 eth_hlen = ETH_HLEN;
110 min_dgram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
111 min_hdr_size = CDC_NCM_MIN_HDR_SIZE;
112 if (ctx->mbim_desc != NULL) {
113 flags = ctx->mbim_desc->bmNetworkCapabilities;
114 eth_hlen = 0;
115 min_dgram_size = CDC_MBIM_MIN_DATAGRAM_SIZE;
116 min_hdr_size = 0;
117 } else if (ctx->func_desc != NULL) {
188 flags = ctx->func_desc->bmNetworkCapabilities; 118 flags = ctx->func_desc->bmNetworkCapabilities;
189 else 119 } else {
190 flags = 0; 120 flags = 0;
121 }
191 122
192 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u " 123 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
193 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u " 124 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
@@ -215,49 +146,19 @@ static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
215 146
216 /* inform device about NTB input size changes */ 147 /* inform device about NTB input size changes */
217 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { 148 if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
149 __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
218 150
219 if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) { 151 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
220 struct usb_cdc_ncm_ndp_input_size *ndp_in_sz; 152 USB_TYPE_CLASS | USB_DIR_OUT
221 153 | USB_RECIP_INTERFACE,
222 ndp_in_sz = kzalloc(sizeof(*ndp_in_sz), GFP_KERNEL); 154 0, iface_no, &dwNtbInMaxSize, 4);
223 if (!ndp_in_sz) {
224 err = -ENOMEM;
225 goto size_err;
226 }
227
228 err = usb_control_msg(ctx->udev,
229 usb_sndctrlpipe(ctx->udev, 0),
230 USB_CDC_SET_NTB_INPUT_SIZE,
231 USB_TYPE_CLASS | USB_DIR_OUT
232 | USB_RECIP_INTERFACE,
233 0, iface_no, ndp_in_sz, 8, 1000);
234 kfree(ndp_in_sz);
235 } else {
236 __le32 *dwNtbInMaxSize;
237 dwNtbInMaxSize = kzalloc(sizeof(*dwNtbInMaxSize),
238 GFP_KERNEL);
239 if (!dwNtbInMaxSize) {
240 err = -ENOMEM;
241 goto size_err;
242 }
243 *dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
244
245 err = usb_control_msg(ctx->udev,
246 usb_sndctrlpipe(ctx->udev, 0),
247 USB_CDC_SET_NTB_INPUT_SIZE,
248 USB_TYPE_CLASS | USB_DIR_OUT
249 | USB_RECIP_INTERFACE,
250 0, iface_no, dwNtbInMaxSize, 4, 1000);
251 kfree(dwNtbInMaxSize);
252 }
253size_err:
254 if (err < 0) 155 if (err < 0)
255 pr_debug("Setting NTB Input Size failed\n"); 156 pr_debug("Setting NTB Input Size failed\n");
256 } 157 }
257 158
258 /* verify maximum size of transmitted NTB in bytes */ 159 /* verify maximum size of transmitted NTB in bytes */
259 if ((ctx->tx_max < 160 if ((ctx->tx_max <
260 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) || 161 (min_hdr_size + min_dgram_size)) ||
261 (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) { 162 (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
262 pr_debug("Using default maximum transmit length=%d\n", 163 pr_debug("Using default maximum transmit length=%d\n",
263 CDC_NCM_NTB_MAX_SIZE_TX); 164 CDC_NCM_NTB_MAX_SIZE_TX);
@@ -299,93 +200,85 @@ size_err:
299 } 200 }
300 201
301 /* adjust TX-remainder according to NCM specification. */ 202 /* adjust TX-remainder according to NCM specification. */
302 ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) & 203 ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) &
303 (ctx->tx_modulus - 1)); 204 (ctx->tx_modulus - 1));
304 205
305 /* additional configuration */ 206 /* additional configuration */
306 207
307 /* set CRC Mode */ 208 /* set CRC Mode */
308 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { 209 if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
309 err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), 210 err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE,
310 USB_CDC_SET_CRC_MODE, 211 USB_TYPE_CLASS | USB_DIR_OUT
311 USB_TYPE_CLASS | USB_DIR_OUT 212 | USB_RECIP_INTERFACE,
312 | USB_RECIP_INTERFACE, 213 USB_CDC_NCM_CRC_NOT_APPENDED,
313 USB_CDC_NCM_CRC_NOT_APPENDED, 214 iface_no, NULL, 0);
314 iface_no, NULL, 0, 1000);
315 if (err < 0) 215 if (err < 0)
316 pr_debug("Setting CRC mode off failed\n"); 216 pr_debug("Setting CRC mode off failed\n");
317 } 217 }
318 218
319 /* set NTB format, if both formats are supported */ 219 /* set NTB format, if both formats are supported */
320 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { 220 if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
321 err = usb_control_msg(ctx->udev, usb_sndctrlpipe(ctx->udev, 0), 221 err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT,
322 USB_CDC_SET_NTB_FORMAT, USB_TYPE_CLASS 222 USB_TYPE_CLASS | USB_DIR_OUT
323 | USB_DIR_OUT | USB_RECIP_INTERFACE, 223 | USB_RECIP_INTERFACE,
324 USB_CDC_NCM_NTB16_FORMAT, 224 USB_CDC_NCM_NTB16_FORMAT,
325 iface_no, NULL, 0, 1000); 225 iface_no, NULL, 0);
326 if (err < 0) 226 if (err < 0)
327 pr_debug("Setting NTB format to 16-bit failed\n"); 227 pr_debug("Setting NTB format to 16-bit failed\n");
328 } 228 }
329 229
330 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; 230 ctx->max_datagram_size = min_dgram_size;
331 231
332 /* set Max Datagram Size (MTU) */ 232 /* set Max Datagram Size (MTU) */
333 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) { 233 if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
334 __le16 *max_datagram_size; 234 __le16 max_datagram_size;
335 u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); 235 u16 eth_max_sz;
336 236 if (ctx->ether_desc != NULL)
337 max_datagram_size = kzalloc(sizeof(*max_datagram_size), 237 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
338 GFP_KERNEL); 238 else if (ctx->mbim_desc != NULL)
339 if (!max_datagram_size) { 239 eth_max_sz = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize);
340 err = -ENOMEM; 240 else
341 goto max_dgram_err; 241 goto max_dgram_err;
342 }
343 242
344 err = usb_control_msg(ctx->udev, usb_rcvctrlpipe(ctx->udev, 0), 243 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
345 USB_CDC_GET_MAX_DATAGRAM_SIZE, 244 USB_TYPE_CLASS | USB_DIR_IN
346 USB_TYPE_CLASS | USB_DIR_IN 245 | USB_RECIP_INTERFACE,
347 | USB_RECIP_INTERFACE, 246 0, iface_no, &max_datagram_size, 2);
348 0, iface_no, max_datagram_size,
349 2, 1000);
350 if (err < 0) { 247 if (err < 0) {
351 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n", 248 pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
352 CDC_NCM_MIN_DATAGRAM_SIZE); 249 min_dgram_size);
353 } else { 250 } else {
354 ctx->max_datagram_size = 251 ctx->max_datagram_size =
355 le16_to_cpu(*max_datagram_size); 252 le16_to_cpu(max_datagram_size);
356 /* Check Eth descriptor value */ 253 /* Check Eth descriptor value */
357 if (ctx->max_datagram_size > eth_max_sz) 254 if (ctx->max_datagram_size > eth_max_sz)
358 ctx->max_datagram_size = eth_max_sz; 255 ctx->max_datagram_size = eth_max_sz;
359 256
360 if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) 257 if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
361 ctx->max_datagram_size = 258 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
362 CDC_NCM_MAX_DATAGRAM_SIZE;
363 259
364 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) 260 if (ctx->max_datagram_size < min_dgram_size)
365 ctx->max_datagram_size = 261 ctx->max_datagram_size = min_dgram_size;
366 CDC_NCM_MIN_DATAGRAM_SIZE;
367 262
368 /* if value changed, update device */ 263 /* if value changed, update device */
369 if (ctx->max_datagram_size != 264 if (ctx->max_datagram_size !=
370 le16_to_cpu(*max_datagram_size)) { 265 le16_to_cpu(max_datagram_size)) {
371 err = usb_control_msg(ctx->udev, 266 err = usbnet_write_cmd(dev,
372 usb_sndctrlpipe(ctx->udev, 0),
373 USB_CDC_SET_MAX_DATAGRAM_SIZE, 267 USB_CDC_SET_MAX_DATAGRAM_SIZE,
374 USB_TYPE_CLASS | USB_DIR_OUT 268 USB_TYPE_CLASS | USB_DIR_OUT
375 | USB_RECIP_INTERFACE, 269 | USB_RECIP_INTERFACE,
376 0, 270 0,
377 iface_no, max_datagram_size, 271 iface_no, &max_datagram_size,
378 2, 1000); 272 2);
379 if (err < 0) 273 if (err < 0)
380 pr_debug("SET_MAX_DGRAM_SIZE failed\n"); 274 pr_debug("SET_MAX_DGRAM_SIZE failed\n");
381 } 275 }
382 } 276 }
383 kfree(max_datagram_size);
384 } 277 }
385 278
386max_dgram_err: 279max_dgram_err:
387 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN)) 280 if (ctx->netdev->mtu != (ctx->max_datagram_size - eth_hlen))
388 ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN; 281 ctx->netdev->mtu = ctx->max_datagram_size - eth_hlen;
389 282
390 return 0; 283 return 0;
391} 284}
@@ -451,7 +344,7 @@ static const struct ethtool_ops cdc_ncm_ethtool_ops = {
451 .nway_reset = usbnet_nway_reset, 344 .nway_reset = usbnet_nway_reset,
452}; 345};
453 346
454static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 347int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
455{ 348{
456 struct cdc_ncm_ctx *ctx; 349 struct cdc_ncm_ctx *ctx;
457 struct usb_driver *driver; 350 struct usb_driver *driver;
@@ -525,6 +418,13 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
525 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf; 418 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
526 break; 419 break;
527 420
421 case USB_CDC_MBIM_TYPE:
422 if (buf[0] < sizeof(*(ctx->mbim_desc)))
423 break;
424
425 ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf;
426 break;
427
528 default: 428 default:
529 break; 429 break;
530 } 430 }
@@ -537,7 +437,7 @@ advance:
537 437
538 /* check if we got everything */ 438 /* check if we got everything */
539 if ((ctx->control == NULL) || (ctx->data == NULL) || 439 if ((ctx->control == NULL) || (ctx->data == NULL) ||
540 (ctx->ether_desc == NULL) || (ctx->control != intf)) 440 ((!ctx->mbim_desc) && ((ctx->ether_desc == NULL) || (ctx->control != intf))))
541 goto error; 441 goto error;
542 442
543 /* claim data interface, if different from control */ 443 /* claim data interface, if different from control */
@@ -559,7 +459,7 @@ advance:
559 goto error2; 459 goto error2;
560 460
561 /* configure data interface */ 461 /* configure data interface */
562 temp = usb_set_interface(dev->udev, iface_no, 1); 462 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
563 if (temp) 463 if (temp)
564 goto error2; 464 goto error2;
565 465
@@ -576,11 +476,13 @@ advance:
576 usb_set_intfdata(ctx->control, dev); 476 usb_set_intfdata(ctx->control, dev);
577 usb_set_intfdata(ctx->intf, dev); 477 usb_set_intfdata(ctx->intf, dev);
578 478
579 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); 479 if (ctx->ether_desc) {
580 if (temp) 480 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
581 goto error2; 481 if (temp)
482 goto error2;
483 dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
484 }
582 485
583 dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
584 486
585 dev->in = usb_rcvbulkpipe(dev->udev, 487 dev->in = usb_rcvbulkpipe(dev->udev,
586 ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 488 ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
@@ -589,13 +491,6 @@ advance:
589 dev->status = ctx->status_ep; 491 dev->status = ctx->status_ep;
590 dev->rx_urb_size = ctx->rx_max; 492 dev->rx_urb_size = ctx->rx_max;
591 493
592 /*
593 * We should get an event when network connection is "connected" or
594 * "disconnected". Set network connection in "disconnected" state
595 * (carrier is OFF) during attach, so the IP network stack does not
596 * start IPv6 negotiation and more.
597 */
598 netif_carrier_off(dev->net);
599 ctx->tx_speed = ctx->rx_speed = 0; 494 ctx->tx_speed = ctx->rx_speed = 0;
600 return 0; 495 return 0;
601 496
@@ -609,8 +504,9 @@ error:
609 dev_info(&dev->udev->dev, "bind() failure\n"); 504 dev_info(&dev->udev->dev, "bind() failure\n");
610 return -ENODEV; 505 return -ENODEV;
611} 506}
507EXPORT_SYMBOL_GPL(cdc_ncm_bind_common);
612 508
613static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) 509void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
614{ 510{
615 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; 511 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
616 struct usb_driver *driver = driver_of(intf); 512 struct usb_driver *driver = driver_of(intf);
@@ -644,52 +540,121 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
644 usb_set_intfdata(ctx->intf, NULL); 540 usb_set_intfdata(ctx->intf, NULL);
645 cdc_ncm_free(ctx); 541 cdc_ncm_free(ctx);
646} 542}
543EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
647 544
648static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max) 545static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
649{ 546{
650 if (first >= max) 547 int ret;
651 return; 548
652 if (first >= end) 549 /* The MBIM spec defines a NCM compatible default altsetting,
653 return; 550 * which we may have matched:
654 if (end > max) 551 *
655 end = max; 552 * "Functions that implement both NCM 1.0 and MBIM (an
656 memset(ptr + first, 0, end - first); 553 * “NCM/MBIM function”) according to this recommendation
554 * shall provide two alternate settings for the
555 * Communication Interface. Alternate setting 0, and the
556 * associated class and endpoint descriptors, shall be
557 * constructed according to the rules given for the
558 * Communication Interface in section 5 of [USBNCM10].
559 * Alternate setting 1, and the associated class and
560 * endpoint descriptors, shall be constructed according to
561 * the rules given in section 6 (USB Device Model) of this
562 * specification."
563 *
564 * Do not bind to such interfaces, allowing cdc_mbim to handle
565 * them
566 */
567#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
568 if ((intf->num_altsetting == 2) &&
569 !usb_set_interface(dev->udev,
570 intf->cur_altsetting->desc.bInterfaceNumber,
571 CDC_NCM_COMM_ALTSETTING_MBIM) &&
572 cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
573 return -ENODEV;
574#endif
575
576 /* NCM data altsetting is always 1 */
577 ret = cdc_ncm_bind_common(dev, intf, 1);
578
579 /*
580 * We should get an event when network connection is "connected" or
581 * "disconnected". Set network connection in "disconnected" state
582 * (carrier is OFF) during attach, so the IP network stack does not
583 * start IPv6 negotiation and more.
584 */
585 netif_carrier_off(dev->net);
586 return ret;
657} 587}
658 588
659static struct sk_buff * 589static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
660cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
661{ 590{
591 size_t align = ALIGN(skb->len, modulus) - skb->len + remainder;
592
593 if (skb->len + align > max)
594 align = max - skb->len;
595 if (align && skb_tailroom(skb) >= align)
596 memset(skb_put(skb, align), 0, align);
597}
598
599/* return a pointer to a valid struct usb_cdc_ncm_ndp16 of type sign, possibly
600 * allocating a new one within skb
601 */
602static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve)
603{
604 struct usb_cdc_ncm_ndp16 *ndp16 = NULL;
605 struct usb_cdc_ncm_nth16 *nth16 = (void *)skb->data;
606 size_t ndpoffset = le16_to_cpu(nth16->wNdpIndex);
607
608 /* follow the chain of NDPs, looking for a match */
609 while (ndpoffset) {
610 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
611 if (ndp16->dwSignature == sign)
612 return ndp16;
613 ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
614 }
615
616 /* align new NDP */
617 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
618
619 /* verify that there is room for the NDP and the datagram (reserve) */
620 if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE)
621 return NULL;
622
623 /* link to it */
624 if (ndp16)
625 ndp16->wNextNdpIndex = cpu_to_le16(skb->len);
626 else
627 nth16->wNdpIndex = cpu_to_le16(skb->len);
628
629 /* push a new empty NDP */
630 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE);
631 ndp16->dwSignature = sign;
632 ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16));
633 return ndp16;
634}
635
636struct sk_buff *
637cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign)
638{
639 struct usb_cdc_ncm_nth16 *nth16;
640 struct usb_cdc_ncm_ndp16 *ndp16;
662 struct sk_buff *skb_out; 641 struct sk_buff *skb_out;
663 u32 rem; 642 u16 n = 0, index, ndplen;
664 u32 offset;
665 u32 last_offset;
666 u16 n = 0, index;
667 u8 ready2send = 0; 643 u8 ready2send = 0;
668 644
669 /* if there is a remaining skb, it gets priority */ 645 /* if there is a remaining skb, it gets priority */
670 if (skb != NULL) 646 if (skb != NULL) {
671 swap(skb, ctx->tx_rem_skb); 647 swap(skb, ctx->tx_rem_skb);
672 else 648 swap(sign, ctx->tx_rem_sign);
649 } else {
673 ready2send = 1; 650 ready2send = 1;
674 651 }
675 /*
676 * +----------------+
677 * | skb_out |
678 * +----------------+
679 * ^ offset
680 * ^ last_offset
681 */
682 652
683 /* check if we are resuming an OUT skb */ 653 /* check if we are resuming an OUT skb */
684 if (ctx->tx_curr_skb != NULL) { 654 skb_out = ctx->tx_curr_skb;
685 /* pop variables */
686 skb_out = ctx->tx_curr_skb;
687 offset = ctx->tx_curr_offset;
688 last_offset = ctx->tx_curr_last_offset;
689 n = ctx->tx_curr_frame_num;
690 655
691 } else { 656 /* allocate a new OUT skb */
692 /* reset variables */ 657 if (!skb_out) {
693 skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC); 658 skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
694 if (skb_out == NULL) { 659 if (skb_out == NULL) {
695 if (skb != NULL) { 660 if (skb != NULL) {
@@ -698,35 +663,21 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
698 } 663 }
699 goto exit_no_skb; 664 goto exit_no_skb;
700 } 665 }
666 /* fill out the initial 16-bit NTB header */
667 nth16 = (struct usb_cdc_ncm_nth16 *)memset(skb_put(skb_out, sizeof(struct usb_cdc_ncm_nth16)), 0, sizeof(struct usb_cdc_ncm_nth16));
668 nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
669 nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16));
670 nth16->wSequence = cpu_to_le16(ctx->tx_seq++);
701 671
702 /* make room for NTH and NDP */ 672 /* count total number of frames in this NTB */
703 offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
704 ctx->tx_ndp_modulus) +
705 sizeof(struct usb_cdc_ncm_ndp16) +
706 (ctx->tx_max_datagrams + 1) *
707 sizeof(struct usb_cdc_ncm_dpe16);
708
709 /* store last valid offset before alignment */
710 last_offset = offset;
711 /* align first Datagram offset correctly */
712 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
713 /* zero buffer till the first IP datagram */
714 cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
715 n = 0;
716 ctx->tx_curr_frame_num = 0; 673 ctx->tx_curr_frame_num = 0;
717 } 674 }
718 675
719 for (; n < ctx->tx_max_datagrams; n++) { 676 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) {
720 /* check if end of transmit buffer is reached */ 677 /* send any remaining skb first */
721 if (offset >= ctx->tx_max) {
722 ready2send = 1;
723 break;
724 }
725 /* compute maximum buffer size */
726 rem = ctx->tx_max - offset;
727
728 if (skb == NULL) { 678 if (skb == NULL) {
729 skb = ctx->tx_rem_skb; 679 skb = ctx->tx_rem_skb;
680 sign = ctx->tx_rem_sign;
730 ctx->tx_rem_skb = NULL; 681 ctx->tx_rem_skb = NULL;
731 682
732 /* check for end of skb */ 683 /* check for end of skb */
@@ -734,7 +685,14 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
734 break; 685 break;
735 } 686 }
736 687
737 if (skb->len > rem) { 688 /* get the appropriate NDP for this skb */
689 ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder);
690
691 /* align beginning of next frame */
692 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max);
693
694 /* check if we had enough room left for both NDP and frame */
695 if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) {
738 if (n == 0) { 696 if (n == 0) {
739 /* won't fit, MTU problem? */ 697 /* won't fit, MTU problem? */
740 dev_kfree_skb_any(skb); 698 dev_kfree_skb_any(skb);
@@ -747,31 +705,30 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
747 ctx->netdev->stats.tx_dropped++; 705 ctx->netdev->stats.tx_dropped++;
748 } 706 }
749 ctx->tx_rem_skb = skb; 707 ctx->tx_rem_skb = skb;
708 ctx->tx_rem_sign = sign;
750 skb = NULL; 709 skb = NULL;
751 ready2send = 1; 710 ready2send = 1;
752 } 711 }
753 break; 712 break;
754 } 713 }
755 714
756 memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len); 715 /* calculate frame number withing this NDP */
757 716 ndplen = le16_to_cpu(ndp16->wLength);
758 ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len); 717 index = (ndplen - sizeof(struct usb_cdc_ncm_ndp16)) / sizeof(struct usb_cdc_ncm_dpe16) - 1;
759 ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
760
761 /* update offset */
762 offset += skb->len;
763
764 /* store last valid offset before alignment */
765 last_offset = offset;
766
767 /* align offset correctly */
768 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
769 718
770 /* zero padding */ 719 /* OK, add this skb */
771 cdc_ncm_zero_fill(skb_out->data, last_offset, offset, 720 ndp16->dpe16[index].wDatagramLength = cpu_to_le16(skb->len);
772 ctx->tx_max); 721 ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len);
722 ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16));
723 memcpy(skb_put(skb_out, skb->len), skb->data, skb->len);
773 dev_kfree_skb_any(skb); 724 dev_kfree_skb_any(skb);
774 skb = NULL; 725 skb = NULL;
726
727 /* send now if this NDP is full */
728 if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) {
729 ready2send = 1;
730 break;
731 }
775 } 732 }
776 733
777 /* free up any dangling skb */ 734 /* free up any dangling skb */
@@ -787,16 +744,12 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
787 /* wait for more frames */ 744 /* wait for more frames */
788 /* push variables */ 745 /* push variables */
789 ctx->tx_curr_skb = skb_out; 746 ctx->tx_curr_skb = skb_out;
790 ctx->tx_curr_offset = offset;
791 ctx->tx_curr_last_offset = last_offset;
792 goto exit_no_skb; 747 goto exit_no_skb;
793 748
794 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { 749 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
795 /* wait for more frames */ 750 /* wait for more frames */
796 /* push variables */ 751 /* push variables */
797 ctx->tx_curr_skb = skb_out; 752 ctx->tx_curr_skb = skb_out;
798 ctx->tx_curr_offset = offset;
799 ctx->tx_curr_last_offset = last_offset;
800 /* set the pending count */ 753 /* set the pending count */
801 if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT) 754 if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
802 ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT; 755 ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT;
@@ -807,75 +760,24 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
807 /* variables will be reset at next call */ 760 /* variables will be reset at next call */
808 } 761 }
809 762
810 /* check for overflow */
811 if (last_offset > ctx->tx_max)
812 last_offset = ctx->tx_max;
813
814 /* revert offset */
815 offset = last_offset;
816
817 /* 763 /*
818 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes, 764 * If collected data size is less or equal CDC_NCM_MIN_TX_PKT bytes,
819 * we send buffers as it is. If we get more data, it would be more 765 * we send buffers as it is. If we get more data, it would be more
820 * efficient for USB HS mobile device with DMA engine to receive a full 766 * efficient for USB HS mobile device with DMA engine to receive a full
821 * size NTB, than canceling DMA transfer and receiving a short packet. 767 * size NTB, than canceling DMA transfer and receiving a short packet.
822 */ 768 */
823 if (offset > CDC_NCM_MIN_TX_PKT) 769 if (skb_out->len > CDC_NCM_MIN_TX_PKT)
824 offset = ctx->tx_max; 770 /* final zero padding */
825 771 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len);
826 /* final zero padding */
827 cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
828
829 /* store last offset */
830 last_offset = offset;
831
832 if (((last_offset < ctx->tx_max) && ((last_offset %
833 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
834 (((last_offset == ctx->tx_max) && ((ctx->tx_max %
835 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
836 (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
837 /* force short packet */
838 *(((u8 *)skb_out->data) + last_offset) = 0;
839 last_offset++;
840 }
841
842 /* zero the rest of the DPEs plus the last NULL entry */
843 for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
844 ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
845 ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
846 }
847 772
848 /* fill out 16-bit NTB header */ 773 /* do we need to prevent a ZLP? */
849 ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); 774 if (((skb_out->len % le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0) &&
850 ctx->tx_ncm.nth16.wHeaderLength = 775 (skb_out->len < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)) && skb_tailroom(skb_out))
851 cpu_to_le16(sizeof(ctx->tx_ncm.nth16)); 776 *skb_put(skb_out, 1) = 0; /* force short packet */
852 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
853 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
854 index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
855 ctx->tx_ncm.nth16.wNdpIndex = cpu_to_le16(index);
856
857 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
858 ctx->tx_seq++;
859
860 /* fill out 16-bit NDP table */
861 ctx->tx_ncm.ndp16.dwSignature =
862 cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
863 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
864 sizeof(struct usb_cdc_ncm_dpe16));
865 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
866 ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
867
868 memcpy(((u8 *)skb_out->data) + index,
869 &(ctx->tx_ncm.ndp16),
870 sizeof(ctx->tx_ncm.ndp16));
871 777
872 memcpy(((u8 *)skb_out->data) + index + sizeof(ctx->tx_ncm.ndp16), 778 /* set final frame length */
873 &(ctx->tx_ncm.dpe16), 779 nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
874 (ctx->tx_curr_frame_num + 1) * 780 nth16->wBlockLength = cpu_to_le16(skb_out->len);
875 sizeof(struct usb_cdc_ncm_dpe16));
876
877 /* set frame length */
878 skb_put(skb_out, last_offset);
879 781
880 /* return skb */ 782 /* return skb */
881 ctx->tx_curr_skb = NULL; 783 ctx->tx_curr_skb = NULL;
@@ -888,6 +790,7 @@ exit_no_skb:
888 cdc_ncm_tx_timeout_start(ctx); 790 cdc_ncm_tx_timeout_start(ctx);
889 return NULL; 791 return NULL;
890} 792}
793EXPORT_SYMBOL_GPL(cdc_ncm_fill_tx_frame);
891 794
892static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx) 795static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
893{ 796{
@@ -922,6 +825,8 @@ static void cdc_ncm_txpath_bh(unsigned long param)
922 netif_tx_lock_bh(ctx->netdev); 825 netif_tx_lock_bh(ctx->netdev);
923 usbnet_start_xmit(NULL, ctx->netdev); 826 usbnet_start_xmit(NULL, ctx->netdev);
924 netif_tx_unlock_bh(ctx->netdev); 827 netif_tx_unlock_bh(ctx->netdev);
828 } else {
829 spin_unlock_bh(&ctx->mtx);
925 } 830 }
926} 831}
927 832
@@ -942,7 +847,7 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
942 goto error; 847 goto error;
943 848
944 spin_lock_bh(&ctx->mtx); 849 spin_lock_bh(&ctx->mtx);
945 skb_out = cdc_ncm_fill_tx_frame(ctx, skb); 850 skb_out = cdc_ncm_fill_tx_frame(ctx, skb, cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN));
946 spin_unlock_bh(&ctx->mtx); 851 spin_unlock_bh(&ctx->mtx);
947 return skb_out; 852 return skb_out;
948 853
@@ -953,17 +858,12 @@ error:
953 return NULL; 858 return NULL;
954} 859}
955 860
956static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in) 861/* verify NTB header and return offset of first NDP, or negative error */
862int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in)
957{ 863{
958 struct sk_buff *skb;
959 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
960 int len;
961 int nframes;
962 int x;
963 int offset;
964 struct usb_cdc_ncm_nth16 *nth16; 864 struct usb_cdc_ncm_nth16 *nth16;
965 struct usb_cdc_ncm_ndp16 *ndp16; 865 int len;
966 struct usb_cdc_ncm_dpe16 *dpe16; 866 int ret = -EINVAL;
967 867
968 if (ctx == NULL) 868 if (ctx == NULL)
969 goto error; 869 goto error;
@@ -997,20 +897,23 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
997 } 897 }
998 ctx->rx_seq = le16_to_cpu(nth16->wSequence); 898 ctx->rx_seq = le16_to_cpu(nth16->wSequence);
999 899
1000 len = le16_to_cpu(nth16->wNdpIndex); 900 ret = le16_to_cpu(nth16->wNdpIndex);
1001 if ((len + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) { 901error:
1002 pr_debug("invalid DPT16 index <%u>\n", 902 return ret;
1003 le16_to_cpu(nth16->wNdpIndex)); 903}
1004 goto error; 904EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_nth16);
1005 }
1006 905
1007 ndp16 = (struct usb_cdc_ncm_ndp16 *)(((u8 *)skb_in->data) + len); 906/* verify NDP header and return number of datagrams, or negative error */
907int cdc_ncm_rx_verify_ndp16(struct sk_buff *skb_in, int ndpoffset)
908{
909 struct usb_cdc_ncm_ndp16 *ndp16;
910 int ret = -EINVAL;
1008 911
1009 if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) { 912 if ((ndpoffset + sizeof(struct usb_cdc_ncm_ndp16)) > skb_in->len) {
1010 pr_debug("invalid DPT16 signature <%u>\n", 913 pr_debug("invalid NDP offset <%u>\n", ndpoffset);
1011 le32_to_cpu(ndp16->dwSignature));
1012 goto error; 914 goto error;
1013 } 915 }
916 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
1014 917
1015 if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { 918 if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) {
1016 pr_debug("invalid DPT16 length <%u>\n", 919 pr_debug("invalid DPT16 length <%u>\n",
@@ -1018,20 +921,52 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1018 goto error; 921 goto error;
1019 } 922 }
1020 923
1021 nframes = ((le16_to_cpu(ndp16->wLength) - 924 ret = ((le16_to_cpu(ndp16->wLength) -
1022 sizeof(struct usb_cdc_ncm_ndp16)) / 925 sizeof(struct usb_cdc_ncm_ndp16)) /
1023 sizeof(struct usb_cdc_ncm_dpe16)); 926 sizeof(struct usb_cdc_ncm_dpe16));
1024 nframes--; /* we process NDP entries except for the last one */ 927 ret--; /* we process NDP entries except for the last one */
1025
1026 len += sizeof(struct usb_cdc_ncm_ndp16);
1027 928
1028 if ((len + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > 929 if ((sizeof(struct usb_cdc_ncm_ndp16) + ret * (sizeof(struct usb_cdc_ncm_dpe16))) >
1029 skb_in->len) { 930 skb_in->len) {
1030 pr_debug("Invalid nframes = %d\n", nframes); 931 pr_debug("Invalid nframes = %d\n", ret);
1031 goto error; 932 ret = -EINVAL;
1032 } 933 }
1033 934
1034 dpe16 = (struct usb_cdc_ncm_dpe16 *)(((u8 *)skb_in->data) + len); 935error:
936 return ret;
937}
938EXPORT_SYMBOL_GPL(cdc_ncm_rx_verify_ndp16);
939
940static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
941{
942 struct sk_buff *skb;
943 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
944 int len;
945 int nframes;
946 int x;
947 int offset;
948 struct usb_cdc_ncm_ndp16 *ndp16;
949 struct usb_cdc_ncm_dpe16 *dpe16;
950 int ndpoffset;
951 int loopcount = 50; /* arbitrary max preventing infinite loop */
952
953 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in);
954 if (ndpoffset < 0)
955 goto error;
956
957next_ndp:
958 nframes = cdc_ncm_rx_verify_ndp16(skb_in, ndpoffset);
959 if (nframes < 0)
960 goto error;
961
962 ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb_in->data + ndpoffset);
963
964 if (le32_to_cpu(ndp16->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN) {
965 pr_debug("invalid DPT16 signature <%u>\n",
966 le32_to_cpu(ndp16->dwSignature));
967 goto err_ndp;
968 }
969 dpe16 = ndp16->dpe16;
1035 970
1036 for (x = 0; x < nframes; x++, dpe16++) { 971 for (x = 0; x < nframes; x++, dpe16++) {
1037 offset = le16_to_cpu(dpe16->wDatagramIndex); 972 offset = le16_to_cpu(dpe16->wDatagramIndex);
@@ -1043,7 +978,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1043 */ 978 */
1044 if ((offset == 0) || (len == 0)) { 979 if ((offset == 0) || (len == 0)) {
1045 if (!x) 980 if (!x)
1046 goto error; /* empty NTB */ 981 goto err_ndp; /* empty NTB */
1047 break; 982 break;
1048 } 983 }
1049 984
@@ -1054,7 +989,7 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1054 "offset[%u]=%u, length=%u, skb=%p\n", 989 "offset[%u]=%u, length=%u, skb=%p\n",
1055 x, offset, len, skb_in); 990 x, offset, len, skb_in);
1056 if (!x) 991 if (!x)
1057 goto error; 992 goto err_ndp;
1058 break; 993 break;
1059 994
1060 } else { 995 } else {
@@ -1067,6 +1002,12 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
1067 usbnet_skb_return(dev, skb); 1002 usbnet_skb_return(dev, skb);
1068 } 1003 }
1069 } 1004 }
1005err_ndp:
1006 /* are there more NDPs to process? */
1007 ndpoffset = le16_to_cpu(ndp16->wNextNdpIndex);
1008 if (ndpoffset && loopcount--)
1009 goto next_ndp;
1010
1070 return 1; 1011 return 1;
1071error: 1012error:
1072 return 0; 1013 return 0;
@@ -1131,7 +1072,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1131 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be 1072 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1132 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. 1073 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1133 */ 1074 */
1134 ctx->connected = event->wValue; 1075 ctx->connected = le16_to_cpu(event->wValue);
1135 1076
1136 printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:" 1077 printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
1137 " %sconnected\n", 1078 " %sconnected\n",
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index e0433ce6ced7..3f554c1149f3 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -56,27 +56,12 @@
56 56
57static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data) 57static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
58{ 58{
59 void *buf; 59 int err;
60 int err = -ENOMEM; 60 err = usbnet_read_cmd(dev, DM_READ_REGS,
61 61 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
62 netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length); 62 0, reg, data, length);
63 63 if(err != length && err >= 0)
64 buf = kmalloc(length, GFP_KERNEL);
65 if (!buf)
66 goto out;
67
68 err = usb_control_msg(dev->udev,
69 usb_rcvctrlpipe(dev->udev, 0),
70 DM_READ_REGS,
71 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
72 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
73 if (err == length)
74 memcpy(data, buf, length);
75 else if (err >= 0)
76 err = -EINVAL; 64 err = -EINVAL;
77 kfree(buf);
78
79 out:
80 return err; 65 return err;
81} 66}
82 67
@@ -87,91 +72,29 @@ static int dm_read_reg(struct usbnet *dev, u8 reg, u8 *value)
87 72
88static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data) 73static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
89{ 74{
90 void *buf = NULL; 75 int err;
91 int err = -ENOMEM; 76 err = usbnet_write_cmd(dev, DM_WRITE_REGS,
92 77 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
93 netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length); 78 0, reg, data, length);
94 79
95 if (data) {
96 buf = kmemdup(data, length, GFP_KERNEL);
97 if (!buf)
98 goto out;
99 }
100
101 err = usb_control_msg(dev->udev,
102 usb_sndctrlpipe(dev->udev, 0),
103 DM_WRITE_REGS,
104 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
105 0, reg, buf, length, USB_CTRL_SET_TIMEOUT);
106 kfree(buf);
107 if (err >= 0 && err < length) 80 if (err >= 0 && err < length)
108 err = -EINVAL; 81 err = -EINVAL;
109 out:
110 return err; 82 return err;
111} 83}
112 84
113static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value) 85static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
114{ 86{
115 netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n", 87 return usbnet_write_cmd(dev, DM_WRITE_REGS,
116 reg, value); 88 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
117 return usb_control_msg(dev->udev, 89 value, reg, NULL, 0);
118 usb_sndctrlpipe(dev->udev, 0),
119 DM_WRITE_REG,
120 USB_DIR_OUT | USB_TYPE_VENDOR |USB_RECIP_DEVICE,
121 value, reg, NULL, 0, USB_CTRL_SET_TIMEOUT);
122}
123
124static void dm_write_async_callback(struct urb *urb)
125{
126 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
127 int status = urb->status;
128
129 if (status < 0)
130 printk(KERN_DEBUG "dm_write_async_callback() failed with %d\n",
131 status);
132
133 kfree(req);
134 usb_free_urb(urb);
135} 90}
136 91
137static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value, 92static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
138 u16 length, void *data) 93 u16 length, void *data)
139{ 94{
140 struct usb_ctrlrequest *req; 95 usbnet_write_cmd_async(dev, DM_WRITE_REGS,
141 struct urb *urb; 96 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
142 int status; 97 value, reg, data, length);
143
144 urb = usb_alloc_urb(0, GFP_ATOMIC);
145 if (!urb) {
146 netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
147 return;
148 }
149
150 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
151 if (!req) {
152 netdev_err(dev->net, "Failed to allocate memory for control request\n");
153 usb_free_urb(urb);
154 return;
155 }
156
157 req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
158 req->bRequest = length ? DM_WRITE_REGS : DM_WRITE_REG;
159 req->wValue = cpu_to_le16(value);
160 req->wIndex = cpu_to_le16(reg);
161 req->wLength = cpu_to_le16(length);
162
163 usb_fill_control_urb(urb, dev->udev,
164 usb_sndctrlpipe(dev->udev, 0),
165 (void *)req, data, length,
166 dm_write_async_callback, req);
167
168 status = usb_submit_urb(urb, GFP_ATOMIC);
169 if (status < 0) {
170 netdev_err(dev->net, "Error submitting the control message: status=%d\n",
171 status);
172 kfree(req);
173 usb_free_urb(urb);
174 }
175} 98}
176 99
177static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data) 100static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 8de641713d5f..ace9e74ffbdd 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -116,23 +116,8 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
116 return skb; 116 return skb;
117} 117}
118 118
119static void int51x1_async_cmd_callback(struct urb *urb)
120{
121 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
122 int status = urb->status;
123
124 if (status < 0)
125 dev_warn(&urb->dev->dev, "async callback failed with %d\n", status);
126
127 kfree(req);
128 usb_free_urb(urb);
129}
130
131static void int51x1_set_multicast(struct net_device *netdev) 119static void int51x1_set_multicast(struct net_device *netdev)
132{ 120{
133 struct usb_ctrlrequest *req;
134 int status;
135 struct urb *urb;
136 struct usbnet *dev = netdev_priv(netdev); 121 struct usbnet *dev = netdev_priv(netdev);
137 u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST; 122 u16 filter = PACKET_TYPE_DIRECTED | PACKET_TYPE_BROADCAST;
138 123
@@ -149,40 +134,9 @@ static void int51x1_set_multicast(struct net_device *netdev)
149 netdev_dbg(dev->net, "receive own packets only\n"); 134 netdev_dbg(dev->net, "receive own packets only\n");
150 } 135 }
151 136
152 urb = usb_alloc_urb(0, GFP_ATOMIC); 137 usbnet_write_cmd_async(dev, SET_ETHERNET_PACKET_FILTER,
153 if (!urb) { 138 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
154 netdev_warn(dev->net, "Error allocating URB\n"); 139 filter, 0, NULL, 0);
155 return;
156 }
157
158 req = kmalloc(sizeof(*req), GFP_ATOMIC);
159 if (!req) {
160 netdev_warn(dev->net, "Error allocating control msg\n");
161 goto out;
162 }
163
164 req->bRequestType = USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
165 req->bRequest = SET_ETHERNET_PACKET_FILTER;
166 req->wValue = cpu_to_le16(filter);
167 req->wIndex = 0;
168 req->wLength = 0;
169
170 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
171 (void *)req, NULL, 0,
172 int51x1_async_cmd_callback,
173 (void *)req);
174
175 status = usb_submit_urb(urb, GFP_ATOMIC);
176 if (status < 0) {
177 netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
178 status);
179 goto out1;
180 }
181 return;
182out1:
183 kfree(req);
184out:
185 usb_free_urb(urb);
186} 140}
187 141
188static const struct net_device_ops int51x1_netdev_ops = { 142static const struct net_device_ops int51x1_netdev_ops = {
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index cc7e72010ac3..3f3f566afa0b 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -124,93 +124,20 @@ static const char driver_name[] = "MOSCHIP usb-ethernet driver";
124 124
125static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) 125static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
126{ 126{
127 struct usb_device *xdev = dev->udev; 127 return usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ,
128 int ret; 128 0x0000, index, data, size);
129 void *buffer;
130
131 buffer = kmalloc(size, GFP_NOIO);
132 if (buffer == NULL)
133 return -ENOMEM;
134
135 ret = usb_control_msg(xdev, usb_rcvctrlpipe(xdev, 0), MCS7830_RD_BREQ,
136 MCS7830_RD_BMREQ, 0x0000, index, buffer,
137 size, MCS7830_CTRL_TIMEOUT);
138 memcpy(data, buffer, size);
139 kfree(buffer);
140
141 return ret;
142} 129}
143 130
144static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) 131static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
145{ 132{
146 struct usb_device *xdev = dev->udev; 133 return usbnet_write_cmd(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
147 int ret; 134 0x0000, index, data, size);
148 void *buffer;
149
150 buffer = kmemdup(data, size, GFP_NOIO);
151 if (buffer == NULL)
152 return -ENOMEM;
153
154 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
155 MCS7830_WR_BMREQ, 0x0000, index, buffer,
156 size, MCS7830_CTRL_TIMEOUT);
157 kfree(buffer);
158 return ret;
159}
160
161static void mcs7830_async_cmd_callback(struct urb *urb)
162{
163 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
164 int status = urb->status;
165
166 if (status < 0)
167 printk(KERN_DEBUG "%s() failed with %d\n",
168 __func__, status);
169
170 kfree(req);
171 usb_free_urb(urb);
172} 135}
173 136
174static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data) 137static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data)
175{ 138{
176 struct usb_ctrlrequest *req; 139 usbnet_write_cmd_async(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ,
177 int ret; 140 0x0000, index, data, size);
178 struct urb *urb;
179
180 urb = usb_alloc_urb(0, GFP_ATOMIC);
181 if (!urb) {
182 dev_dbg(&dev->udev->dev,
183 "Error allocating URB in write_cmd_async!\n");
184 return;
185 }
186
187 req = kmalloc(sizeof *req, GFP_ATOMIC);
188 if (!req) {
189 dev_err(&dev->udev->dev,
190 "Failed to allocate memory for control request\n");
191 goto out;
192 }
193 req->bRequestType = MCS7830_WR_BMREQ;
194 req->bRequest = MCS7830_WR_BREQ;
195 req->wValue = 0;
196 req->wIndex = cpu_to_le16(index);
197 req->wLength = cpu_to_le16(size);
198
199 usb_fill_control_urb(urb, dev->udev,
200 usb_sndctrlpipe(dev->udev, 0),
201 (void *)req, data, size,
202 mcs7830_async_cmd_callback, req);
203
204 ret = usb_submit_urb(urb, GFP_ATOMIC);
205 if (ret < 0) {
206 dev_err(&dev->udev->dev,
207 "Error submitting the control message: ret=%d\n", ret);
208 goto out;
209 }
210 return;
211out:
212 kfree(req);
213 usb_free_urb(urb);
214} 141}
215 142
216static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr) 143static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index c062a3e8295c..93e0716a118c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -109,13 +109,11 @@ struct nc_trailer {
109static int 109static int
110nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr) 110nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
111{ 111{
112 int status = usb_control_msg(dev->udev, 112 int status = usbnet_read_cmd(dev, req,
113 usb_rcvctrlpipe(dev->udev, 0), 113 USB_DIR_IN | USB_TYPE_VENDOR |
114 req, 114 USB_RECIP_DEVICE,
115 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 115 0, regnum, retval_ptr,
116 0, regnum, 116 sizeof *retval_ptr);
117 retval_ptr, sizeof *retval_ptr,
118 USB_CTRL_GET_TIMEOUT);
119 if (status > 0) 117 if (status > 0)
120 status = 0; 118 status = 0;
121 if (!status) 119 if (!status)
@@ -133,13 +131,9 @@ nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
133static void 131static void
134nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value) 132nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
135{ 133{
136 usb_control_msg(dev->udev, 134 usbnet_write_cmd(dev, req,
137 usb_sndctrlpipe(dev->udev, 0), 135 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
138 req, 136 value, regnum, NULL, 0);
139 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
140 value, regnum,
141 NULL, 0, // data is in setup packet
142 USB_CTRL_SET_TIMEOUT);
143} 137}
144 138
145static inline void 139static inline void
@@ -288,37 +282,34 @@ static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
288static int net1080_reset(struct usbnet *dev) 282static int net1080_reset(struct usbnet *dev)
289{ 283{
290 u16 usbctl, status, ttl; 284 u16 usbctl, status, ttl;
291 u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL); 285 u16 vp;
292 int retval; 286 int retval;
293 287
294 if (!vp)
295 return -ENOMEM;
296
297 // nc_dump_registers(dev); 288 // nc_dump_registers(dev);
298 289
299 if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) { 290 if ((retval = nc_register_read(dev, REG_STATUS, &vp)) < 0) {
300 netdev_dbg(dev->net, "can't read %s-%s status: %d\n", 291 netdev_dbg(dev->net, "can't read %s-%s status: %d\n",
301 dev->udev->bus->bus_name, dev->udev->devpath, retval); 292 dev->udev->bus->bus_name, dev->udev->devpath, retval);
302 goto done; 293 goto done;
303 } 294 }
304 status = *vp; 295 status = vp;
305 nc_dump_status(dev, status); 296 nc_dump_status(dev, status);
306 297
307 if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) { 298 if ((retval = nc_register_read(dev, REG_USBCTL, &vp)) < 0) {
308 netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval); 299 netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval);
309 goto done; 300 goto done;
310 } 301 }
311 usbctl = *vp; 302 usbctl = vp;
312 nc_dump_usbctl(dev, usbctl); 303 nc_dump_usbctl(dev, usbctl);
313 304
314 nc_register_write(dev, REG_USBCTL, 305 nc_register_write(dev, REG_USBCTL,
315 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); 306 USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);
316 307
317 if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) { 308 if ((retval = nc_register_read(dev, REG_TTL, &vp)) < 0) {
318 netdev_dbg(dev->net, "can't read TTL, %d\n", retval); 309 netdev_dbg(dev->net, "can't read TTL, %d\n", retval);
319 goto done; 310 goto done;
320 } 311 }
321 ttl = *vp; 312 ttl = vp;
322 // nc_dump_ttl(dev, ttl); 313 // nc_dump_ttl(dev, ttl);
323 314
324 nc_register_write(dev, REG_TTL, 315 nc_register_write(dev, REG_TTL,
@@ -331,7 +322,6 @@ static int net1080_reset(struct usbnet *dev)
331 retval = 0; 322 retval = 0;
332 323
333done: 324done:
334 kfree(vp);
335 return retval; 325 return retval;
336} 326}
337 327
@@ -339,13 +329,10 @@ static int net1080_check_connect(struct usbnet *dev)
339{ 329{
340 int retval; 330 int retval;
341 u16 status; 331 u16 status;
342 u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL); 332 u16 vp;
343 333
344 if (!vp) 334 retval = nc_register_read(dev, REG_STATUS, &vp);
345 return -ENOMEM; 335 status = vp;
346 retval = nc_register_read(dev, REG_STATUS, vp);
347 status = *vp;
348 kfree(vp);
349 if (retval != 0) { 336 if (retval != 0) {
350 netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval); 337 netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval);
351 return retval; 338 return retval;
@@ -355,59 +342,22 @@ static int net1080_check_connect(struct usbnet *dev)
355 return 0; 342 return 0;
356} 343}
357 344
358static void nc_flush_complete(struct urb *urb)
359{
360 kfree(urb->context);
361 usb_free_urb(urb);
362}
363
364static void nc_ensure_sync(struct usbnet *dev) 345static void nc_ensure_sync(struct usbnet *dev)
365{ 346{
366 dev->frame_errors++; 347 if (++dev->frame_errors <= 5)
367 if (dev->frame_errors > 5) { 348 return;
368 struct urb *urb;
369 struct usb_ctrlrequest *req;
370 int status;
371
372 /* Send a flush */
373 urb = usb_alloc_urb(0, GFP_ATOMIC);
374 if (!urb)
375 return;
376
377 req = kmalloc(sizeof *req, GFP_ATOMIC);
378 if (!req) {
379 usb_free_urb(urb);
380 return;
381 }
382 349
383 req->bRequestType = USB_DIR_OUT 350 if (usbnet_write_cmd_async(dev, REQUEST_REGISTER,
384 | USB_TYPE_VENDOR 351 USB_DIR_OUT | USB_TYPE_VENDOR |
385 | USB_RECIP_DEVICE; 352 USB_RECIP_DEVICE,
386 req->bRequest = REQUEST_REGISTER; 353 USBCTL_FLUSH_THIS |
387 req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS 354 USBCTL_FLUSH_OTHER,
388 | USBCTL_FLUSH_OTHER); 355 REG_USBCTL, NULL, 0))
389 req->wIndex = cpu_to_le16(REG_USBCTL); 356 return;
390 req->wLength = cpu_to_le16(0);
391
392 /* queue an async control request, we don't need
393 * to do anything when it finishes except clean up.
394 */
395 usb_fill_control_urb(urb, dev->udev,
396 usb_sndctrlpipe(dev->udev, 0),
397 (unsigned char *) req,
398 NULL, 0,
399 nc_flush_complete, req);
400 status = usb_submit_urb(urb, GFP_ATOMIC);
401 if (status) {
402 kfree(req);
403 usb_free_urb(urb);
404 return;
405 }
406 357
407 netif_dbg(dev, rx_err, dev->net, 358 netif_dbg(dev, rx_err, dev->net,
408 "flush net1080; too many framing errors\n"); 359 "flush net1080; too many framing errors\n");
409 dev->frame_errors = 0; 360 dev->frame_errors = 0;
410 }
411} 361}
412 362
413static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) 363static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 4584b9a805b3..0fcc8e65a068 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -71,13 +71,10 @@
71static inline int 71static inline int
72pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) 72pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
73{ 73{
74 return usb_control_msg(dev->udev, 74 return usbnet_read_cmd(dev, req,
75 usb_rcvctrlpipe(dev->udev, 0), 75 USB_DIR_IN | USB_TYPE_VENDOR |
76 req, 76 USB_RECIP_DEVICE,
77 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 77 val, index, NULL, 0);
78 val, index,
79 NULL, 0,
80 USB_CTRL_GET_TIMEOUT);
81} 78}
82 79
83static inline int 80static inline int
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c27d27701aee..18dd4257ab17 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -311,10 +311,9 @@ static int sierra_net_send_cmd(struct usbnet *dev,
311 struct sierra_net_data *priv = sierra_net_get_private(dev); 311 struct sierra_net_data *priv = sierra_net_get_private(dev);
312 int status; 312 int status;
313 313
314 status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 314 status = usbnet_write_cmd(dev, USB_CDC_SEND_ENCAPSULATED_COMMAND,
315 USB_CDC_SEND_ENCAPSULATED_COMMAND, 315 USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
316 USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0, 316 0, priv->ifnum, cmd, cmdlen);
317 priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
318 317
319 if (status != cmdlen && status != -ENODEV) 318 if (status != cmdlen && status != -ENODEV)
320 netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status); 319 netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
@@ -340,7 +339,7 @@ static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
340 dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix); 339 dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
341 priv->tx_hdr_template[0] = 0x3F; 340 priv->tx_hdr_template[0] = 0x3F;
342 priv->tx_hdr_template[1] = ctx_ix; 341 priv->tx_hdr_template[1] = ctx_ix;
343 *((u16 *)&priv->tx_hdr_template[2]) = 342 *((__be16 *)&priv->tx_hdr_template[2]) =
344 cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID); 343 cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
345} 344}
346 345
@@ -632,32 +631,22 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
632static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) 631static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
633{ 632{
634 int result = 0; 633 int result = 0;
635 u16 *attrdata; 634 __le16 attrdata;
636 635
637 attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL); 636 result = usbnet_read_cmd(dev,
638 if (!attrdata) 637 /* _u8 vendor specific request */
639 return -ENOMEM; 638 SWI_USB_REQUEST_GET_FW_ATTR,
640 639 USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
641 result = usb_control_msg( 640 0x0000, /* __u16 value not used */
642 dev->udev, 641 0x0000, /* __u16 index not used */
643 usb_rcvctrlpipe(dev->udev, 0), 642 &attrdata, /* char *data */
644 /* _u8 vendor specific request */ 643 sizeof(attrdata) /* __u16 size */
645 SWI_USB_REQUEST_GET_FW_ATTR, 644 );
646 USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */ 645
647 0x0000, /* __u16 value not used */ 646 if (result < 0)
648 0x0000, /* __u16 index not used */
649 attrdata, /* char *data */
650 sizeof(*attrdata), /* __u16 size */
651 USB_CTRL_SET_TIMEOUT); /* int timeout */
652
653 if (result < 0) {
654 kfree(attrdata);
655 return -EIO; 647 return -EIO;
656 }
657
658 *datap = le16_to_cpu(*attrdata);
659 648
660 kfree(attrdata); 649 *datap = le16_to_cpu(attrdata);
661 return result; 650 return result;
662} 651}
663 652
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index b77ae76f4aa8..251a3354a4b0 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -26,6 +26,8 @@
26#include <linux/ethtool.h> 26#include <linux/ethtool.h>
27#include <linux/mii.h> 27#include <linux/mii.h>
28#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/bitrev.h>
30#include <linux/crc16.h>
29#include <linux/crc32.h> 31#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 32#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
@@ -52,16 +54,15 @@
52#define USB_PRODUCT_ID_LAN7500 (0x7500) 54#define USB_PRODUCT_ID_LAN7500 (0x7500)
53#define USB_PRODUCT_ID_LAN7505 (0x7505) 55#define USB_PRODUCT_ID_LAN7505 (0x7505)
54#define RXW_PADDING 2 56#define RXW_PADDING 2
55#define SUPPORTED_WAKE (WAKE_MAGIC) 57#define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
58 WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
56 59
57#define check_warn(ret, fmt, args...) \ 60#define SUSPEND_SUSPEND0 (0x01)
58 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 61#define SUSPEND_SUSPEND1 (0x02)
59 62#define SUSPEND_SUSPEND2 (0x04)
60#define check_warn_return(ret, fmt, args...) \ 63#define SUSPEND_SUSPEND3 (0x08)
61 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } }) 64#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
62 65 SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
63#define check_warn_goto_done(ret, fmt, args...) \
64 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
65 66
66struct smsc75xx_priv { 67struct smsc75xx_priv {
67 struct usbnet *dev; 68 struct usbnet *dev;
@@ -71,6 +72,7 @@ struct smsc75xx_priv {
71 struct mutex dataport_mutex; 72 struct mutex dataport_mutex;
72 spinlock_t rfe_ctl_lock; 73 spinlock_t rfe_ctl_lock;
73 struct work_struct set_multicast; 74 struct work_struct set_multicast;
75 u8 suspend_flags;
74}; 76};
75 77
76struct usb_context { 78struct usb_context {
@@ -82,96 +84,99 @@ static bool turbo_mode = true;
82module_param(turbo_mode, bool, 0644); 84module_param(turbo_mode, bool, 0644);
83MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 85MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
84 86
85static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, 87static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
86 u32 *data) 88 u32 *data, int in_pm)
87{ 89{
88 u32 *buf = kmalloc(4, GFP_KERNEL); 90 u32 buf;
89 int ret; 91 int ret;
92 int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
90 93
91 BUG_ON(!dev); 94 BUG_ON(!dev);
92 95
93 if (!buf) 96 if (!in_pm)
94 return -ENOMEM; 97 fn = usbnet_read_cmd;
95 98 else
96 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 99 fn = usbnet_read_cmd_nopm;
97 USB_VENDOR_REQUEST_READ_REGISTER,
98 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
99 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
100 100
101 ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
102 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
103 0, index, &buf, 4);
101 if (unlikely(ret < 0)) 104 if (unlikely(ret < 0))
102 netdev_warn(dev->net, 105 netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
103 "Failed to read reg index 0x%08x: %d", index, ret); 106 index, ret);
104 107
105 le32_to_cpus(buf); 108 le32_to_cpus(&buf);
106 *data = *buf; 109 *data = buf;
107 kfree(buf);
108 110
109 return ret; 111 return ret;
110} 112}
111 113
112static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, 114static int __must_check __smsc75xx_write_reg(struct usbnet *dev, u32 index,
113 u32 data) 115 u32 data, int in_pm)
114{ 116{
115 u32 *buf = kmalloc(4, GFP_KERNEL); 117 u32 buf;
116 int ret; 118 int ret;
119 int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
117 120
118 BUG_ON(!dev); 121 BUG_ON(!dev);
119 122
120 if (!buf) 123 if (!in_pm)
121 return -ENOMEM; 124 fn = usbnet_write_cmd;
122 125 else
123 *buf = data; 126 fn = usbnet_write_cmd_nopm;
124 cpu_to_le32s(buf);
125 127
126 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 128 buf = data;
127 USB_VENDOR_REQUEST_WRITE_REGISTER, 129 cpu_to_le32s(&buf);
128 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
129 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
130 130
131 ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
132 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
133 0, index, &buf, 4);
131 if (unlikely(ret < 0)) 134 if (unlikely(ret < 0))
132 netdev_warn(dev->net, 135 netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
133 "Failed to write reg index 0x%08x: %d", index, ret); 136 index, ret);
134
135 kfree(buf);
136 137
137 return ret; 138 return ret;
138} 139}
139 140
140static int smsc75xx_set_feature(struct usbnet *dev, u32 feature) 141static int __must_check smsc75xx_read_reg_nopm(struct usbnet *dev, u32 index,
142 u32 *data)
141{ 143{
142 if (WARN_ON_ONCE(!dev)) 144 return __smsc75xx_read_reg(dev, index, data, 1);
143 return -EINVAL;
144
145 cpu_to_le32s(&feature);
146
147 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
148 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
149 USB_CTRL_SET_TIMEOUT);
150} 145}
151 146
152static int smsc75xx_clear_feature(struct usbnet *dev, u32 feature) 147static int __must_check smsc75xx_write_reg_nopm(struct usbnet *dev, u32 index,
148 u32 data)
153{ 149{
154 if (WARN_ON_ONCE(!dev)) 150 return __smsc75xx_write_reg(dev, index, data, 1);
155 return -EINVAL; 151}
156 152
157 cpu_to_le32s(&feature); 153static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index,
154 u32 *data)
155{
156 return __smsc75xx_read_reg(dev, index, data, 0);
157}
158 158
159 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 159static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index,
160 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0, 160 u32 data)
161 USB_CTRL_SET_TIMEOUT); 161{
162 return __smsc75xx_write_reg(dev, index, data, 0);
162} 163}
163 164
164/* Loop until the read is completed with timeout 165/* Loop until the read is completed with timeout
165 * called with phy_mutex held */ 166 * called with phy_mutex held */
166static int smsc75xx_phy_wait_not_busy(struct usbnet *dev) 167static __must_check int __smsc75xx_phy_wait_not_busy(struct usbnet *dev,
168 int in_pm)
167{ 169{
168 unsigned long start_time = jiffies; 170 unsigned long start_time = jiffies;
169 u32 val; 171 u32 val;
170 int ret; 172 int ret;
171 173
172 do { 174 do {
173 ret = smsc75xx_read_reg(dev, MII_ACCESS, &val); 175 ret = __smsc75xx_read_reg(dev, MII_ACCESS, &val, in_pm);
174 check_warn_return(ret, "Error reading MII_ACCESS"); 176 if (ret < 0) {
177 netdev_warn(dev->net, "Error reading MII_ACCESS\n");
178 return ret;
179 }
175 180
176 if (!(val & MII_ACCESS_BUSY)) 181 if (!(val & MII_ACCESS_BUSY))
177 return 0; 182 return 0;
@@ -180,7 +185,8 @@ static int smsc75xx_phy_wait_not_busy(struct usbnet *dev)
180 return -EIO; 185 return -EIO;
181} 186}
182 187
183static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) 188static int __smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
189 int in_pm)
184{ 190{
185 struct usbnet *dev = netdev_priv(netdev); 191 struct usbnet *dev = netdev_priv(netdev);
186 u32 val, addr; 192 u32 val, addr;
@@ -189,8 +195,11 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
189 mutex_lock(&dev->phy_mutex); 195 mutex_lock(&dev->phy_mutex);
190 196
191 /* confirm MII not busy */ 197 /* confirm MII not busy */
192 ret = smsc75xx_phy_wait_not_busy(dev); 198 ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
193 check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_read"); 199 if (ret < 0) {
200 netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_read\n");
201 goto done;
202 }
194 203
195 /* set the address, index & direction (read from PHY) */ 204 /* set the address, index & direction (read from PHY) */
196 phy_id &= dev->mii.phy_id_mask; 205 phy_id &= dev->mii.phy_id_mask;
@@ -198,14 +207,23 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
198 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 207 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
199 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 208 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
200 | MII_ACCESS_READ | MII_ACCESS_BUSY; 209 | MII_ACCESS_READ | MII_ACCESS_BUSY;
201 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 210 ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
202 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 211 if (ret < 0) {
212 netdev_warn(dev->net, "Error writing MII_ACCESS\n");
213 goto done;
214 }
203 215
204 ret = smsc75xx_phy_wait_not_busy(dev); 216 ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
205 check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx); 217 if (ret < 0) {
218 netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
219 goto done;
220 }
206 221
207 ret = smsc75xx_read_reg(dev, MII_DATA, &val); 222 ret = __smsc75xx_read_reg(dev, MII_DATA, &val, in_pm);
208 check_warn_goto_done(ret, "Error reading MII_DATA"); 223 if (ret < 0) {
224 netdev_warn(dev->net, "Error reading MII_DATA\n");
225 goto done;
226 }
209 227
210 ret = (u16)(val & 0xFFFF); 228 ret = (u16)(val & 0xFFFF);
211 229
@@ -214,8 +232,8 @@ done:
214 return ret; 232 return ret;
215} 233}
216 234
217static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, 235static void __smsc75xx_mdio_write(struct net_device *netdev, int phy_id,
218 int regval) 236 int idx, int regval, int in_pm)
219{ 237{
220 struct usbnet *dev = netdev_priv(netdev); 238 struct usbnet *dev = netdev_priv(netdev);
221 u32 val, addr; 239 u32 val, addr;
@@ -224,12 +242,18 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
224 mutex_lock(&dev->phy_mutex); 242 mutex_lock(&dev->phy_mutex);
225 243
226 /* confirm MII not busy */ 244 /* confirm MII not busy */
227 ret = smsc75xx_phy_wait_not_busy(dev); 245 ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
228 check_warn_goto_done(ret, "MII is busy in smsc75xx_mdio_write"); 246 if (ret < 0) {
247 netdev_warn(dev->net, "MII is busy in smsc75xx_mdio_write\n");
248 goto done;
249 }
229 250
230 val = regval; 251 val = regval;
231 ret = smsc75xx_write_reg(dev, MII_DATA, val); 252 ret = __smsc75xx_write_reg(dev, MII_DATA, val, in_pm);
232 check_warn_goto_done(ret, "Error writing MII_DATA"); 253 if (ret < 0) {
254 netdev_warn(dev->net, "Error writing MII_DATA\n");
255 goto done;
256 }
233 257
234 /* set the address, index & direction (write to PHY) */ 258 /* set the address, index & direction (write to PHY) */
235 phy_id &= dev->mii.phy_id_mask; 259 phy_id &= dev->mii.phy_id_mask;
@@ -237,16 +261,45 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
237 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) 261 addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR)
238 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) 262 | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR)
239 | MII_ACCESS_WRITE | MII_ACCESS_BUSY; 263 | MII_ACCESS_WRITE | MII_ACCESS_BUSY;
240 ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); 264 ret = __smsc75xx_write_reg(dev, MII_ACCESS, addr, in_pm);
241 check_warn_goto_done(ret, "Error writing MII_ACCESS"); 265 if (ret < 0) {
266 netdev_warn(dev->net, "Error writing MII_ACCESS\n");
267 goto done;
268 }
242 269
243 ret = smsc75xx_phy_wait_not_busy(dev); 270 ret = __smsc75xx_phy_wait_not_busy(dev, in_pm);
244 check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx); 271 if (ret < 0) {
272 netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
273 goto done;
274 }
245 275
246done: 276done:
247 mutex_unlock(&dev->phy_mutex); 277 mutex_unlock(&dev->phy_mutex);
248} 278}
249 279
280static int smsc75xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
281 int idx)
282{
283 return __smsc75xx_mdio_read(netdev, phy_id, idx, 1);
284}
285
286static void smsc75xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
287 int idx, int regval)
288{
289 __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 1);
290}
291
292static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
293{
294 return __smsc75xx_mdio_read(netdev, phy_id, idx, 0);
295}
296
297static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
298 int regval)
299{
300 __smsc75xx_mdio_write(netdev, phy_id, idx, regval, 0);
301}
302
250static int smsc75xx_wait_eeprom(struct usbnet *dev) 303static int smsc75xx_wait_eeprom(struct usbnet *dev)
251{ 304{
252 unsigned long start_time = jiffies; 305 unsigned long start_time = jiffies;
@@ -255,7 +308,10 @@ static int smsc75xx_wait_eeprom(struct usbnet *dev)
255 308
256 do { 309 do {
257 ret = smsc75xx_read_reg(dev, E2P_CMD, &val); 310 ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
258 check_warn_return(ret, "Error reading E2P_CMD"); 311 if (ret < 0) {
312 netdev_warn(dev->net, "Error reading E2P_CMD\n");
313 return ret;
314 }
259 315
260 if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT)) 316 if (!(val & E2P_CMD_BUSY) || (val & E2P_CMD_TIMEOUT))
261 break; 317 break;
@@ -263,7 +319,7 @@ static int smsc75xx_wait_eeprom(struct usbnet *dev)
263 } while (!time_after(jiffies, start_time + HZ)); 319 } while (!time_after(jiffies, start_time + HZ));
264 320
265 if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) { 321 if (val & (E2P_CMD_TIMEOUT | E2P_CMD_BUSY)) {
266 netdev_warn(dev->net, "EEPROM read operation timeout"); 322 netdev_warn(dev->net, "EEPROM read operation timeout\n");
267 return -EIO; 323 return -EIO;
268 } 324 }
269 325
@@ -278,7 +334,10 @@ static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
278 334
279 do { 335 do {
280 ret = smsc75xx_read_reg(dev, E2P_CMD, &val); 336 ret = smsc75xx_read_reg(dev, E2P_CMD, &val);
281 check_warn_return(ret, "Error reading E2P_CMD"); 337 if (ret < 0) {
338 netdev_warn(dev->net, "Error reading E2P_CMD\n");
339 return ret;
340 }
282 341
283 if (!(val & E2P_CMD_BUSY)) 342 if (!(val & E2P_CMD_BUSY))
284 return 0; 343 return 0;
@@ -286,7 +345,7 @@ static int smsc75xx_eeprom_confirm_not_busy(struct usbnet *dev)
286 udelay(40); 345 udelay(40);
287 } while (!time_after(jiffies, start_time + HZ)); 346 } while (!time_after(jiffies, start_time + HZ));
288 347
289 netdev_warn(dev->net, "EEPROM is busy"); 348 netdev_warn(dev->net, "EEPROM is busy\n");
290 return -EIO; 349 return -EIO;
291} 350}
292 351
@@ -306,14 +365,20 @@ static int smsc75xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
306 for (i = 0; i < length; i++) { 365 for (i = 0; i < length; i++) {
307 val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR); 366 val = E2P_CMD_BUSY | E2P_CMD_READ | (offset & E2P_CMD_ADDR);
308 ret = smsc75xx_write_reg(dev, E2P_CMD, val); 367 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
309 check_warn_return(ret, "Error writing E2P_CMD"); 368 if (ret < 0) {
369 netdev_warn(dev->net, "Error writing E2P_CMD\n");
370 return ret;
371 }
310 372
311 ret = smsc75xx_wait_eeprom(dev); 373 ret = smsc75xx_wait_eeprom(dev);
312 if (ret < 0) 374 if (ret < 0)
313 return ret; 375 return ret;
314 376
315 ret = smsc75xx_read_reg(dev, E2P_DATA, &val); 377 ret = smsc75xx_read_reg(dev, E2P_DATA, &val);
316 check_warn_return(ret, "Error reading E2P_DATA"); 378 if (ret < 0) {
379 netdev_warn(dev->net, "Error reading E2P_DATA\n");
380 return ret;
381 }
317 382
318 data[i] = val & 0xFF; 383 data[i] = val & 0xFF;
319 offset++; 384 offset++;
@@ -338,7 +403,10 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
338 /* Issue write/erase enable command */ 403 /* Issue write/erase enable command */
339 val = E2P_CMD_BUSY | E2P_CMD_EWEN; 404 val = E2P_CMD_BUSY | E2P_CMD_EWEN;
340 ret = smsc75xx_write_reg(dev, E2P_CMD, val); 405 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
341 check_warn_return(ret, "Error writing E2P_CMD"); 406 if (ret < 0) {
407 netdev_warn(dev->net, "Error writing E2P_CMD\n");
408 return ret;
409 }
342 410
343 ret = smsc75xx_wait_eeprom(dev); 411 ret = smsc75xx_wait_eeprom(dev);
344 if (ret < 0) 412 if (ret < 0)
@@ -349,12 +417,18 @@ static int smsc75xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
349 /* Fill data register */ 417 /* Fill data register */
350 val = data[i]; 418 val = data[i];
351 ret = smsc75xx_write_reg(dev, E2P_DATA, val); 419 ret = smsc75xx_write_reg(dev, E2P_DATA, val);
352 check_warn_return(ret, "Error writing E2P_DATA"); 420 if (ret < 0) {
421 netdev_warn(dev->net, "Error writing E2P_DATA\n");
422 return ret;
423 }
353 424
354 /* Send "write" command */ 425 /* Send "write" command */
355 val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR); 426 val = E2P_CMD_BUSY | E2P_CMD_WRITE | (offset & E2P_CMD_ADDR);
356 ret = smsc75xx_write_reg(dev, E2P_CMD, val); 427 ret = smsc75xx_write_reg(dev, E2P_CMD, val);
357 check_warn_return(ret, "Error writing E2P_CMD"); 428 if (ret < 0) {
429 netdev_warn(dev->net, "Error writing E2P_CMD\n");
430 return ret;
431 }
358 432
359 ret = smsc75xx_wait_eeprom(dev); 433 ret = smsc75xx_wait_eeprom(dev);
360 if (ret < 0) 434 if (ret < 0)
@@ -373,7 +447,10 @@ static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
373 for (i = 0; i < 100; i++) { 447 for (i = 0; i < 100; i++) {
374 u32 dp_sel; 448 u32 dp_sel;
375 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); 449 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
376 check_warn_return(ret, "Error reading DP_SEL"); 450 if (ret < 0) {
451 netdev_warn(dev->net, "Error reading DP_SEL\n");
452 return ret;
453 }
377 454
378 if (dp_sel & DP_SEL_DPRDY) 455 if (dp_sel & DP_SEL_DPRDY)
379 return 0; 456 return 0;
@@ -381,7 +458,7 @@ static int smsc75xx_dataport_wait_not_busy(struct usbnet *dev)
381 udelay(40); 458 udelay(40);
382 } 459 }
383 460
384 netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out"); 461 netdev_warn(dev->net, "smsc75xx_dataport_wait_not_busy timed out\n");
385 462
386 return -EIO; 463 return -EIO;
387} 464}
@@ -396,28 +473,49 @@ static int smsc75xx_dataport_write(struct usbnet *dev, u32 ram_select, u32 addr,
396 mutex_lock(&pdata->dataport_mutex); 473 mutex_lock(&pdata->dataport_mutex);
397 474
398 ret = smsc75xx_dataport_wait_not_busy(dev); 475 ret = smsc75xx_dataport_wait_not_busy(dev);
399 check_warn_goto_done(ret, "smsc75xx_dataport_write busy on entry"); 476 if (ret < 0) {
477 netdev_warn(dev->net, "smsc75xx_dataport_write busy on entry\n");
478 goto done;
479 }
400 480
401 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel); 481 ret = smsc75xx_read_reg(dev, DP_SEL, &dp_sel);
402 check_warn_goto_done(ret, "Error reading DP_SEL"); 482 if (ret < 0) {
483 netdev_warn(dev->net, "Error reading DP_SEL\n");
484 goto done;
485 }
403 486
404 dp_sel &= ~DP_SEL_RSEL; 487 dp_sel &= ~DP_SEL_RSEL;
405 dp_sel |= ram_select; 488 dp_sel |= ram_select;
406 ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel); 489 ret = smsc75xx_write_reg(dev, DP_SEL, dp_sel);
407 check_warn_goto_done(ret, "Error writing DP_SEL"); 490 if (ret < 0) {
491 netdev_warn(dev->net, "Error writing DP_SEL\n");
492 goto done;
493 }
408 494
409 for (i = 0; i < length; i++) { 495 for (i = 0; i < length; i++) {
410 ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i); 496 ret = smsc75xx_write_reg(dev, DP_ADDR, addr + i);
411 check_warn_goto_done(ret, "Error writing DP_ADDR"); 497 if (ret < 0) {
498 netdev_warn(dev->net, "Error writing DP_ADDR\n");
499 goto done;
500 }
412 501
413 ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]); 502 ret = smsc75xx_write_reg(dev, DP_DATA, buf[i]);
414 check_warn_goto_done(ret, "Error writing DP_DATA"); 503 if (ret < 0) {
504 netdev_warn(dev->net, "Error writing DP_DATA\n");
505 goto done;
506 }
415 507
416 ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE); 508 ret = smsc75xx_write_reg(dev, DP_CMD, DP_CMD_WRITE);
417 check_warn_goto_done(ret, "Error writing DP_CMD"); 509 if (ret < 0) {
510 netdev_warn(dev->net, "Error writing DP_CMD\n");
511 goto done;
512 }
418 513
419 ret = smsc75xx_dataport_wait_not_busy(dev); 514 ret = smsc75xx_dataport_wait_not_busy(dev);
420 check_warn_goto_done(ret, "smsc75xx_dataport_write timeout"); 515 if (ret < 0) {
516 netdev_warn(dev->net, "smsc75xx_dataport_write timeout\n");
517 goto done;
518 }
421 } 519 }
422 520
423done: 521done:
@@ -438,14 +536,15 @@ static void smsc75xx_deferred_multicast_write(struct work_struct *param)
438 struct usbnet *dev = pdata->dev; 536 struct usbnet *dev = pdata->dev;
439 int ret; 537 int ret;
440 538
441 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x", 539 netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
442 pdata->rfe_ctl); 540 pdata->rfe_ctl);
443 541
444 smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN, 542 smsc75xx_dataport_write(dev, DP_SEL_VHF, DP_SEL_VHF_VLAN_LEN,
445 DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table); 543 DP_SEL_VHF_HASH_LEN, pdata->multicast_hash_table);
446 544
447 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 545 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
448 check_warn(ret, "Error writing RFE_CRL"); 546 if (ret < 0)
547 netdev_warn(dev->net, "Error writing RFE_CRL\n");
449} 548}
450 549
451static void smsc75xx_set_multicast(struct net_device *netdev) 550static void smsc75xx_set_multicast(struct net_device *netdev)
@@ -465,15 +564,15 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
465 pdata->multicast_hash_table[i] = 0; 564 pdata->multicast_hash_table[i] = 0;
466 565
467 if (dev->net->flags & IFF_PROMISC) { 566 if (dev->net->flags & IFF_PROMISC) {
468 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); 567 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
469 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU; 568 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_AU;
470 } else if (dev->net->flags & IFF_ALLMULTI) { 569 } else if (dev->net->flags & IFF_ALLMULTI) {
471 netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); 570 netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
472 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; 571 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
473 } else if (!netdev_mc_empty(dev->net)) { 572 } else if (!netdev_mc_empty(dev->net)) {
474 struct netdev_hw_addr *ha; 573 struct netdev_hw_addr *ha;
475 574
476 netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); 575 netif_dbg(dev, drv, dev->net, "receive multicast hash filter\n");
477 576
478 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; 577 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
479 578
@@ -483,7 +582,7 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
483 (1 << (bitnum % 32)); 582 (1 << (bitnum % 32));
484 } 583 }
485 } else { 584 } else {
486 netif_dbg(dev, drv, dev->net, "receive own packets only"); 585 netif_dbg(dev, drv, dev->net, "receive own packets only\n");
487 pdata->rfe_ctl |= RFE_CTL_DPF; 586 pdata->rfe_ctl |= RFE_CTL_DPF;
488 } 587 }
489 588
@@ -511,18 +610,24 @@ static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
511 if (cap & FLOW_CTRL_RX) 610 if (cap & FLOW_CTRL_RX)
512 flow |= FLOW_RX_FCEN; 611 flow |= FLOW_RX_FCEN;
513 612
514 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", 613 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
515 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), 614 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
516 (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); 615 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
517 } else { 616 } else {
518 netif_dbg(dev, link, dev->net, "half duplex"); 617 netif_dbg(dev, link, dev->net, "half duplex\n");
519 } 618 }
520 619
521 ret = smsc75xx_write_reg(dev, FLOW, flow); 620 ret = smsc75xx_write_reg(dev, FLOW, flow);
522 check_warn_return(ret, "Error writing FLOW"); 621 if (ret < 0) {
622 netdev_warn(dev->net, "Error writing FLOW\n");
623 return ret;
624 }
523 625
524 ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow); 626 ret = smsc75xx_write_reg(dev, FCT_FLOW, fct_flow);
525 check_warn_return(ret, "Error writing FCT_FLOW"); 627 if (ret < 0) {
628 netdev_warn(dev->net, "Error writing FCT_FLOW\n");
629 return ret;
630 }
526 631
527 return 0; 632 return 0;
528} 633}
@@ -539,16 +644,18 @@ static int smsc75xx_link_reset(struct usbnet *dev)
539 PHY_INT_SRC_CLEAR_ALL); 644 PHY_INT_SRC_CLEAR_ALL);
540 645
541 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 646 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
542 check_warn_return(ret, "Error writing INT_STS"); 647 if (ret < 0) {
648 netdev_warn(dev->net, "Error writing INT_STS\n");
649 return ret;
650 }
543 651
544 mii_check_media(mii, 1, 1); 652 mii_check_media(mii, 1, 1);
545 mii_ethtool_gset(&dev->mii, &ecmd); 653 mii_ethtool_gset(&dev->mii, &ecmd);
546 lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE); 654 lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
547 rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA); 655 rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
548 656
549 netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x" 657 netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
550 " rmtadv: %04x", ethtool_cmd_speed(&ecmd), 658 ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
551 ecmd.duplex, lcladv, rmtadv);
552 659
553 return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 660 return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
554} 661}
@@ -558,21 +665,21 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
558 u32 intdata; 665 u32 intdata;
559 666
560 if (urb->actual_length != 4) { 667 if (urb->actual_length != 4) {
561 netdev_warn(dev->net, 668 netdev_warn(dev->net, "unexpected urb length %d\n",
562 "unexpected urb length %d", urb->actual_length); 669 urb->actual_length);
563 return; 670 return;
564 } 671 }
565 672
566 memcpy(&intdata, urb->transfer_buffer, 4); 673 memcpy(&intdata, urb->transfer_buffer, 4);
567 le32_to_cpus(&intdata); 674 le32_to_cpus(&intdata);
568 675
569 netif_dbg(dev, link, dev->net, "intdata: 0x%08X", intdata); 676 netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
570 677
571 if (intdata & INT_ENP_PHY_INT) 678 if (intdata & INT_ENP_PHY_INT)
572 usbnet_defer_kevent(dev, EVENT_LINK_RESET); 679 usbnet_defer_kevent(dev, EVENT_LINK_RESET);
573 else 680 else
574 netdev_warn(dev->net, 681 netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
575 "unexpected interrupt, intdata=0x%08X", intdata); 682 intdata);
576} 683}
577 684
578static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net) 685static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
@@ -596,8 +703,8 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
596 struct usbnet *dev = netdev_priv(netdev); 703 struct usbnet *dev = netdev_priv(netdev);
597 704
598 if (ee->magic != LAN75XX_EEPROM_MAGIC) { 705 if (ee->magic != LAN75XX_EEPROM_MAGIC) {
599 netdev_warn(dev->net, 706 netdev_warn(dev->net, "EEPROM: magic value mismatch: 0x%x\n",
600 "EEPROM: magic value mismatch: 0x%x", ee->magic); 707 ee->magic);
601 return -EINVAL; 708 return -EINVAL;
602 } 709 }
603 710
@@ -619,9 +726,15 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
619{ 726{
620 struct usbnet *dev = netdev_priv(net); 727 struct usbnet *dev = netdev_priv(net);
621 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 728 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
729 int ret;
622 730
623 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; 731 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
624 return 0; 732
733 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
734 if (ret < 0)
735 netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret);
736
737 return ret;
625} 738}
626 739
627static const struct ethtool_ops smsc75xx_ethtool_ops = { 740static const struct ethtool_ops smsc75xx_ethtool_ops = {
@@ -657,14 +770,14 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
657 if (is_valid_ether_addr(dev->net->dev_addr)) { 770 if (is_valid_ether_addr(dev->net->dev_addr)) {
658 /* eeprom values are valid so use them */ 771 /* eeprom values are valid so use them */
659 netif_dbg(dev, ifup, dev->net, 772 netif_dbg(dev, ifup, dev->net,
660 "MAC address read from EEPROM"); 773 "MAC address read from EEPROM\n");
661 return; 774 return;
662 } 775 }
663 } 776 }
664 777
665 /* no eeprom, or eeprom values are invalid. generate random MAC */ 778 /* no eeprom, or eeprom values are invalid. generate random MAC */
666 eth_hw_addr_random(dev->net); 779 eth_hw_addr_random(dev->net);
667 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr"); 780 netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
668} 781}
669 782
670static int smsc75xx_set_mac_address(struct usbnet *dev) 783static int smsc75xx_set_mac_address(struct usbnet *dev)
@@ -674,19 +787,29 @@ static int smsc75xx_set_mac_address(struct usbnet *dev)
674 u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8; 787 u32 addr_hi = dev->net->dev_addr[4] | dev->net->dev_addr[5] << 8;
675 788
676 int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi); 789 int ret = smsc75xx_write_reg(dev, RX_ADDRH, addr_hi);
677 check_warn_return(ret, "Failed to write RX_ADDRH: %d", ret); 790 if (ret < 0) {
791 netdev_warn(dev->net, "Failed to write RX_ADDRH: %d\n", ret);
792 return ret;
793 }
678 794
679 ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo); 795 ret = smsc75xx_write_reg(dev, RX_ADDRL, addr_lo);
680 check_warn_return(ret, "Failed to write RX_ADDRL: %d", ret); 796 if (ret < 0) {
797 netdev_warn(dev->net, "Failed to write RX_ADDRL: %d\n", ret);
798 return ret;
799 }
681 800
682 addr_hi |= ADDR_FILTX_FB_VALID; 801 addr_hi |= ADDR_FILTX_FB_VALID;
683 ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi); 802 ret = smsc75xx_write_reg(dev, ADDR_FILTX, addr_hi);
684 check_warn_return(ret, "Failed to write ADDR_FILTX: %d", ret); 803 if (ret < 0) {
804 netdev_warn(dev->net, "Failed to write ADDR_FILTX: %d\n", ret);
805 return ret;
806 }
685 807
686 ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo); 808 ret = smsc75xx_write_reg(dev, ADDR_FILTX + 4, addr_lo);
687 check_warn_return(ret, "Failed to write ADDR_FILTX+4: %d", ret); 809 if (ret < 0)
810 netdev_warn(dev->net, "Failed to write ADDR_FILTX+4: %d\n", ret);
688 811
689 return 0; 812 return ret;
690} 813}
691 814
692static int smsc75xx_phy_initialize(struct usbnet *dev) 815static int smsc75xx_phy_initialize(struct usbnet *dev)
@@ -708,12 +831,15 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
708 do { 831 do {
709 msleep(10); 832 msleep(10);
710 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); 833 bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
711 check_warn_return(bmcr, "Error reading MII_BMCR"); 834 if (bmcr < 0) {
835 netdev_warn(dev->net, "Error reading MII_BMCR\n");
836 return bmcr;
837 }
712 timeout++; 838 timeout++;
713 } while ((bmcr & BMCR_RESET) && (timeout < 100)); 839 } while ((bmcr & BMCR_RESET) && (timeout < 100));
714 840
715 if (timeout >= 100) { 841 if (timeout >= 100) {
716 netdev_warn(dev->net, "timeout on PHY Reset"); 842 netdev_warn(dev->net, "timeout on PHY Reset\n");
717 return -EIO; 843 return -EIO;
718 } 844 }
719 845
@@ -725,14 +851,18 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
725 851
726 /* read and write to clear phy interrupt status */ 852 /* read and write to clear phy interrupt status */
727 ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 853 ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
728 check_warn_return(ret, "Error reading PHY_INT_SRC"); 854 if (ret < 0) {
855 netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
856 return ret;
857 }
858
729 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); 859 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff);
730 860
731 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 861 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
732 PHY_INT_MASK_DEFAULT); 862 PHY_INT_MASK_DEFAULT);
733 mii_nway_restart(&dev->mii); 863 mii_nway_restart(&dev->mii);
734 864
735 netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); 865 netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
736 return 0; 866 return 0;
737} 867}
738 868
@@ -743,14 +873,20 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
743 bool rxenabled; 873 bool rxenabled;
744 874
745 ret = smsc75xx_read_reg(dev, MAC_RX, &buf); 875 ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
746 check_warn_return(ret, "Failed to read MAC_RX: %d", ret); 876 if (ret < 0) {
877 netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
878 return ret;
879 }
747 880
748 rxenabled = ((buf & MAC_RX_RXEN) != 0); 881 rxenabled = ((buf & MAC_RX_RXEN) != 0);
749 882
750 if (rxenabled) { 883 if (rxenabled) {
751 buf &= ~MAC_RX_RXEN; 884 buf &= ~MAC_RX_RXEN;
752 ret = smsc75xx_write_reg(dev, MAC_RX, buf); 885 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
753 check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 886 if (ret < 0) {
887 netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
888 return ret;
889 }
754 } 890 }
755 891
756 /* add 4 to size for FCS */ 892 /* add 4 to size for FCS */
@@ -758,12 +894,18 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
758 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE); 894 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT) & MAC_RX_MAX_SIZE);
759 895
760 ret = smsc75xx_write_reg(dev, MAC_RX, buf); 896 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
761 check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 897 if (ret < 0) {
898 netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
899 return ret;
900 }
762 901
763 if (rxenabled) { 902 if (rxenabled) {
764 buf |= MAC_RX_RXEN; 903 buf |= MAC_RX_RXEN;
765 ret = smsc75xx_write_reg(dev, MAC_RX, buf); 904 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
766 check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 905 if (ret < 0) {
906 netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
907 return ret;
908 }
767 } 909 }
768 910
769 return 0; 911 return 0;
@@ -774,7 +916,10 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
774 struct usbnet *dev = netdev_priv(netdev); 916 struct usbnet *dev = netdev_priv(netdev);
775 917
776 int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); 918 int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
777 check_warn_return(ret, "Failed to set mac rx frame length"); 919 if (ret < 0) {
920 netdev_warn(dev->net, "Failed to set mac rx frame length\n");
921 return ret;
922 }
778 923
779 return usbnet_change_mtu(netdev, new_mtu); 924 return usbnet_change_mtu(netdev, new_mtu);
780} 925}
@@ -799,19 +944,26 @@ static int smsc75xx_set_features(struct net_device *netdev,
799 /* it's racing here! */ 944 /* it's racing here! */
800 945
801 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 946 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
802 check_warn_return(ret, "Error writing RFE_CTL"); 947 if (ret < 0)
948 netdev_warn(dev->net, "Error writing RFE_CTL\n");
803 949
804 return 0; 950 return ret;
805} 951}
806 952
807static int smsc75xx_wait_ready(struct usbnet *dev) 953static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
808{ 954{
809 int timeout = 0; 955 int timeout = 0;
810 956
811 do { 957 do {
812 u32 buf; 958 u32 buf;
813 int ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); 959 int ret;
814 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); 960
961 ret = __smsc75xx_read_reg(dev, PMT_CTL, &buf, in_pm);
962
963 if (ret < 0) {
964 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
965 return ret;
966 }
815 967
816 if (buf & PMT_CTL_DEV_RDY) 968 if (buf & PMT_CTL_DEV_RDY)
817 return 0; 969 return 0;
@@ -820,7 +972,7 @@ static int smsc75xx_wait_ready(struct usbnet *dev)
820 timeout++; 972 timeout++;
821 } while (timeout < 100); 973 } while (timeout < 100);
822 974
823 netdev_warn(dev->net, "timeout waiting for device ready"); 975 netdev_warn(dev->net, "timeout waiting for device ready\n");
824 return -EIO; 976 return -EIO;
825} 977}
826 978
@@ -830,79 +982,112 @@ static int smsc75xx_reset(struct usbnet *dev)
830 u32 buf; 982 u32 buf;
831 int ret = 0, timeout; 983 int ret = 0, timeout;
832 984
833 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset"); 985 netif_dbg(dev, ifup, dev->net, "entering smsc75xx_reset\n");
834 986
835 ret = smsc75xx_wait_ready(dev); 987 ret = smsc75xx_wait_ready(dev, 0);
836 check_warn_return(ret, "device not ready in smsc75xx_reset"); 988 if (ret < 0) {
989 netdev_warn(dev->net, "device not ready in smsc75xx_reset\n");
990 return ret;
991 }
837 992
838 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 993 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
839 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 994 if (ret < 0) {
995 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
996 return ret;
997 }
840 998
841 buf |= HW_CFG_LRST; 999 buf |= HW_CFG_LRST;
842 1000
843 ret = smsc75xx_write_reg(dev, HW_CFG, buf); 1001 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
844 check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 1002 if (ret < 0) {
1003 netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
1004 return ret;
1005 }
845 1006
846 timeout = 0; 1007 timeout = 0;
847 do { 1008 do {
848 msleep(10); 1009 msleep(10);
849 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 1010 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
850 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 1011 if (ret < 0) {
1012 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1013 return ret;
1014 }
851 timeout++; 1015 timeout++;
852 } while ((buf & HW_CFG_LRST) && (timeout < 100)); 1016 } while ((buf & HW_CFG_LRST) && (timeout < 100));
853 1017
854 if (timeout >= 100) { 1018 if (timeout >= 100) {
855 netdev_warn(dev->net, "timeout on completion of Lite Reset"); 1019 netdev_warn(dev->net, "timeout on completion of Lite Reset\n");
856 return -EIO; 1020 return -EIO;
857 } 1021 }
858 1022
859 netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY"); 1023 netif_dbg(dev, ifup, dev->net, "Lite reset complete, resetting PHY\n");
860 1024
861 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); 1025 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
862 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); 1026 if (ret < 0) {
1027 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
1028 return ret;
1029 }
863 1030
864 buf |= PMT_CTL_PHY_RST; 1031 buf |= PMT_CTL_PHY_RST;
865 1032
866 ret = smsc75xx_write_reg(dev, PMT_CTL, buf); 1033 ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
867 check_warn_return(ret, "Failed to write PMT_CTL: %d", ret); 1034 if (ret < 0) {
1035 netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
1036 return ret;
1037 }
868 1038
869 timeout = 0; 1039 timeout = 0;
870 do { 1040 do {
871 msleep(10); 1041 msleep(10);
872 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf); 1042 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
873 check_warn_return(ret, "Failed to read PMT_CTL: %d", ret); 1043 if (ret < 0) {
1044 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
1045 return ret;
1046 }
874 timeout++; 1047 timeout++;
875 } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100)); 1048 } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
876 1049
877 if (timeout >= 100) { 1050 if (timeout >= 100) {
878 netdev_warn(dev->net, "timeout waiting for PHY Reset"); 1051 netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
879 return -EIO; 1052 return -EIO;
880 } 1053 }
881 1054
882 netif_dbg(dev, ifup, dev->net, "PHY reset complete"); 1055 netif_dbg(dev, ifup, dev->net, "PHY reset complete\n");
883
884 smsc75xx_init_mac_address(dev);
885 1056
886 ret = smsc75xx_set_mac_address(dev); 1057 ret = smsc75xx_set_mac_address(dev);
887 check_warn_return(ret, "Failed to set mac address"); 1058 if (ret < 0) {
1059 netdev_warn(dev->net, "Failed to set mac address\n");
1060 return ret;
1061 }
888 1062
889 netif_dbg(dev, ifup, dev->net, "MAC Address: %pM", dev->net->dev_addr); 1063 netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
1064 dev->net->dev_addr);
890 1065
891 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 1066 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
892 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 1067 if (ret < 0) {
1068 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1069 return ret;
1070 }
893 1071
894 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x", buf); 1072 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
1073 buf);
895 1074
896 buf |= HW_CFG_BIR; 1075 buf |= HW_CFG_BIR;
897 1076
898 ret = smsc75xx_write_reg(dev, HW_CFG, buf); 1077 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
899 check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 1078 if (ret < 0) {
1079 netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
1080 return ret;
1081 }
900 1082
901 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 1083 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
902 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 1084 if (ret < 0) {
1085 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1086 return ret;
1087 }
903 1088
904 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after " 1089 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG after writing HW_CFG_BIR: 0x%08x\n",
905 "writing HW_CFG_BIR: 0x%08x", buf); 1090 buf);
906 1091
907 if (!turbo_mode) { 1092 if (!turbo_mode) {
908 buf = 0; 1093 buf = 0;
@@ -915,99 +1100,157 @@ static int smsc75xx_reset(struct usbnet *dev)
915 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; 1100 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
916 } 1101 }
917 1102
918 netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld", 1103 netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
919 (ulong)dev->rx_urb_size); 1104 (ulong)dev->rx_urb_size);
920 1105
921 ret = smsc75xx_write_reg(dev, BURST_CAP, buf); 1106 ret = smsc75xx_write_reg(dev, BURST_CAP, buf);
922 check_warn_return(ret, "Failed to write BURST_CAP: %d", ret); 1107 if (ret < 0) {
1108 netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
1109 return ret;
1110 }
923 1111
924 ret = smsc75xx_read_reg(dev, BURST_CAP, &buf); 1112 ret = smsc75xx_read_reg(dev, BURST_CAP, &buf);
925 check_warn_return(ret, "Failed to read BURST_CAP: %d", ret); 1113 if (ret < 0) {
1114 netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
1115 return ret;
1116 }
926 1117
927 netif_dbg(dev, ifup, dev->net, 1118 netif_dbg(dev, ifup, dev->net,
928 "Read Value from BURST_CAP after writing: 0x%08x", buf); 1119 "Read Value from BURST_CAP after writing: 0x%08x\n", buf);
929 1120
930 ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); 1121 ret = smsc75xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
931 check_warn_return(ret, "Failed to write BULK_IN_DLY: %d", ret); 1122 if (ret < 0) {
1123 netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
1124 return ret;
1125 }
932 1126
933 ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf); 1127 ret = smsc75xx_read_reg(dev, BULK_IN_DLY, &buf);
934 check_warn_return(ret, "Failed to read BULK_IN_DLY: %d", ret); 1128 if (ret < 0) {
1129 netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
1130 return ret;
1131 }
935 1132
936 netif_dbg(dev, ifup, dev->net, 1133 netif_dbg(dev, ifup, dev->net,
937 "Read Value from BULK_IN_DLY after writing: 0x%08x", buf); 1134 "Read Value from BULK_IN_DLY after writing: 0x%08x\n", buf);
938 1135
939 if (turbo_mode) { 1136 if (turbo_mode) {
940 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 1137 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
941 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 1138 if (ret < 0) {
1139 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1140 return ret;
1141 }
942 1142
943 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); 1143 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
944 1144
945 buf |= (HW_CFG_MEF | HW_CFG_BCE); 1145 buf |= (HW_CFG_MEF | HW_CFG_BCE);
946 1146
947 ret = smsc75xx_write_reg(dev, HW_CFG, buf); 1147 ret = smsc75xx_write_reg(dev, HW_CFG, buf);
948 check_warn_return(ret, "Failed to write HW_CFG: %d", ret); 1148 if (ret < 0) {
1149 netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
1150 return ret;
1151 }
949 1152
950 ret = smsc75xx_read_reg(dev, HW_CFG, &buf); 1153 ret = smsc75xx_read_reg(dev, HW_CFG, &buf);
951 check_warn_return(ret, "Failed to read HW_CFG: %d", ret); 1154 if (ret < 0) {
1155 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1156 return ret;
1157 }
952 1158
953 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x", buf); 1159 netif_dbg(dev, ifup, dev->net, "HW_CFG: 0x%08x\n", buf);
954 } 1160 }
955 1161
956 /* set FIFO sizes */ 1162 /* set FIFO sizes */
957 buf = (MAX_RX_FIFO_SIZE - 512) / 512; 1163 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
958 ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf); 1164 ret = smsc75xx_write_reg(dev, FCT_RX_FIFO_END, buf);
959 check_warn_return(ret, "Failed to write FCT_RX_FIFO_END: %d", ret); 1165 if (ret < 0) {
1166 netdev_warn(dev->net, "Failed to write FCT_RX_FIFO_END: %d\n", ret);
1167 return ret;
1168 }
960 1169
961 netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x", buf); 1170 netif_dbg(dev, ifup, dev->net, "FCT_RX_FIFO_END set to 0x%08x\n", buf);
962 1171
963 buf = (MAX_TX_FIFO_SIZE - 512) / 512; 1172 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
964 ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf); 1173 ret = smsc75xx_write_reg(dev, FCT_TX_FIFO_END, buf);
965 check_warn_return(ret, "Failed to write FCT_TX_FIFO_END: %d", ret); 1174 if (ret < 0) {
1175 netdev_warn(dev->net, "Failed to write FCT_TX_FIFO_END: %d\n", ret);
1176 return ret;
1177 }
966 1178
967 netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x", buf); 1179 netif_dbg(dev, ifup, dev->net, "FCT_TX_FIFO_END set to 0x%08x\n", buf);
968 1180
969 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); 1181 ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL);
970 check_warn_return(ret, "Failed to write INT_STS: %d", ret); 1182 if (ret < 0) {
1183 netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
1184 return ret;
1185 }
971 1186
972 ret = smsc75xx_read_reg(dev, ID_REV, &buf); 1187 ret = smsc75xx_read_reg(dev, ID_REV, &buf);
973 check_warn_return(ret, "Failed to read ID_REV: %d", ret); 1188 if (ret < 0) {
1189 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
1190 return ret;
1191 }
974 1192
975 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x", buf); 1193 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", buf);
976 1194
977 ret = smsc75xx_read_reg(dev, E2P_CMD, &buf); 1195 ret = smsc75xx_read_reg(dev, E2P_CMD, &buf);
978 check_warn_return(ret, "Failed to read E2P_CMD: %d", ret); 1196 if (ret < 0) {
1197 netdev_warn(dev->net, "Failed to read E2P_CMD: %d\n", ret);
1198 return ret;
1199 }
979 1200
980 /* only set default GPIO/LED settings if no EEPROM is detected */ 1201 /* only set default GPIO/LED settings if no EEPROM is detected */
981 if (!(buf & E2P_CMD_LOADED)) { 1202 if (!(buf & E2P_CMD_LOADED)) {
982 ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf); 1203 ret = smsc75xx_read_reg(dev, LED_GPIO_CFG, &buf);
983 check_warn_return(ret, "Failed to read LED_GPIO_CFG: %d", ret); 1204 if (ret < 0) {
1205 netdev_warn(dev->net, "Failed to read LED_GPIO_CFG: %d\n", ret);
1206 return ret;
1207 }
984 1208
985 buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL); 1209 buf &= ~(LED_GPIO_CFG_LED2_FUN_SEL | LED_GPIO_CFG_LED10_FUN_SEL);
986 buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL; 1210 buf |= LED_GPIO_CFG_LEDGPIO_EN | LED_GPIO_CFG_LED2_FUN_SEL;
987 1211
988 ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf); 1212 ret = smsc75xx_write_reg(dev, LED_GPIO_CFG, buf);
989 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d", ret); 1213 if (ret < 0) {
1214 netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
1215 return ret;
1216 }
990 } 1217 }
991 1218
992 ret = smsc75xx_write_reg(dev, FLOW, 0); 1219 ret = smsc75xx_write_reg(dev, FLOW, 0);
993 check_warn_return(ret, "Failed to write FLOW: %d", ret); 1220 if (ret < 0) {
1221 netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
1222 return ret;
1223 }
994 1224
995 ret = smsc75xx_write_reg(dev, FCT_FLOW, 0); 1225 ret = smsc75xx_write_reg(dev, FCT_FLOW, 0);
996 check_warn_return(ret, "Failed to write FCT_FLOW: %d", ret); 1226 if (ret < 0) {
1227 netdev_warn(dev->net, "Failed to write FCT_FLOW: %d\n", ret);
1228 return ret;
1229 }
997 1230
998 /* Don't need rfe_ctl_lock during initialisation */ 1231 /* Don't need rfe_ctl_lock during initialisation */
999 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); 1232 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1000 check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); 1233 if (ret < 0) {
1234 netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret);
1235 return ret;
1236 }
1001 1237
1002 pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF; 1238 pdata->rfe_ctl |= RFE_CTL_AB | RFE_CTL_DPF;
1003 1239
1004 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); 1240 ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1005 check_warn_return(ret, "Failed to write RFE_CTL: %d", ret); 1241 if (ret < 0) {
1242 netdev_warn(dev->net, "Failed to write RFE_CTL: %d\n", ret);
1243 return ret;
1244 }
1006 1245
1007 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); 1246 ret = smsc75xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
1008 check_warn_return(ret, "Failed to read RFE_CTL: %d", ret); 1247 if (ret < 0) {
1248 netdev_warn(dev->net, "Failed to read RFE_CTL: %d\n", ret);
1249 return ret;
1250 }
1009 1251
1010 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl); 1252 netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x\n",
1253 pdata->rfe_ctl);
1011 1254
1012 /* Enable or disable checksum offload engines */ 1255 /* Enable or disable checksum offload engines */
1013 smsc75xx_set_features(dev->net, dev->net->features); 1256 smsc75xx_set_features(dev->net, dev->net->features);
@@ -1015,69 +1258,111 @@ static int smsc75xx_reset(struct usbnet *dev)
1015 smsc75xx_set_multicast(dev->net); 1258 smsc75xx_set_multicast(dev->net);
1016 1259
1017 ret = smsc75xx_phy_initialize(dev); 1260 ret = smsc75xx_phy_initialize(dev);
1018 check_warn_return(ret, "Failed to initialize PHY: %d", ret); 1261 if (ret < 0) {
1262 netdev_warn(dev->net, "Failed to initialize PHY: %d\n", ret);
1263 return ret;
1264 }
1019 1265
1020 ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf); 1266 ret = smsc75xx_read_reg(dev, INT_EP_CTL, &buf);
1021 check_warn_return(ret, "Failed to read INT_EP_CTL: %d", ret); 1267 if (ret < 0) {
1268 netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
1269 return ret;
1270 }
1022 1271
1023 /* enable PHY interrupts */ 1272 /* enable PHY interrupts */
1024 buf |= INT_ENP_PHY_INT; 1273 buf |= INT_ENP_PHY_INT;
1025 1274
1026 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); 1275 ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf);
1027 check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); 1276 if (ret < 0) {
1277 netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
1278 return ret;
1279 }
1028 1280
1029 /* allow mac to detect speed and duplex from phy */ 1281 /* allow mac to detect speed and duplex from phy */
1030 ret = smsc75xx_read_reg(dev, MAC_CR, &buf); 1282 ret = smsc75xx_read_reg(dev, MAC_CR, &buf);
1031 check_warn_return(ret, "Failed to read MAC_CR: %d", ret); 1283 if (ret < 0) {
1284 netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
1285 return ret;
1286 }
1032 1287
1033 buf |= (MAC_CR_ADD | MAC_CR_ASD); 1288 buf |= (MAC_CR_ADD | MAC_CR_ASD);
1034 ret = smsc75xx_write_reg(dev, MAC_CR, buf); 1289 ret = smsc75xx_write_reg(dev, MAC_CR, buf);
1035 check_warn_return(ret, "Failed to write MAC_CR: %d", ret); 1290 if (ret < 0) {
1291 netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
1292 return ret;
1293 }
1036 1294
1037 ret = smsc75xx_read_reg(dev, MAC_TX, &buf); 1295 ret = smsc75xx_read_reg(dev, MAC_TX, &buf);
1038 check_warn_return(ret, "Failed to read MAC_TX: %d", ret); 1296 if (ret < 0) {
1297 netdev_warn(dev->net, "Failed to read MAC_TX: %d\n", ret);
1298 return ret;
1299 }
1039 1300
1040 buf |= MAC_TX_TXEN; 1301 buf |= MAC_TX_TXEN;
1041 1302
1042 ret = smsc75xx_write_reg(dev, MAC_TX, buf); 1303 ret = smsc75xx_write_reg(dev, MAC_TX, buf);
1043 check_warn_return(ret, "Failed to write MAC_TX: %d", ret); 1304 if (ret < 0) {
1305 netdev_warn(dev->net, "Failed to write MAC_TX: %d\n", ret);
1306 return ret;
1307 }
1044 1308
1045 netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x", buf); 1309 netif_dbg(dev, ifup, dev->net, "MAC_TX set to 0x%08x\n", buf);
1046 1310
1047 ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf); 1311 ret = smsc75xx_read_reg(dev, FCT_TX_CTL, &buf);
1048 check_warn_return(ret, "Failed to read FCT_TX_CTL: %d", ret); 1312 if (ret < 0) {
1313 netdev_warn(dev->net, "Failed to read FCT_TX_CTL: %d\n", ret);
1314 return ret;
1315 }
1049 1316
1050 buf |= FCT_TX_CTL_EN; 1317 buf |= FCT_TX_CTL_EN;
1051 1318
1052 ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf); 1319 ret = smsc75xx_write_reg(dev, FCT_TX_CTL, buf);
1053 check_warn_return(ret, "Failed to write FCT_TX_CTL: %d", ret); 1320 if (ret < 0) {
1321 netdev_warn(dev->net, "Failed to write FCT_TX_CTL: %d\n", ret);
1322 return ret;
1323 }
1054 1324
1055 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x", buf); 1325 netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
1056 1326
1057 ret = smsc75xx_set_rx_max_frame_length(dev, 1514); 1327 ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
1058 check_warn_return(ret, "Failed to set max rx frame length"); 1328 if (ret < 0) {
1329 netdev_warn(dev->net, "Failed to set max rx frame length\n");
1330 return ret;
1331 }
1059 1332
1060 ret = smsc75xx_read_reg(dev, MAC_RX, &buf); 1333 ret = smsc75xx_read_reg(dev, MAC_RX, &buf);
1061 check_warn_return(ret, "Failed to read MAC_RX: %d", ret); 1334 if (ret < 0) {
1335 netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
1336 return ret;
1337 }
1062 1338
1063 buf |= MAC_RX_RXEN; 1339 buf |= MAC_RX_RXEN;
1064 1340
1065 ret = smsc75xx_write_reg(dev, MAC_RX, buf); 1341 ret = smsc75xx_write_reg(dev, MAC_RX, buf);
1066 check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 1342 if (ret < 0) {
1343 netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
1344 return ret;
1345 }
1067 1346
1068 netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x", buf); 1347 netif_dbg(dev, ifup, dev->net, "MAC_RX set to 0x%08x\n", buf);
1069 1348
1070 ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf); 1349 ret = smsc75xx_read_reg(dev, FCT_RX_CTL, &buf);
1071 check_warn_return(ret, "Failed to read FCT_RX_CTL: %d", ret); 1350 if (ret < 0) {
1351 netdev_warn(dev->net, "Failed to read FCT_RX_CTL: %d\n", ret);
1352 return ret;
1353 }
1072 1354
1073 buf |= FCT_RX_CTL_EN; 1355 buf |= FCT_RX_CTL_EN;
1074 1356
1075 ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf); 1357 ret = smsc75xx_write_reg(dev, FCT_RX_CTL, buf);
1076 check_warn_return(ret, "Failed to write FCT_RX_CTL: %d", ret); 1358 if (ret < 0) {
1359 netdev_warn(dev->net, "Failed to write FCT_RX_CTL: %d\n", ret);
1360 return ret;
1361 }
1077 1362
1078 netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x", buf); 1363 netif_dbg(dev, ifup, dev->net, "FCT_RX_CTL set to 0x%08x\n", buf);
1079 1364
1080 netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0"); 1365 netif_dbg(dev, ifup, dev->net, "smsc75xx_reset, return 0\n");
1081 return 0; 1366 return 0;
1082} 1367}
1083 1368
@@ -1102,14 +1387,17 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1102 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); 1387 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
1103 1388
1104 ret = usbnet_get_endpoints(dev, intf); 1389 ret = usbnet_get_endpoints(dev, intf);
1105 check_warn_return(ret, "usbnet_get_endpoints failed: %d", ret); 1390 if (ret < 0) {
1391 netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
1392 return ret;
1393 }
1106 1394
1107 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv), 1395 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc75xx_priv),
1108 GFP_KERNEL); 1396 GFP_KERNEL);
1109 1397
1110 pdata = (struct smsc75xx_priv *)(dev->data[0]); 1398 pdata = (struct smsc75xx_priv *)(dev->data[0]);
1111 if (!pdata) { 1399 if (!pdata) {
1112 netdev_warn(dev->net, "Unable to allocate smsc75xx_priv"); 1400 netdev_warn(dev->net, "Unable to allocate smsc75xx_priv\n");
1113 return -ENOMEM; 1401 return -ENOMEM;
1114 } 1402 }
1115 1403
@@ -1132,8 +1420,20 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
1132 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 1420 dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1133 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; 1421 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
1134 1422
1423 ret = smsc75xx_wait_ready(dev, 0);
1424 if (ret < 0) {
1425 netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
1426 return ret;
1427 }
1428
1429 smsc75xx_init_mac_address(dev);
1430
1135 /* Init all registers */ 1431 /* Init all registers */
1136 ret = smsc75xx_reset(dev); 1432 ret = smsc75xx_reset(dev);
1433 if (ret < 0) {
1434 netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
1435 return ret;
1436 }
1137 1437
1138 dev->net->netdev_ops = &smsc75xx_netdev_ops; 1438 dev->net->netdev_ops = &smsc75xx_netdev_ops;
1139 dev->net->ethtool_ops = &smsc75xx_ethtool_ops; 1439 dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
@@ -1147,172 +1447,647 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1147{ 1447{
1148 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1448 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1149 if (pdata) { 1449 if (pdata) {
1150 netif_dbg(dev, ifdown, dev->net, "free pdata"); 1450 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
1151 kfree(pdata); 1451 kfree(pdata);
1152 pdata = NULL; 1452 pdata = NULL;
1153 dev->data[0] = 0; 1453 dev->data[0] = 0;
1154 } 1454 }
1155} 1455}
1156 1456
1457static u16 smsc_crc(const u8 *buffer, size_t len)
1458{
1459 return bitrev16(crc16(0xFFFF, buffer, len));
1460}
1461
1462static int smsc75xx_write_wuff(struct usbnet *dev, int filter, u32 wuf_cfg,
1463 u32 wuf_mask1)
1464{
1465 int cfg_base = WUF_CFGX + filter * 4;
1466 int mask_base = WUF_MASKX + filter * 16;
1467 int ret;
1468
1469 ret = smsc75xx_write_reg(dev, cfg_base, wuf_cfg);
1470 if (ret < 0) {
1471 netdev_warn(dev->net, "Error writing WUF_CFGX\n");
1472 return ret;
1473 }
1474
1475 ret = smsc75xx_write_reg(dev, mask_base, wuf_mask1);
1476 if (ret < 0) {
1477 netdev_warn(dev->net, "Error writing WUF_MASKX\n");
1478 return ret;
1479 }
1480
1481 ret = smsc75xx_write_reg(dev, mask_base + 4, 0);
1482 if (ret < 0) {
1483 netdev_warn(dev->net, "Error writing WUF_MASKX\n");
1484 return ret;
1485 }
1486
1487 ret = smsc75xx_write_reg(dev, mask_base + 8, 0);
1488 if (ret < 0) {
1489 netdev_warn(dev->net, "Error writing WUF_MASKX\n");
1490 return ret;
1491 }
1492
1493 ret = smsc75xx_write_reg(dev, mask_base + 12, 0);
1494 if (ret < 0) {
1495 netdev_warn(dev->net, "Error writing WUF_MASKX\n");
1496 return ret;
1497 }
1498
1499 return 0;
1500}
1501
1502static int smsc75xx_enter_suspend0(struct usbnet *dev)
1503{
1504 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1505 u32 val;
1506 int ret;
1507
1508 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1509 if (ret < 0) {
1510 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1511 return ret;
1512 }
1513
1514 val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_PHY_RST));
1515 val |= PMT_CTL_SUS_MODE_0 | PMT_CTL_WOL_EN | PMT_CTL_WUPS;
1516
1517 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1518 if (ret < 0) {
1519 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1520 return ret;
1521 }
1522
1523 pdata->suspend_flags |= SUSPEND_SUSPEND0;
1524
1525 return 0;
1526}
1527
1528static int smsc75xx_enter_suspend1(struct usbnet *dev)
1529{
1530 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1531 u32 val;
1532 int ret;
1533
1534 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1535 if (ret < 0) {
1536 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1537 return ret;
1538 }
1539
1540 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
1541 val |= PMT_CTL_SUS_MODE_1;
1542
1543 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1544 if (ret < 0) {
1545 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1546 return ret;
1547 }
1548
1549 /* clear wol status, enable energy detection */
1550 val &= ~PMT_CTL_WUPS;
1551 val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
1552
1553 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1554 if (ret < 0) {
1555 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1556 return ret;
1557 }
1558
1559 pdata->suspend_flags |= SUSPEND_SUSPEND1;
1560
1561 return 0;
1562}
1563
1564static int smsc75xx_enter_suspend2(struct usbnet *dev)
1565{
1566 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1567 u32 val;
1568 int ret;
1569
1570 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1571 if (ret < 0) {
1572 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1573 return ret;
1574 }
1575
1576 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
1577 val |= PMT_CTL_SUS_MODE_2;
1578
1579 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1580 if (ret < 0) {
1581 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1582 return ret;
1583 }
1584
1585 pdata->suspend_flags |= SUSPEND_SUSPEND2;
1586
1587 return 0;
1588}
1589
1590static int smsc75xx_enter_suspend3(struct usbnet *dev)
1591{
1592 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1593 u32 val;
1594 int ret;
1595
1596 ret = smsc75xx_read_reg_nopm(dev, FCT_RX_CTL, &val);
1597 if (ret < 0) {
1598 netdev_warn(dev->net, "Error reading FCT_RX_CTL\n");
1599 return ret;
1600 }
1601
1602 if (val & FCT_RX_CTL_RXUSED) {
1603 netdev_dbg(dev->net, "rx fifo not empty in autosuspend\n");
1604 return -EBUSY;
1605 }
1606
1607 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1608 if (ret < 0) {
1609 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1610 return ret;
1611 }
1612
1613 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST);
1614 val |= PMT_CTL_SUS_MODE_3 | PMT_CTL_RES_CLR_WKP_EN;
1615
1616 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1617 if (ret < 0) {
1618 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1619 return ret;
1620 }
1621
1622 /* clear wol status */
1623 val &= ~PMT_CTL_WUPS;
1624 val |= PMT_CTL_WUPS_WOL;
1625
1626 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1627 if (ret < 0) {
1628 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1629 return ret;
1630 }
1631
1632 pdata->suspend_flags |= SUSPEND_SUSPEND3;
1633
1634 return 0;
1635}
1636
1637static int smsc75xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
1638{
1639 struct mii_if_info *mii = &dev->mii;
1640 int ret;
1641
1642 netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
1643
1644 /* read to clear */
1645 ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
1646 if (ret < 0) {
1647 netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
1648 return ret;
1649 }
1650
1651 /* enable interrupt source */
1652 ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
1653 if (ret < 0) {
1654 netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
1655 return ret;
1656 }
1657
1658 ret |= mask;
1659
1660 smsc75xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
1661
1662 return 0;
1663}
1664
1665static int smsc75xx_link_ok_nopm(struct usbnet *dev)
1666{
1667 struct mii_if_info *mii = &dev->mii;
1668 int ret;
1669
1670 /* first, a dummy read, needed to latch some MII phys */
1671 ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
1672 if (ret < 0) {
1673 netdev_warn(dev->net, "Error reading MII_BMSR\n");
1674 return ret;
1675 }
1676
1677 ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
1678 if (ret < 0) {
1679 netdev_warn(dev->net, "Error reading MII_BMSR\n");
1680 return ret;
1681 }
1682
1683 return !!(ret & BMSR_LSTATUS);
1684}
1685
1686static int smsc75xx_autosuspend(struct usbnet *dev, u32 link_up)
1687{
1688 int ret;
1689
1690 if (!netif_running(dev->net)) {
1691 /* interface is ifconfig down so fully power down hw */
1692 netdev_dbg(dev->net, "autosuspend entering SUSPEND2\n");
1693 return smsc75xx_enter_suspend2(dev);
1694 }
1695
1696 if (!link_up) {
1697 /* link is down so enter EDPD mode */
1698 netdev_dbg(dev->net, "autosuspend entering SUSPEND1\n");
1699
1700 /* enable PHY wakeup events for if cable is attached */
1701 ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
1702 PHY_INT_MASK_ANEG_COMP);
1703 if (ret < 0) {
1704 netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
1705 return ret;
1706 }
1707
1708 netdev_info(dev->net, "entering SUSPEND1 mode\n");
1709 return smsc75xx_enter_suspend1(dev);
1710 }
1711
1712 /* enable PHY wakeup events so we remote wakeup if cable is pulled */
1713 ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
1714 PHY_INT_MASK_LINK_DOWN);
1715 if (ret < 0) {
1716 netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
1717 return ret;
1718 }
1719
1720 netdev_dbg(dev->net, "autosuspend entering SUSPEND3\n");
1721 return smsc75xx_enter_suspend3(dev);
1722}
1723
1157static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message) 1724static int smsc75xx_suspend(struct usb_interface *intf, pm_message_t message)
1158{ 1725{
1159 struct usbnet *dev = usb_get_intfdata(intf); 1726 struct usbnet *dev = usb_get_intfdata(intf);
1160 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1727 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
1728 u32 val, link_up;
1161 int ret; 1729 int ret;
1162 u32 val;
1163 1730
1164 ret = usbnet_suspend(intf, message); 1731 ret = usbnet_suspend(intf, message);
1165 check_warn_return(ret, "usbnet_suspend error"); 1732 if (ret < 0) {
1733 netdev_warn(dev->net, "usbnet_suspend error\n");
1734 return ret;
1735 }
1166 1736
1167 /* if no wol options set, enter lowest power SUSPEND2 mode */ 1737 if (pdata->suspend_flags) {
1168 if (!(pdata->wolopts & SUPPORTED_WAKE)) { 1738 netdev_warn(dev->net, "error during last resume\n");
1169 netdev_info(dev->net, "entering SUSPEND2 mode"); 1739 pdata->suspend_flags = 0;
1740 }
1741
1742 /* determine if link is up using only _nopm functions */
1743 link_up = smsc75xx_link_ok_nopm(dev);
1744
1745 if (message.event == PM_EVENT_AUTO_SUSPEND) {
1746 ret = smsc75xx_autosuspend(dev, link_up);
1747 goto done;
1748 }
1749
1750 /* if we get this far we're not autosuspending */
1751 /* if no wol options set, or if link is down and we're not waking on
1752 * PHY activity, enter lowest power SUSPEND2 mode
1753 */
1754 if (!(pdata->wolopts & SUPPORTED_WAKE) ||
1755 !(link_up || (pdata->wolopts & WAKE_PHY))) {
1756 netdev_info(dev->net, "entering SUSPEND2 mode\n");
1170 1757
1171 /* disable energy detect (link up) & wake up events */ 1758 /* disable energy detect (link up) & wake up events */
1172 ret = smsc75xx_read_reg(dev, WUCSR, &val); 1759 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1173 check_warn_return(ret, "Error reading WUCSR"); 1760 if (ret < 0) {
1761 netdev_warn(dev->net, "Error reading WUCSR\n");
1762 goto done;
1763 }
1174 1764
1175 val &= ~(WUCSR_MPEN | WUCSR_WUEN); 1765 val &= ~(WUCSR_MPEN | WUCSR_WUEN);
1176 1766
1177 ret = smsc75xx_write_reg(dev, WUCSR, val); 1767 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1178 check_warn_return(ret, "Error writing WUCSR"); 1768 if (ret < 0) {
1769 netdev_warn(dev->net, "Error writing WUCSR\n");
1770 goto done;
1771 }
1179 1772
1180 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 1773 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1181 check_warn_return(ret, "Error reading PMT_CTL"); 1774 if (ret < 0) {
1775 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1776 goto done;
1777 }
1182 1778
1183 val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN); 1779 val &= ~(PMT_CTL_ED_EN | PMT_CTL_WOL_EN);
1184 1780
1185 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 1781 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1186 check_warn_return(ret, "Error writing PMT_CTL"); 1782 if (ret < 0) {
1783 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1784 goto done;
1785 }
1187 1786
1188 /* enter suspend2 mode */ 1787 ret = smsc75xx_enter_suspend2(dev);
1189 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 1788 goto done;
1190 check_warn_return(ret, "Error reading PMT_CTL"); 1789 }
1191 1790
1192 val &= ~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST); 1791 if (pdata->wolopts & WAKE_PHY) {
1193 val |= PMT_CTL_SUS_MODE_2; 1792 ret = smsc75xx_enable_phy_wakeup_interrupts(dev,
1793 (PHY_INT_MASK_ANEG_COMP | PHY_INT_MASK_LINK_DOWN));
1794 if (ret < 0) {
1795 netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
1796 goto done;
1797 }
1194 1798
1195 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 1799 /* if link is down then configure EDPD and enter SUSPEND1,
1196 check_warn_return(ret, "Error writing PMT_CTL"); 1800 * otherwise enter SUSPEND0 below
1801 */
1802 if (!link_up) {
1803 struct mii_if_info *mii = &dev->mii;
1804 netdev_info(dev->net, "entering SUSPEND1 mode\n");
1805
1806 /* enable energy detect power-down mode */
1807 ret = smsc75xx_mdio_read_nopm(dev->net, mii->phy_id,
1808 PHY_MODE_CTRL_STS);
1809 if (ret < 0) {
1810 netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
1811 goto done;
1812 }
1197 1813
1198 return 0; 1814 ret |= MODE_CTRL_STS_EDPWRDOWN;
1815
1816 smsc75xx_mdio_write_nopm(dev->net, mii->phy_id,
1817 PHY_MODE_CTRL_STS, ret);
1818
1819 /* enter SUSPEND1 mode */
1820 ret = smsc75xx_enter_suspend1(dev);
1821 goto done;
1822 }
1199 } 1823 }
1200 1824
1201 if (pdata->wolopts & WAKE_MAGIC) { 1825 if (pdata->wolopts & (WAKE_MCAST | WAKE_ARP)) {
1202 /* clear any pending magic packet status */ 1826 int i, filter = 0;
1203 ret = smsc75xx_read_reg(dev, WUCSR, &val);
1204 check_warn_return(ret, "Error reading WUCSR");
1205 1827
1206 val |= WUCSR_MPR; 1828 /* disable all filters */
1829 for (i = 0; i < WUF_NUM; i++) {
1830 ret = smsc75xx_write_reg_nopm(dev, WUF_CFGX + i * 4, 0);
1831 if (ret < 0) {
1832 netdev_warn(dev->net, "Error writing WUF_CFGX\n");
1833 goto done;
1834 }
1835 }
1207 1836
1208 ret = smsc75xx_write_reg(dev, WUCSR, val); 1837 if (pdata->wolopts & WAKE_MCAST) {
1209 check_warn_return(ret, "Error writing WUCSR"); 1838 const u8 mcast[] = {0x01, 0x00, 0x5E};
1210 } 1839 netdev_info(dev->net, "enabling multicast detection\n");
1211 1840
1212 /* enable/disable magic packup wake */ 1841 val = WUF_CFGX_EN | WUF_CFGX_ATYPE_MULTICAST
1213 ret = smsc75xx_read_reg(dev, WUCSR, &val); 1842 | smsc_crc(mcast, 3);
1214 check_warn_return(ret, "Error reading WUCSR"); 1843 ret = smsc75xx_write_wuff(dev, filter++, val, 0x0007);
1844 if (ret < 0) {
1845 netdev_warn(dev->net, "Error writing wakeup filter\n");
1846 goto done;
1847 }
1848 }
1215 1849
1216 if (pdata->wolopts & WAKE_MAGIC) { 1850 if (pdata->wolopts & WAKE_ARP) {
1217 netdev_info(dev->net, "enabling magic packet wakeup"); 1851 const u8 arp[] = {0x08, 0x06};
1218 val |= WUCSR_MPEN; 1852 netdev_info(dev->net, "enabling ARP detection\n");
1853
1854 val = WUF_CFGX_EN | WUF_CFGX_ATYPE_ALL | (0x0C << 16)
1855 | smsc_crc(arp, 2);
1856 ret = smsc75xx_write_wuff(dev, filter++, val, 0x0003);
1857 if (ret < 0) {
1858 netdev_warn(dev->net, "Error writing wakeup filter\n");
1859 goto done;
1860 }
1861 }
1862
1863 /* clear any pending pattern match packet status */
1864 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1865 if (ret < 0) {
1866 netdev_warn(dev->net, "Error reading WUCSR\n");
1867 goto done;
1868 }
1869
1870 val |= WUCSR_WUFR;
1871
1872 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1873 if (ret < 0) {
1874 netdev_warn(dev->net, "Error writing WUCSR\n");
1875 goto done;
1876 }
1877
1878 netdev_info(dev->net, "enabling packet match detection\n");
1879 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1880 if (ret < 0) {
1881 netdev_warn(dev->net, "Error reading WUCSR\n");
1882 goto done;
1883 }
1884
1885 val |= WUCSR_WUEN;
1886
1887 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1888 if (ret < 0) {
1889 netdev_warn(dev->net, "Error writing WUCSR\n");
1890 goto done;
1891 }
1219 } else { 1892 } else {
1220 netdev_info(dev->net, "disabling magic packet wakeup"); 1893 netdev_info(dev->net, "disabling packet match detection\n");
1221 val &= ~WUCSR_MPEN; 1894 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1895 if (ret < 0) {
1896 netdev_warn(dev->net, "Error reading WUCSR\n");
1897 goto done;
1898 }
1899
1900 val &= ~WUCSR_WUEN;
1901
1902 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1903 if (ret < 0) {
1904 netdev_warn(dev->net, "Error writing WUCSR\n");
1905 goto done;
1906 }
1222 } 1907 }
1223 1908
1224 ret = smsc75xx_write_reg(dev, WUCSR, val); 1909 /* disable magic, bcast & unicast wakeup sources */
1225 check_warn_return(ret, "Error writing WUCSR"); 1910 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1911 if (ret < 0) {
1912 netdev_warn(dev->net, "Error reading WUCSR\n");
1913 goto done;
1914 }
1226 1915
1227 /* enable wol wakeup source */ 1916 val &= ~(WUCSR_MPEN | WUCSR_BCST_EN | WUCSR_PFDA_EN);
1228 ret = smsc75xx_read_reg(dev, PMT_CTL, &val);
1229 check_warn_return(ret, "Error reading PMT_CTL");
1230 1917
1231 val |= PMT_CTL_WOL_EN; 1918 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1919 if (ret < 0) {
1920 netdev_warn(dev->net, "Error writing WUCSR\n");
1921 goto done;
1922 }
1232 1923
1233 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 1924 if (pdata->wolopts & WAKE_PHY) {
1234 check_warn_return(ret, "Error writing PMT_CTL"); 1925 netdev_info(dev->net, "enabling PHY wakeup\n");
1235 1926
1236 /* enable receiver */ 1927 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1237 ret = smsc75xx_read_reg(dev, MAC_RX, &val); 1928 if (ret < 0) {
1238 check_warn_return(ret, "Failed to read MAC_RX: %d", ret); 1929 netdev_warn(dev->net, "Error reading PMT_CTL\n");
1930 goto done;
1931 }
1239 1932
1240 val |= MAC_RX_RXEN; 1933 /* clear wol status, enable energy detection */
1934 val &= ~PMT_CTL_WUPS;
1935 val |= (PMT_CTL_WUPS_ED | PMT_CTL_ED_EN);
1241 1936
1242 ret = smsc75xx_write_reg(dev, MAC_RX, val); 1937 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1243 check_warn_return(ret, "Failed to write MAC_RX: %d", ret); 1938 if (ret < 0) {
1939 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1940 goto done;
1941 }
1942 }
1244 1943
1245 /* some wol options are enabled, so enter SUSPEND0 */ 1944 if (pdata->wolopts & WAKE_MAGIC) {
1246 netdev_info(dev->net, "entering SUSPEND0 mode"); 1945 netdev_info(dev->net, "enabling magic packet wakeup\n");
1946 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1947 if (ret < 0) {
1948 netdev_warn(dev->net, "Error reading WUCSR\n");
1949 goto done;
1950 }
1247 1951
1248 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 1952 /* clear any pending magic packet status */
1249 check_warn_return(ret, "Error reading PMT_CTL"); 1953 val |= WUCSR_MPR | WUCSR_MPEN;
1250 1954
1251 val &= (~(PMT_CTL_SUS_MODE | PMT_CTL_WUPS | PMT_CTL_PHY_RST)); 1955 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1252 val |= PMT_CTL_SUS_MODE_0; 1956 if (ret < 0) {
1957 netdev_warn(dev->net, "Error writing WUCSR\n");
1958 goto done;
1959 }
1960 }
1253 1961
1254 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 1962 if (pdata->wolopts & WAKE_BCAST) {
1255 check_warn_return(ret, "Error writing PMT_CTL"); 1963 netdev_info(dev->net, "enabling broadcast detection\n");
1964 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1965 if (ret < 0) {
1966 netdev_warn(dev->net, "Error reading WUCSR\n");
1967 goto done;
1968 }
1256 1969
1257 /* clear wol status */ 1970 val |= WUCSR_BCAST_FR | WUCSR_BCST_EN;
1258 val &= ~PMT_CTL_WUPS;
1259 val |= PMT_CTL_WUPS_WOL;
1260 ret = smsc75xx_write_reg(dev, PMT_CTL, val);
1261 check_warn_return(ret, "Error writing PMT_CTL");
1262 1971
1263 /* read back PMT_CTL */ 1972 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1264 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 1973 if (ret < 0) {
1265 check_warn_return(ret, "Error reading PMT_CTL"); 1974 netdev_warn(dev->net, "Error writing WUCSR\n");
1975 goto done;
1976 }
1977 }
1266 1978
1267 smsc75xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP); 1979 if (pdata->wolopts & WAKE_UCAST) {
1980 netdev_info(dev->net, "enabling unicast detection\n");
1981 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
1982 if (ret < 0) {
1983 netdev_warn(dev->net, "Error reading WUCSR\n");
1984 goto done;
1985 }
1268 1986
1269 return 0; 1987 val |= WUCSR_WUFR | WUCSR_PFDA_EN;
1988
1989 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1990 if (ret < 0) {
1991 netdev_warn(dev->net, "Error writing WUCSR\n");
1992 goto done;
1993 }
1994 }
1995
1996 /* enable receiver to enable frame reception */
1997 ret = smsc75xx_read_reg_nopm(dev, MAC_RX, &val);
1998 if (ret < 0) {
1999 netdev_warn(dev->net, "Failed to read MAC_RX: %d\n", ret);
2000 goto done;
2001 }
2002
2003 val |= MAC_RX_RXEN;
2004
2005 ret = smsc75xx_write_reg_nopm(dev, MAC_RX, val);
2006 if (ret < 0) {
2007 netdev_warn(dev->net, "Failed to write MAC_RX: %d\n", ret);
2008 goto done;
2009 }
2010
2011 /* some wol options are enabled, so enter SUSPEND0 */
2012 netdev_info(dev->net, "entering SUSPEND0 mode\n");
2013 ret = smsc75xx_enter_suspend0(dev);
2014
2015done:
2016 if (ret)
2017 usbnet_resume(intf);
2018 return ret;
1270} 2019}
1271 2020
1272static int smsc75xx_resume(struct usb_interface *intf) 2021static int smsc75xx_resume(struct usb_interface *intf)
1273{ 2022{
1274 struct usbnet *dev = usb_get_intfdata(intf); 2023 struct usbnet *dev = usb_get_intfdata(intf);
1275 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 2024 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
2025 u8 suspend_flags = pdata->suspend_flags;
1276 int ret; 2026 int ret;
1277 u32 val; 2027 u32 val;
1278 2028
1279 if (pdata->wolopts & WAKE_MAGIC) { 2029 netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
1280 netdev_info(dev->net, "resuming from SUSPEND0");
1281 2030
1282 smsc75xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP); 2031 /* do this first to ensure it's cleared even in error case */
2032 pdata->suspend_flags = 0;
1283 2033
1284 /* Disable magic packup wake */ 2034 if (suspend_flags & SUSPEND_ALLMODES) {
1285 ret = smsc75xx_read_reg(dev, WUCSR, &val); 2035 /* Disable wakeup sources */
1286 check_warn_return(ret, "Error reading WUCSR"); 2036 ret = smsc75xx_read_reg_nopm(dev, WUCSR, &val);
2037 if (ret < 0) {
2038 netdev_warn(dev->net, "Error reading WUCSR\n");
2039 return ret;
2040 }
1287 2041
1288 val &= ~WUCSR_MPEN; 2042 val &= ~(WUCSR_WUEN | WUCSR_MPEN | WUCSR_PFDA_EN
2043 | WUCSR_BCST_EN);
1289 2044
1290 ret = smsc75xx_write_reg(dev, WUCSR, val); 2045 ret = smsc75xx_write_reg_nopm(dev, WUCSR, val);
1291 check_warn_return(ret, "Error writing WUCSR"); 2046 if (ret < 0) {
2047 netdev_warn(dev->net, "Error writing WUCSR\n");
2048 return ret;
2049 }
1292 2050
1293 /* clear wake-up status */ 2051 /* clear wake-up status */
1294 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 2052 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
1295 check_warn_return(ret, "Error reading PMT_CTL"); 2053 if (ret < 0) {
2054 netdev_warn(dev->net, "Error reading PMT_CTL\n");
2055 return ret;
2056 }
1296 2057
1297 val &= ~PMT_CTL_WOL_EN; 2058 val &= ~PMT_CTL_WOL_EN;
1298 val |= PMT_CTL_WUPS; 2059 val |= PMT_CTL_WUPS;
1299 2060
1300 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 2061 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1301 check_warn_return(ret, "Error writing PMT_CTL"); 2062 if (ret < 0) {
1302 } else { 2063 netdev_warn(dev->net, "Error writing PMT_CTL\n");
1303 netdev_info(dev->net, "resuming from SUSPEND2"); 2064 return ret;
2065 }
2066 }
1304 2067
1305 ret = smsc75xx_read_reg(dev, PMT_CTL, &val); 2068 if (suspend_flags & SUSPEND_SUSPEND2) {
1306 check_warn_return(ret, "Error reading PMT_CTL"); 2069 netdev_info(dev->net, "resuming from SUSPEND2\n");
2070
2071 ret = smsc75xx_read_reg_nopm(dev, PMT_CTL, &val);
2072 if (ret < 0) {
2073 netdev_warn(dev->net, "Error reading PMT_CTL\n");
2074 return ret;
2075 }
1307 2076
1308 val |= PMT_CTL_PHY_PWRUP; 2077 val |= PMT_CTL_PHY_PWRUP;
1309 2078
1310 ret = smsc75xx_write_reg(dev, PMT_CTL, val); 2079 ret = smsc75xx_write_reg_nopm(dev, PMT_CTL, val);
1311 check_warn_return(ret, "Error writing PMT_CTL"); 2080 if (ret < 0) {
2081 netdev_warn(dev->net, "Error writing PMT_CTL\n");
2082 return ret;
2083 }
1312 } 2084 }
1313 2085
1314 ret = smsc75xx_wait_ready(dev); 2086 ret = smsc75xx_wait_ready(dev, 1);
1315 check_warn_return(ret, "device not ready in smsc75xx_resume"); 2087 if (ret < 0) {
2088 netdev_warn(dev->net, "device not ready in smsc75xx_resume\n");
2089 return ret;
2090 }
1316 2091
1317 return usbnet_resume(intf); 2092 return usbnet_resume(intf);
1318} 2093}
@@ -1352,7 +2127,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1352 2127
1353 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { 2128 if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
1354 netif_dbg(dev, rx_err, dev->net, 2129 netif_dbg(dev, rx_err, dev->net,
1355 "Error rx_cmd_a=0x%08x", rx_cmd_a); 2130 "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
1356 dev->net->stats.rx_errors++; 2131 dev->net->stats.rx_errors++;
1357 dev->net->stats.rx_dropped++; 2132 dev->net->stats.rx_dropped++;
1358 2133
@@ -1364,7 +2139,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1364 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ 2139 /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
1365 if (unlikely(size > (ETH_FRAME_LEN + 12))) { 2140 if (unlikely(size > (ETH_FRAME_LEN + 12))) {
1366 netif_dbg(dev, rx_err, dev->net, 2141 netif_dbg(dev, rx_err, dev->net,
1367 "size err rx_cmd_a=0x%08x", rx_cmd_a); 2142 "size err rx_cmd_a=0x%08x\n",
2143 rx_cmd_a);
1368 return 0; 2144 return 0;
1369 } 2145 }
1370 2146
@@ -1381,7 +2157,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1381 2157
1382 ax_skb = skb_clone(skb, GFP_ATOMIC); 2158 ax_skb = skb_clone(skb, GFP_ATOMIC);
1383 if (unlikely(!ax_skb)) { 2159 if (unlikely(!ax_skb)) {
1384 netdev_warn(dev->net, "Error allocating skb"); 2160 netdev_warn(dev->net, "Error allocating skb\n");
1385 return 0; 2161 return 0;
1386 } 2162 }
1387 2163
@@ -1406,7 +2182,7 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1406 } 2182 }
1407 2183
1408 if (unlikely(skb->len < 0)) { 2184 if (unlikely(skb->len < 0)) {
1409 netdev_warn(dev->net, "invalid rx length<0 %d", skb->len); 2185 netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
1410 return 0; 2186 return 0;
1411 } 2187 }
1412 2188
@@ -1454,6 +2230,12 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
1454 return skb; 2230 return skb;
1455} 2231}
1456 2232
2233static int smsc75xx_manage_power(struct usbnet *dev, int on)
2234{
2235 dev->intf->needs_remote_wakeup = on;
2236 return 0;
2237}
2238
1457static const struct driver_info smsc75xx_info = { 2239static const struct driver_info smsc75xx_info = {
1458 .description = "smsc75xx USB 2.0 Gigabit Ethernet", 2240 .description = "smsc75xx USB 2.0 Gigabit Ethernet",
1459 .bind = smsc75xx_bind, 2241 .bind = smsc75xx_bind,
@@ -1463,6 +2245,7 @@ static const struct driver_info smsc75xx_info = {
1463 .rx_fixup = smsc75xx_rx_fixup, 2245 .rx_fixup = smsc75xx_rx_fixup,
1464 .tx_fixup = smsc75xx_tx_fixup, 2246 .tx_fixup = smsc75xx_tx_fixup,
1465 .status = smsc75xx_status, 2247 .status = smsc75xx_status,
2248 .manage_power = smsc75xx_manage_power,
1466 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, 2249 .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR,
1467}; 2250};
1468 2251
@@ -1490,6 +2273,7 @@ static struct usb_driver smsc75xx_driver = {
1490 .reset_resume = smsc75xx_resume, 2273 .reset_resume = smsc75xx_resume,
1491 .disconnect = usbnet_disconnect, 2274 .disconnect = usbnet_disconnect,
1492 .disable_hub_initiated_lpm = 1, 2275 .disable_hub_initiated_lpm = 1,
2276 .supports_autosuspend = 1,
1493}; 2277};
1494 2278
1495module_usb_driver(smsc75xx_driver); 2279module_usb_driver(smsc75xx_driver);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 362cb8cfeb92..9b736701f854 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -26,6 +26,8 @@
26#include <linux/ethtool.h> 26#include <linux/ethtool.h>
27#include <linux/mii.h> 27#include <linux/mii.h>
28#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/bitrev.h>
30#include <linux/crc16.h>
29#include <linux/crc32.h> 31#include <linux/crc32.h>
30#include <linux/usb/usbnet.h> 32#include <linux/usb/usbnet.h>
31#include <linux/slab.h> 33#include <linux/slab.h>
@@ -46,16 +48,12 @@
46#define SMSC95XX_INTERNAL_PHY_ID (1) 48#define SMSC95XX_INTERNAL_PHY_ID (1)
47#define SMSC95XX_TX_OVERHEAD (8) 49#define SMSC95XX_TX_OVERHEAD (8)
48#define SMSC95XX_TX_OVERHEAD_CSUM (12) 50#define SMSC95XX_TX_OVERHEAD_CSUM (12)
49#define SUPPORTED_WAKE (WAKE_MAGIC) 51#define SUPPORTED_WAKE (WAKE_PHY | WAKE_UCAST | WAKE_BCAST | \
52 WAKE_MCAST | WAKE_ARP | WAKE_MAGIC)
50 53
51#define check_warn(ret, fmt, args...) \ 54#define FEATURE_8_WAKEUP_FILTERS (0x01)
52 ({ if (ret < 0) netdev_warn(dev->net, fmt, ##args); }) 55#define FEATURE_PHY_NLP_CROSSOVER (0x02)
53 56#define FEATURE_AUTOSUSPEND (0x04)
54#define check_warn_return(ret, fmt, args...) \
55 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); return ret; } })
56
57#define check_warn_goto_done(ret, fmt, args...) \
58 ({ if (ret < 0) { netdev_warn(dev->net, fmt, ##args); goto done; } })
59 57
60struct smsc95xx_priv { 58struct smsc95xx_priv {
61 u32 mac_cr; 59 u32 mac_cr;
@@ -63,105 +61,107 @@ struct smsc95xx_priv {
63 u32 hash_lo; 61 u32 hash_lo;
64 u32 wolopts; 62 u32 wolopts;
65 spinlock_t mac_cr_lock; 63 spinlock_t mac_cr_lock;
66}; 64 u8 features;
67
68struct usb_context {
69 struct usb_ctrlrequest req;
70 struct usbnet *dev;
71}; 65};
72 66
73static bool turbo_mode = true; 67static bool turbo_mode = true;
74module_param(turbo_mode, bool, 0644); 68module_param(turbo_mode, bool, 0644);
75MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 69MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
76 70
77static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index, 71static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
78 u32 *data) 72 u32 *data, int in_pm)
79{ 73{
80 u32 *buf = kmalloc(4, GFP_KERNEL); 74 u32 buf;
81 int ret; 75 int ret;
76 int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16);
82 77
83 BUG_ON(!dev); 78 BUG_ON(!dev);
84 79
85 if (!buf) 80 if (!in_pm)
86 return -ENOMEM; 81 fn = usbnet_read_cmd;
87 82 else
88 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 83 fn = usbnet_read_cmd_nopm;
89 USB_VENDOR_REQUEST_READ_REGISTER,
90 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
91 00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
92 84
85 ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
86 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
87 0, index, &buf, 4);
93 if (unlikely(ret < 0)) 88 if (unlikely(ret < 0))
94 netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index); 89 netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
90 index, ret);
95 91
96 le32_to_cpus(buf); 92 le32_to_cpus(&buf);
97 *data = *buf; 93 *data = buf;
98 kfree(buf);
99 94
100 return ret; 95 return ret;
101} 96}
102 97
103static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index, 98static int __must_check __smsc95xx_write_reg(struct usbnet *dev, u32 index,
104 u32 data) 99 u32 data, int in_pm)
105{ 100{
106 u32 *buf = kmalloc(4, GFP_KERNEL); 101 u32 buf;
107 int ret; 102 int ret;
103 int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
108 104
109 BUG_ON(!dev); 105 BUG_ON(!dev);
110 106
111 if (!buf) 107 if (!in_pm)
112 return -ENOMEM; 108 fn = usbnet_write_cmd;
113 109 else
114 *buf = data; 110 fn = usbnet_write_cmd_nopm;
115 cpu_to_le32s(buf);
116 111
117 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 112 buf = data;
118 USB_VENDOR_REQUEST_WRITE_REGISTER, 113 cpu_to_le32s(&buf);
119 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
120 00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
121 114
115 ret = fn(dev, USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT
116 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
117 0, index, &buf, 4);
122 if (unlikely(ret < 0)) 118 if (unlikely(ret < 0))
123 netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index); 119 netdev_warn(dev->net, "Failed to write reg index 0x%08x: %d\n",
124 120 index, ret);
125 kfree(buf);
126 121
127 return ret; 122 return ret;
128} 123}
129 124
130static int smsc95xx_set_feature(struct usbnet *dev, u32 feature) 125static int __must_check smsc95xx_read_reg_nopm(struct usbnet *dev, u32 index,
126 u32 *data)
131{ 127{
132 if (WARN_ON_ONCE(!dev)) 128 return __smsc95xx_read_reg(dev, index, data, 1);
133 return -EINVAL;
134
135 cpu_to_le32s(&feature);
136
137 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
138 USB_REQ_SET_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0,
139 USB_CTRL_SET_TIMEOUT);
140} 129}
141 130
142static int smsc95xx_clear_feature(struct usbnet *dev, u32 feature) 131static int __must_check smsc95xx_write_reg_nopm(struct usbnet *dev, u32 index,
132 u32 data)
143{ 133{
144 if (WARN_ON_ONCE(!dev)) 134 return __smsc95xx_write_reg(dev, index, data, 1);
145 return -EINVAL; 135}
146 136
147 cpu_to_le32s(&feature); 137static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
138 u32 *data)
139{
140 return __smsc95xx_read_reg(dev, index, data, 0);
141}
148 142
149 return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 143static int __must_check smsc95xx_write_reg(struct usbnet *dev, u32 index,
150 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, feature, 0, NULL, 0, 144 u32 data)
151 USB_CTRL_SET_TIMEOUT); 145{
146 return __smsc95xx_write_reg(dev, index, data, 0);
152} 147}
153 148
154/* Loop until the read is completed with timeout 149/* Loop until the read is completed with timeout
155 * called with phy_mutex held */ 150 * called with phy_mutex held */
156static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev) 151static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
152 int in_pm)
157{ 153{
158 unsigned long start_time = jiffies; 154 unsigned long start_time = jiffies;
159 u32 val; 155 u32 val;
160 int ret; 156 int ret;
161 157
162 do { 158 do {
163 ret = smsc95xx_read_reg(dev, MII_ADDR, &val); 159 ret = __smsc95xx_read_reg(dev, MII_ADDR, &val, in_pm);
164 check_warn_return(ret, "Error reading MII_ACCESS"); 160 if (ret < 0) {
161 netdev_warn(dev->net, "Error reading MII_ACCESS\n");
162 return ret;
163 }
164
165 if (!(val & MII_BUSY_)) 165 if (!(val & MII_BUSY_))
166 return 0; 166 return 0;
167 } while (!time_after(jiffies, start_time + HZ)); 167 } while (!time_after(jiffies, start_time + HZ));
@@ -169,7 +169,8 @@ static int __must_check smsc95xx_phy_wait_not_busy(struct usbnet *dev)
169 return -EIO; 169 return -EIO;
170} 170}
171 171
172static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx) 172static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
173 int in_pm)
173{ 174{
174 struct usbnet *dev = netdev_priv(netdev); 175 struct usbnet *dev = netdev_priv(netdev);
175 u32 val, addr; 176 u32 val, addr;
@@ -178,21 +179,33 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
178 mutex_lock(&dev->phy_mutex); 179 mutex_lock(&dev->phy_mutex);
179 180
180 /* confirm MII not busy */ 181 /* confirm MII not busy */
181 ret = smsc95xx_phy_wait_not_busy(dev); 182 ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
182 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_read"); 183 if (ret < 0) {
184 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
185 goto done;
186 }
183 187
184 /* set the address, index & direction (read from PHY) */ 188 /* set the address, index & direction (read from PHY) */
185 phy_id &= dev->mii.phy_id_mask; 189 phy_id &= dev->mii.phy_id_mask;
186 idx &= dev->mii.reg_num_mask; 190 idx &= dev->mii.reg_num_mask;
187 addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_; 191 addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
188 ret = smsc95xx_write_reg(dev, MII_ADDR, addr); 192 ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
189 check_warn_goto_done(ret, "Error writing MII_ADDR"); 193 if (ret < 0) {
194 netdev_warn(dev->net, "Error writing MII_ADDR\n");
195 goto done;
196 }
190 197
191 ret = smsc95xx_phy_wait_not_busy(dev); 198 ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
192 check_warn_goto_done(ret, "Timed out reading MII reg %02X", idx); 199 if (ret < 0) {
200 netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
201 goto done;
202 }
193 203
194 ret = smsc95xx_read_reg(dev, MII_DATA, &val); 204 ret = __smsc95xx_read_reg(dev, MII_DATA, &val, in_pm);
195 check_warn_goto_done(ret, "Error reading MII_DATA"); 205 if (ret < 0) {
206 netdev_warn(dev->net, "Error reading MII_DATA\n");
207 goto done;
208 }
196 209
197 ret = (u16)(val & 0xFFFF); 210 ret = (u16)(val & 0xFFFF);
198 211
@@ -201,8 +214,8 @@ done:
201 return ret; 214 return ret;
202} 215}
203 216
204static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx, 217static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
205 int regval) 218 int idx, int regval, int in_pm)
206{ 219{
207 struct usbnet *dev = netdev_priv(netdev); 220 struct usbnet *dev = netdev_priv(netdev);
208 u32 val, addr; 221 u32 val, addr;
@@ -211,27 +224,62 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
211 mutex_lock(&dev->phy_mutex); 224 mutex_lock(&dev->phy_mutex);
212 225
213 /* confirm MII not busy */ 226 /* confirm MII not busy */
214 ret = smsc95xx_phy_wait_not_busy(dev); 227 ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
215 check_warn_goto_done(ret, "MII is busy in smsc95xx_mdio_write"); 228 if (ret < 0) {
229 netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
230 goto done;
231 }
216 232
217 val = regval; 233 val = regval;
218 ret = smsc95xx_write_reg(dev, MII_DATA, val); 234 ret = __smsc95xx_write_reg(dev, MII_DATA, val, in_pm);
219 check_warn_goto_done(ret, "Error writing MII_DATA"); 235 if (ret < 0) {
236 netdev_warn(dev->net, "Error writing MII_DATA\n");
237 goto done;
238 }
220 239
221 /* set the address, index & direction (write to PHY) */ 240 /* set the address, index & direction (write to PHY) */
222 phy_id &= dev->mii.phy_id_mask; 241 phy_id &= dev->mii.phy_id_mask;
223 idx &= dev->mii.reg_num_mask; 242 idx &= dev->mii.reg_num_mask;
224 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_; 243 addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
225 ret = smsc95xx_write_reg(dev, MII_ADDR, addr); 244 ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
226 check_warn_goto_done(ret, "Error writing MII_ADDR"); 245 if (ret < 0) {
246 netdev_warn(dev->net, "Error writing MII_ADDR\n");
247 goto done;
248 }
227 249
228 ret = smsc95xx_phy_wait_not_busy(dev); 250 ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
229 check_warn_goto_done(ret, "Timed out writing MII reg %02X", idx); 251 if (ret < 0) {
252 netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
253 goto done;
254 }
230 255
231done: 256done:
232 mutex_unlock(&dev->phy_mutex); 257 mutex_unlock(&dev->phy_mutex);
233} 258}
234 259
260static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
261 int idx)
262{
263 return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
264}
265
266static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
267 int idx, int regval)
268{
269 __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
270}
271
272static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
273{
274 return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
275}
276
277static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
278 int regval)
279{
280 __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
281}
282
235static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev) 283static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
236{ 284{
237 unsigned long start_time = jiffies; 285 unsigned long start_time = jiffies;
@@ -240,7 +288,11 @@ static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
240 288
241 do { 289 do {
242 ret = smsc95xx_read_reg(dev, E2P_CMD, &val); 290 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
243 check_warn_return(ret, "Error reading E2P_CMD"); 291 if (ret < 0) {
292 netdev_warn(dev->net, "Error reading E2P_CMD\n");
293 return ret;
294 }
295
244 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_)) 296 if (!(val & E2P_CMD_BUSY_) || (val & E2P_CMD_TIMEOUT_))
245 break; 297 break;
246 udelay(40); 298 udelay(40);
@@ -262,7 +314,10 @@ static int __must_check smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
262 314
263 do { 315 do {
264 ret = smsc95xx_read_reg(dev, E2P_CMD, &val); 316 ret = smsc95xx_read_reg(dev, E2P_CMD, &val);
265 check_warn_return(ret, "Error reading E2P_CMD"); 317 if (ret < 0) {
318 netdev_warn(dev->net, "Error reading E2P_CMD\n");
319 return ret;
320 }
266 321
267 if (!(val & E2P_CMD_BUSY_)) 322 if (!(val & E2P_CMD_BUSY_))
268 return 0; 323 return 0;
@@ -290,14 +345,20 @@ static int smsc95xx_read_eeprom(struct usbnet *dev, u32 offset, u32 length,
290 for (i = 0; i < length; i++) { 345 for (i = 0; i < length; i++) {
291 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_); 346 val = E2P_CMD_BUSY_ | E2P_CMD_READ_ | (offset & E2P_CMD_ADDR_);
292 ret = smsc95xx_write_reg(dev, E2P_CMD, val); 347 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
293 check_warn_return(ret, "Error writing E2P_CMD"); 348 if (ret < 0) {
349 netdev_warn(dev->net, "Error writing E2P_CMD\n");
350 return ret;
351 }
294 352
295 ret = smsc95xx_wait_eeprom(dev); 353 ret = smsc95xx_wait_eeprom(dev);
296 if (ret < 0) 354 if (ret < 0)
297 return ret; 355 return ret;
298 356
299 ret = smsc95xx_read_reg(dev, E2P_DATA, &val); 357 ret = smsc95xx_read_reg(dev, E2P_DATA, &val);
300 check_warn_return(ret, "Error reading E2P_DATA"); 358 if (ret < 0) {
359 netdev_warn(dev->net, "Error reading E2P_DATA\n");
360 return ret;
361 }
301 362
302 data[i] = val & 0xFF; 363 data[i] = val & 0xFF;
303 offset++; 364 offset++;
@@ -322,7 +383,10 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
322 /* Issue write/erase enable command */ 383 /* Issue write/erase enable command */
323 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_; 384 val = E2P_CMD_BUSY_ | E2P_CMD_EWEN_;
324 ret = smsc95xx_write_reg(dev, E2P_CMD, val); 385 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
325 check_warn_return(ret, "Error writing E2P_DATA"); 386 if (ret < 0) {
387 netdev_warn(dev->net, "Error writing E2P_DATA\n");
388 return ret;
389 }
326 390
327 ret = smsc95xx_wait_eeprom(dev); 391 ret = smsc95xx_wait_eeprom(dev);
328 if (ret < 0) 392 if (ret < 0)
@@ -333,12 +397,18 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
333 /* Fill data register */ 397 /* Fill data register */
334 val = data[i]; 398 val = data[i];
335 ret = smsc95xx_write_reg(dev, E2P_DATA, val); 399 ret = smsc95xx_write_reg(dev, E2P_DATA, val);
336 check_warn_return(ret, "Error writing E2P_DATA"); 400 if (ret < 0) {
401 netdev_warn(dev->net, "Error writing E2P_DATA\n");
402 return ret;
403 }
337 404
338 /* Send "write" command */ 405 /* Send "write" command */
339 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_); 406 val = E2P_CMD_BUSY_ | E2P_CMD_WRITE_ | (offset & E2P_CMD_ADDR_);
340 ret = smsc95xx_write_reg(dev, E2P_CMD, val); 407 ret = smsc95xx_write_reg(dev, E2P_CMD, val);
341 check_warn_return(ret, "Error writing E2P_CMD"); 408 if (ret < 0) {
409 netdev_warn(dev->net, "Error writing E2P_CMD\n");
410 return ret;
411 }
342 412
343 ret = smsc95xx_wait_eeprom(dev); 413 ret = smsc95xx_wait_eeprom(dev);
344 if (ret < 0) 414 if (ret < 0)
@@ -350,60 +420,24 @@ static int smsc95xx_write_eeprom(struct usbnet *dev, u32 offset, u32 length,
350 return 0; 420 return 0;
351} 421}
352 422
353static void smsc95xx_async_cmd_callback(struct urb *urb)
354{
355 struct usb_context *usb_context = urb->context;
356 struct usbnet *dev = usb_context->dev;
357 int status = urb->status;
358
359 check_warn(status, "async callback failed with %d\n", status);
360
361 kfree(usb_context);
362 usb_free_urb(urb);
363}
364
365static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index, 423static int __must_check smsc95xx_write_reg_async(struct usbnet *dev, u16 index,
366 u32 *data) 424 u32 data)
367{ 425{
368 struct usb_context *usb_context;
369 int status;
370 struct urb *urb;
371 const u16 size = 4; 426 const u16 size = 4;
427 u32 buf;
428 int ret;
372 429
373 urb = usb_alloc_urb(0, GFP_ATOMIC); 430 buf = data;
374 if (!urb) { 431 cpu_to_le32s(&buf);
375 netdev_warn(dev->net, "Error allocating URB\n");
376 return -ENOMEM;
377 }
378
379 usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
380 if (usb_context == NULL) {
381 netdev_warn(dev->net, "Error allocating control msg\n");
382 usb_free_urb(urb);
383 return -ENOMEM;
384 }
385
386 usb_context->req.bRequestType =
387 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
388 usb_context->req.bRequest = USB_VENDOR_REQUEST_WRITE_REGISTER;
389 usb_context->req.wValue = 00;
390 usb_context->req.wIndex = cpu_to_le16(index);
391 usb_context->req.wLength = cpu_to_le16(size);
392
393 usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
394 (void *)&usb_context->req, data, size,
395 smsc95xx_async_cmd_callback,
396 (void *)usb_context);
397
398 status = usb_submit_urb(urb, GFP_ATOMIC);
399 if (status < 0) {
400 netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
401 status);
402 kfree(usb_context);
403 usb_free_urb(urb);
404 }
405 432
406 return status; 433 ret = usbnet_write_cmd_async(dev, USB_VENDOR_REQUEST_WRITE_REGISTER,
434 USB_DIR_OUT | USB_TYPE_VENDOR |
435 USB_RECIP_DEVICE,
436 0, index, &buf, size);
437 if (ret < 0)
438 netdev_warn(dev->net, "Error write async cmd, sts=%d\n",
439 ret);
440 return ret;
407} 441}
408 442
409/* returns hash bit number for given MAC address 443/* returns hash bit number for given MAC address
@@ -460,14 +494,17 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
460 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 494 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
461 495
462 /* Initiate async writes, as we can't wait for completion here */ 496 /* Initiate async writes, as we can't wait for completion here */
463 ret = smsc95xx_write_reg_async(dev, HASHH, &pdata->hash_hi); 497 ret = smsc95xx_write_reg_async(dev, HASHH, pdata->hash_hi);
464 check_warn(ret, "failed to initiate async write to HASHH"); 498 if (ret < 0)
499 netdev_warn(dev->net, "failed to initiate async write to HASHH\n");
465 500
466 ret = smsc95xx_write_reg_async(dev, HASHL, &pdata->hash_lo); 501 ret = smsc95xx_write_reg_async(dev, HASHL, pdata->hash_lo);
467 check_warn(ret, "failed to initiate async write to HASHL"); 502 if (ret < 0)
503 netdev_warn(dev->net, "failed to initiate async write to HASHL\n");
468 504
469 ret = smsc95xx_write_reg_async(dev, MAC_CR, &pdata->mac_cr); 505 ret = smsc95xx_write_reg_async(dev, MAC_CR, pdata->mac_cr);
470 check_warn(ret, "failed to initiate async write to MAC_CR"); 506 if (ret < 0)
507 netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n");
471} 508}
472 509
473static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex, 510static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
@@ -476,7 +513,10 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
476 u32 flow, afc_cfg = 0; 513 u32 flow, afc_cfg = 0;
477 514
478 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg); 515 int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
479 check_warn_return(ret, "Error reading AFC_CFG"); 516 if (ret < 0) {
517 netdev_warn(dev->net, "Error reading AFC_CFG\n");
518 return ret;
519 }
480 520
481 if (duplex == DUPLEX_FULL) { 521 if (duplex == DUPLEX_FULL) {
482 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); 522 u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
@@ -501,12 +541,16 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
501 } 541 }
502 542
503 ret = smsc95xx_write_reg(dev, FLOW, flow); 543 ret = smsc95xx_write_reg(dev, FLOW, flow);
504 check_warn_return(ret, "Error writing FLOW"); 544 if (ret < 0) {
545 netdev_warn(dev->net, "Error writing FLOW\n");
546 return ret;
547 }
505 548
506 ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg); 549 ret = smsc95xx_write_reg(dev, AFC_CFG, afc_cfg);
507 check_warn_return(ret, "Error writing AFC_CFG"); 550 if (ret < 0)
551 netdev_warn(dev->net, "Error writing AFC_CFG\n");
508 552
509 return 0; 553 return ret;
510} 554}
511 555
512static int smsc95xx_link_reset(struct usbnet *dev) 556static int smsc95xx_link_reset(struct usbnet *dev)
@@ -520,10 +564,16 @@ static int smsc95xx_link_reset(struct usbnet *dev)
520 564
521 /* clear interrupt status */ 565 /* clear interrupt status */
522 ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); 566 ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
523 check_warn_return(ret, "Error reading PHY_INT_SRC"); 567 if (ret < 0) {
568 netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
569 return ret;
570 }
524 571
525 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); 572 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
526 check_warn_return(ret, "Error writing INT_STS"); 573 if (ret < 0) {
574 netdev_warn(dev->net, "Error writing INT_STS\n");
575 return ret;
576 }
527 577
528 mii_check_media(mii, 1, 1); 578 mii_check_media(mii, 1, 1);
529 mii_ethtool_gset(&dev->mii, &ecmd); 579 mii_ethtool_gset(&dev->mii, &ecmd);
@@ -545,12 +595,16 @@ static int smsc95xx_link_reset(struct usbnet *dev)
545 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 595 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
546 596
547 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 597 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
548 check_warn_return(ret, "Error writing MAC_CR"); 598 if (ret < 0) {
599 netdev_warn(dev->net, "Error writing MAC_CR\n");
600 return ret;
601 }
549 602
550 ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv); 603 ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
551 check_warn_return(ret, "Error updating PHY flow control"); 604 if (ret < 0)
605 netdev_warn(dev->net, "Error updating PHY flow control\n");
552 606
553 return 0; 607 return ret;
554} 608}
555 609
556static void smsc95xx_status(struct usbnet *dev, struct urb *urb) 610static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
@@ -584,7 +638,10 @@ static int smsc95xx_set_features(struct net_device *netdev,
584 int ret; 638 int ret;
585 639
586 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf); 640 ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
587 check_warn_return(ret, "Failed to read COE_CR: %d\n", ret); 641 if (ret < 0) {
642 netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
643 return ret;
644 }
588 645
589 if (features & NETIF_F_HW_CSUM) 646 if (features & NETIF_F_HW_CSUM)
590 read_buf |= Tx_COE_EN_; 647 read_buf |= Tx_COE_EN_;
@@ -597,7 +654,10 @@ static int smsc95xx_set_features(struct net_device *netdev,
597 read_buf &= ~Rx_COE_EN_; 654 read_buf &= ~Rx_COE_EN_;
598 655
599 ret = smsc95xx_write_reg(dev, COE_CR, read_buf); 656 ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
600 check_warn_return(ret, "Failed to write COE_CR: %d\n", ret); 657 if (ret < 0) {
658 netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
659 return ret;
660 }
601 661
602 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf); 662 netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
603 return 0; 663 return 0;
@@ -635,7 +695,7 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
635static int smsc95xx_ethtool_getregslen(struct net_device *netdev) 695static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
636{ 696{
637 /* all smsc95xx registers */ 697 /* all smsc95xx registers */
638 return COE_CR - ID_REV + 1; 698 return COE_CR - ID_REV + sizeof(u32);
639} 699}
640 700
641static void 701static void
@@ -677,9 +737,15 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
677{ 737{
678 struct usbnet *dev = netdev_priv(net); 738 struct usbnet *dev = netdev_priv(net);
679 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 739 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
740 int ret;
680 741
681 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE; 742 pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
682 return 0; 743
744 ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
745 if (ret < 0)
746 netdev_warn(dev->net, "device_set_wakeup_enable error %d\n", ret);
747
748 return ret;
683} 749}
684 750
685static const struct ethtool_ops smsc95xx_ethtool_ops = { 751static const struct ethtool_ops smsc95xx_ethtool_ops = {
@@ -734,12 +800,16 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
734 int ret; 800 int ret;
735 801
736 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo); 802 ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
737 check_warn_return(ret, "Failed to write ADDRL: %d\n", ret); 803 if (ret < 0) {
804 netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
805 return ret;
806 }
738 807
739 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi); 808 ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
740 check_warn_return(ret, "Failed to write ADDRH: %d\n", ret); 809 if (ret < 0)
810 netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
741 811
742 return 0; 812 return ret;
743} 813}
744 814
745/* starts the TX path */ 815/* starts the TX path */
@@ -755,17 +825,21 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
755 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 825 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
756 826
757 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 827 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr);
758 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret); 828 if (ret < 0) {
829 netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
830 return ret;
831 }
759 832
760 /* Enable Tx at SCSRs */ 833 /* Enable Tx at SCSRs */
761 ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_); 834 ret = smsc95xx_write_reg(dev, TX_CFG, TX_CFG_ON_);
762 check_warn_return(ret, "Failed to write TX_CFG: %d\n", ret); 835 if (ret < 0)
836 netdev_warn(dev->net, "Failed to write TX_CFG: %d\n", ret);
763 837
764 return 0; 838 return ret;
765} 839}
766 840
767/* Starts the Receive path */ 841/* Starts the Receive path */
768static int smsc95xx_start_rx_path(struct usbnet *dev) 842static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
769{ 843{
770 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 844 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
771 unsigned long flags; 845 unsigned long flags;
@@ -775,10 +849,11 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
775 pdata->mac_cr |= MAC_CR_RXEN_; 849 pdata->mac_cr |= MAC_CR_RXEN_;
776 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags); 850 spin_unlock_irqrestore(&pdata->mac_cr_lock, flags);
777 851
778 ret = smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr); 852 ret = __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
779 check_warn_return(ret, "Failed to write MAC_CR: %d\n", ret); 853 if (ret < 0)
854 netdev_warn(dev->net, "Failed to write MAC_CR: %d\n", ret);
780 855
781 return 0; 856 return ret;
782} 857}
783 858
784static int smsc95xx_phy_initialize(struct usbnet *dev) 859static int smsc95xx_phy_initialize(struct usbnet *dev)
@@ -813,7 +888,10 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
813 888
814 /* read to clear */ 889 /* read to clear */
815 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); 890 ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
816 check_warn_return(ret, "Failed to read PHY_INT_SRC during init"); 891 if (ret < 0) {
892 netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n");
893 return ret;
894 }
817 895
818 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, 896 smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
819 PHY_INT_MASK_DEFAULT_); 897 PHY_INT_MASK_DEFAULT_);
@@ -832,13 +910,19 @@ static int smsc95xx_reset(struct usbnet *dev)
832 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n"); 910 netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
833 911
834 ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_); 912 ret = smsc95xx_write_reg(dev, HW_CFG, HW_CFG_LRST_);
835 check_warn_return(ret, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n"); 913 if (ret < 0) {
914 netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG\n");
915 return ret;
916 }
836 917
837 timeout = 0; 918 timeout = 0;
838 do { 919 do {
839 msleep(10); 920 msleep(10);
840 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 921 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
841 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); 922 if (ret < 0) {
923 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
924 return ret;
925 }
842 timeout++; 926 timeout++;
843 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100)); 927 } while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
844 928
@@ -848,13 +932,19 @@ static int smsc95xx_reset(struct usbnet *dev)
848 } 932 }
849 933
850 ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_); 934 ret = smsc95xx_write_reg(dev, PM_CTRL, PM_CTL_PHY_RST_);
851 check_warn_return(ret, "Failed to write PM_CTRL: %d\n", ret); 935 if (ret < 0) {
936 netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
937 return ret;
938 }
852 939
853 timeout = 0; 940 timeout = 0;
854 do { 941 do {
855 msleep(10); 942 msleep(10);
856 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf); 943 ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
857 check_warn_return(ret, "Failed to read PM_CTRL: %d\n", ret); 944 if (ret < 0) {
945 netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
946 return ret;
947 }
858 timeout++; 948 timeout++;
859 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100)); 949 } while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
860 950
@@ -867,22 +957,32 @@ static int smsc95xx_reset(struct usbnet *dev)
867 if (ret < 0) 957 if (ret < 0)
868 return ret; 958 return ret;
869 959
870 netif_dbg(dev, ifup, dev->net, 960 netif_dbg(dev, ifup, dev->net, "MAC Address: %pM\n",
871 "MAC Address: %pM\n", dev->net->dev_addr); 961 dev->net->dev_addr);
872 962
873 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 963 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
874 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); 964 if (ret < 0) {
965 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
966 return ret;
967 }
875 968
876 netif_dbg(dev, ifup, dev->net, 969 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG : 0x%08x\n",
877 "Read Value from HW_CFG : 0x%08x\n", read_buf); 970 read_buf);
878 971
879 read_buf |= HW_CFG_BIR_; 972 read_buf |= HW_CFG_BIR_;
880 973
881 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 974 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
882 check_warn_return(ret, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n"); 975 if (ret < 0) {
976 netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG\n");
977 return ret;
978 }
883 979
884 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 980 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
885 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); 981 if (ret < 0) {
982 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
983 return ret;
984 }
985
886 netif_dbg(dev, ifup, dev->net, 986 netif_dbg(dev, ifup, dev->net,
887 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n", 987 "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
888 read_buf); 988 read_buf);
@@ -898,34 +998,49 @@ static int smsc95xx_reset(struct usbnet *dev)
898 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE; 998 dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
899 } 999 }
900 1000
901 netif_dbg(dev, ifup, dev->net, 1001 netif_dbg(dev, ifup, dev->net, "rx_urb_size=%ld\n",
902 "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size); 1002 (ulong)dev->rx_urb_size);
903 1003
904 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap); 1004 ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
905 check_warn_return(ret, "Failed to write BURST_CAP: %d\n", ret); 1005 if (ret < 0) {
1006 netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
1007 return ret;
1008 }
906 1009
907 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf); 1010 ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
908 check_warn_return(ret, "Failed to read BURST_CAP: %d\n", ret); 1011 if (ret < 0) {
1012 netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
1013 return ret;
1014 }
909 1015
910 netif_dbg(dev, ifup, dev->net, 1016 netif_dbg(dev, ifup, dev->net,
911 "Read Value from BURST_CAP after writing: 0x%08x\n", 1017 "Read Value from BURST_CAP after writing: 0x%08x\n",
912 read_buf); 1018 read_buf);
913 1019
914 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY); 1020 ret = smsc95xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
915 check_warn_return(ret, "Failed to write BULK_IN_DLY: %d\n", ret); 1021 if (ret < 0) {
1022 netdev_warn(dev->net, "Failed to write BULK_IN_DLY: %d\n", ret);
1023 return ret;
1024 }
916 1025
917 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf); 1026 ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
918 check_warn_return(ret, "Failed to read BULK_IN_DLY: %d\n", ret); 1027 if (ret < 0) {
1028 netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
1029 return ret;
1030 }
919 1031
920 netif_dbg(dev, ifup, dev->net, 1032 netif_dbg(dev, ifup, dev->net,
921 "Read Value from BULK_IN_DLY after writing: 0x%08x\n", 1033 "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
922 read_buf); 1034 read_buf);
923 1035
924 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 1036 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
925 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); 1037 if (ret < 0) {
1038 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1039 return ret;
1040 }
926 1041
927 netif_dbg(dev, ifup, dev->net, 1042 netif_dbg(dev, ifup, dev->net, "Read Value from HW_CFG: 0x%08x\n",
928 "Read Value from HW_CFG: 0x%08x\n", read_buf); 1043 read_buf);
929 1044
930 if (turbo_mode) 1045 if (turbo_mode)
931 read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_); 1046 read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -936,66 +1051,111 @@ static int smsc95xx_reset(struct usbnet *dev)
936 read_buf |= NET_IP_ALIGN << 9; 1051 read_buf |= NET_IP_ALIGN << 9;
937 1052
938 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf); 1053 ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
939 check_warn_return(ret, "Failed to write HW_CFG: %d\n", ret); 1054 if (ret < 0) {
1055 netdev_warn(dev->net, "Failed to write HW_CFG: %d\n", ret);
1056 return ret;
1057 }
940 1058
941 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf); 1059 ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
942 check_warn_return(ret, "Failed to read HW_CFG: %d\n", ret); 1060 if (ret < 0) {
1061 netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
1062 return ret;
1063 }
943 1064
944 netif_dbg(dev, ifup, dev->net, 1065 netif_dbg(dev, ifup, dev->net,
945 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf); 1066 "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
946 1067
947 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); 1068 ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
948 check_warn_return(ret, "Failed to write INT_STS: %d\n", ret); 1069 if (ret < 0) {
1070 netdev_warn(dev->net, "Failed to write INT_STS: %d\n", ret);
1071 return ret;
1072 }
949 1073
950 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf); 1074 ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
951 check_warn_return(ret, "Failed to read ID_REV: %d\n", ret); 1075 if (ret < 0) {
1076 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
1077 return ret;
1078 }
952 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf); 1079 netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
953 1080
954 /* Configure GPIO pins as LED outputs */ 1081 /* Configure GPIO pins as LED outputs */
955 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED | 1082 write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
956 LED_GPIO_CFG_FDX_LED; 1083 LED_GPIO_CFG_FDX_LED;
957 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf); 1084 ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
958 check_warn_return(ret, "Failed to write LED_GPIO_CFG: %d\n", ret); 1085 if (ret < 0) {
1086 netdev_warn(dev->net, "Failed to write LED_GPIO_CFG: %d\n", ret);
1087 return ret;
1088 }
959 1089
960 /* Init Tx */ 1090 /* Init Tx */
961 ret = smsc95xx_write_reg(dev, FLOW, 0); 1091 ret = smsc95xx_write_reg(dev, FLOW, 0);
962 check_warn_return(ret, "Failed to write FLOW: %d\n", ret); 1092 if (ret < 0) {
1093 netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
1094 return ret;
1095 }
963 1096
964 ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT); 1097 ret = smsc95xx_write_reg(dev, AFC_CFG, AFC_CFG_DEFAULT);
965 check_warn_return(ret, "Failed to write AFC_CFG: %d\n", ret); 1098 if (ret < 0) {
1099 netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
1100 return ret;
1101 }
966 1102
967 /* Don't need mac_cr_lock during initialisation */ 1103 /* Don't need mac_cr_lock during initialisation */
968 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr); 1104 ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
969 check_warn_return(ret, "Failed to read MAC_CR: %d\n", ret); 1105 if (ret < 0) {
1106 netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
1107 return ret;
1108 }
970 1109
971 /* Init Rx */ 1110 /* Init Rx */
972 /* Set Vlan */ 1111 /* Set Vlan */
973 ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q); 1112 ret = smsc95xx_write_reg(dev, VLAN1, (u32)ETH_P_8021Q);
974 check_warn_return(ret, "Failed to write VLAN1: %d\n", ret); 1113 if (ret < 0) {
1114 netdev_warn(dev->net, "Failed to write VLAN1: %d\n", ret);
1115 return ret;
1116 }
975 1117
976 /* Enable or disable checksum offload engines */ 1118 /* Enable or disable checksum offload engines */
977 ret = smsc95xx_set_features(dev->net, dev->net->features); 1119 ret = smsc95xx_set_features(dev->net, dev->net->features);
978 check_warn_return(ret, "Failed to set checksum offload features"); 1120 if (ret < 0) {
1121 netdev_warn(dev->net, "Failed to set checksum offload features\n");
1122 return ret;
1123 }
979 1124
980 smsc95xx_set_multicast(dev->net); 1125 smsc95xx_set_multicast(dev->net);
981 1126
982 ret = smsc95xx_phy_initialize(dev); 1127 ret = smsc95xx_phy_initialize(dev);
983 check_warn_return(ret, "Failed to init PHY"); 1128 if (ret < 0) {
1129 netdev_warn(dev->net, "Failed to init PHY\n");
1130 return ret;
1131 }
984 1132
985 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf); 1133 ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
986 check_warn_return(ret, "Failed to read INT_EP_CTL: %d\n", ret); 1134 if (ret < 0) {
1135 netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
1136 return ret;
1137 }
987 1138
988 /* enable PHY interrupts */ 1139 /* enable PHY interrupts */
989 read_buf |= INT_EP_CTL_PHY_INT_; 1140 read_buf |= INT_EP_CTL_PHY_INT_;
990 1141
991 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf); 1142 ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
992 check_warn_return(ret, "Failed to write INT_EP_CTL: %d\n", ret); 1143 if (ret < 0) {
1144 netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
1145 return ret;
1146 }
993 1147
994 ret = smsc95xx_start_tx_path(dev); 1148 ret = smsc95xx_start_tx_path(dev);
995 check_warn_return(ret, "Failed to start TX path"); 1149 if (ret < 0) {
1150 netdev_warn(dev->net, "Failed to start TX path\n");
1151 return ret;
1152 }
996 1153
997 ret = smsc95xx_start_rx_path(dev); 1154 ret = smsc95xx_start_rx_path(dev, 0);
998 check_warn_return(ret, "Failed to start RX path"); 1155 if (ret < 0) {
1156 netdev_warn(dev->net, "Failed to start RX path\n");
1157 return ret;
1158 }
999 1159
1000 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n"); 1160 netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
1001 return 0; 1161 return 0;
@@ -1017,12 +1177,16 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
1017static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) 1177static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1018{ 1178{
1019 struct smsc95xx_priv *pdata = NULL; 1179 struct smsc95xx_priv *pdata = NULL;
1180 u32 val;
1020 int ret; 1181 int ret;
1021 1182
1022 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n"); 1183 printk(KERN_INFO SMSC_CHIPNAME " v" SMSC_DRIVER_VERSION "\n");
1023 1184
1024 ret = usbnet_get_endpoints(dev, intf); 1185 ret = usbnet_get_endpoints(dev, intf);
1025 check_warn_return(ret, "usbnet_get_endpoints failed: %d\n", ret); 1186 if (ret < 0) {
1187 netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
1188 return ret;
1189 }
1026 1190
1027 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv), 1191 dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
1028 GFP_KERNEL); 1192 GFP_KERNEL);
@@ -1047,6 +1211,22 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
1047 /* Init all registers */ 1211 /* Init all registers */
1048 ret = smsc95xx_reset(dev); 1212 ret = smsc95xx_reset(dev);
1049 1213
1214 /* detect device revision as different features may be available */
1215 ret = smsc95xx_read_reg(dev, ID_REV, &val);
1216 if (ret < 0) {
1217 netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
1218 return ret;
1219 }
1220 val >>= 16;
1221
1222 if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
1223 (val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
1224 pdata->features = (FEATURE_8_WAKEUP_FILTERS |
1225 FEATURE_PHY_NLP_CROSSOVER |
1226 FEATURE_AUTOSUSPEND);
1227 else if (val == ID_REV_CHIP_ID_9512_)
1228 pdata->features = FEATURE_8_WAKEUP_FILTERS;
1229
1050 dev->net->netdev_ops = &smsc95xx_netdev_ops; 1230 dev->net->netdev_ops = &smsc95xx_netdev_ops;
1051 dev->net->ethtool_ops = &smsc95xx_ethtool_ops; 1231 dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
1052 dev->net->flags |= IFF_MULTICAST; 1232 dev->net->flags |= IFF_MULTICAST;
@@ -1066,113 +1246,448 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
1066 } 1246 }
1067} 1247}
1068 1248
1249static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
1250{
1251 u32 crc = bitrev16(crc16(0xFFFF, buffer, len));
1252 return crc << ((filter % 2) * 16);
1253}
1254
1255static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
1256{
1257 struct mii_if_info *mii = &dev->mii;
1258 int ret;
1259
1260 netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
1261
1262 /* read to clear */
1263 ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
1264 if (ret < 0) {
1265 netdev_warn(dev->net, "Error reading PHY_INT_SRC\n");
1266 return ret;
1267 }
1268
1269 /* enable interrupt source */
1270 ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
1271 if (ret < 0) {
1272 netdev_warn(dev->net, "Error reading PHY_INT_MASK\n");
1273 return ret;
1274 }
1275
1276 ret |= mask;
1277
1278 smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
1279
1280 return 0;
1281}
1282
1283static int smsc95xx_link_ok_nopm(struct usbnet *dev)
1284{
1285 struct mii_if_info *mii = &dev->mii;
1286 int ret;
1287
1288 /* first, a dummy read, needed to latch some MII phys */
1289 ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
1290 if (ret < 0) {
1291 netdev_warn(dev->net, "Error reading MII_BMSR\n");
1292 return ret;
1293 }
1294
1295 ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
1296 if (ret < 0) {
1297 netdev_warn(dev->net, "Error reading MII_BMSR\n");
1298 return ret;
1299 }
1300
1301 return !!(ret & BMSR_LSTATUS);
1302}
1303
1304static int smsc95xx_enter_suspend0(struct usbnet *dev)
1305{
1306 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1307 u32 val;
1308 int ret;
1309
1310 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1311 if (ret < 0) {
1312 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1313 return ret;
1314 }
1315
1316 val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
1317 val |= PM_CTL_SUS_MODE_0;
1318
1319 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1320 if (ret < 0) {
1321 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1322 return ret;
1323 }
1324
1325 /* clear wol status */
1326 val &= ~PM_CTL_WUPS_;
1327 val |= PM_CTL_WUPS_WOL_;
1328
1329 /* enable energy detection */
1330 if (pdata->wolopts & WAKE_PHY)
1331 val |= PM_CTL_WUPS_ED_;
1332
1333 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1334 if (ret < 0) {
1335 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1336 return ret;
1337 }
1338
1339 /* read back PM_CTRL */
1340 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1341 if (ret < 0)
1342 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1343
1344 return ret;
1345}
1346
1347static int smsc95xx_enter_suspend1(struct usbnet *dev)
1348{
1349 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1350 struct mii_if_info *mii = &dev->mii;
1351 u32 val;
1352 int ret;
1353
1354 /* reconfigure link pulse detection timing for
1355 * compatibility with non-standard link partners
1356 */
1357 if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
1358 smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
1359 PHY_EDPD_CONFIG_DEFAULT);
1360
1361 /* enable energy detect power-down mode */
1362 ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
1363 if (ret < 0) {
1364 netdev_warn(dev->net, "Error reading PHY_MODE_CTRL_STS\n");
1365 return ret;
1366 }
1367
1368 ret |= MODE_CTRL_STS_EDPWRDOWN_;
1369
1370 smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
1371
1372 /* enter SUSPEND1 mode */
1373 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1374 if (ret < 0) {
1375 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1376 return ret;
1377 }
1378
1379 val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
1380 val |= PM_CTL_SUS_MODE_1;
1381
1382 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1383 if (ret < 0) {
1384 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1385 return ret;
1386 }
1387
1388 /* clear wol status, enable energy detection */
1389 val &= ~PM_CTL_WUPS_;
1390 val |= (PM_CTL_WUPS_ED_ | PM_CTL_ED_EN_);
1391
1392 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1393 if (ret < 0)
1394 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1395
1396 return ret;
1397}
1398
1399static int smsc95xx_enter_suspend2(struct usbnet *dev)
1400{
1401 u32 val;
1402 int ret;
1403
1404 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1405 if (ret < 0) {
1406 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1407 return ret;
1408 }
1409
1410 val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_);
1411 val |= PM_CTL_SUS_MODE_2;
1412
1413 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1414 if (ret < 0)
1415 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1416
1417 return ret;
1418}
1419
1069static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message) 1420static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
1070{ 1421{
1071 struct usbnet *dev = usb_get_intfdata(intf); 1422 struct usbnet *dev = usb_get_intfdata(intf);
1072 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); 1423 struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
1424 u32 val, link_up;
1073 int ret; 1425 int ret;
1074 u32 val;
1075 1426
1076 ret = usbnet_suspend(intf, message); 1427 ret = usbnet_suspend(intf, message);
1077 check_warn_return(ret, "usbnet_suspend error"); 1428 if (ret < 0) {
1429 netdev_warn(dev->net, "usbnet_suspend error\n");
1430 return ret;
1431 }
1078 1432
1079 /* if no wol options set, enter lowest power SUSPEND2 mode */ 1433 /* determine if link is up using only _nopm functions */
1080 if (!(pdata->wolopts & SUPPORTED_WAKE)) { 1434 link_up = smsc95xx_link_ok_nopm(dev);
1081 netdev_info(dev->net, "entering SUSPEND2 mode"); 1435
1436 /* if no wol options set, or if link is down and we're not waking on
1437 * PHY activity, enter lowest power SUSPEND2 mode
1438 */
1439 if (!(pdata->wolopts & SUPPORTED_WAKE) ||
1440 !(link_up || (pdata->wolopts & WAKE_PHY))) {
1441 netdev_info(dev->net, "entering SUSPEND2 mode\n");
1082 1442
1083 /* disable energy detect (link up) & wake up events */ 1443 /* disable energy detect (link up) & wake up events */
1084 ret = smsc95xx_read_reg(dev, WUCSR, &val); 1444 ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
1085 check_warn_return(ret, "Error reading WUCSR"); 1445 if (ret < 0) {
1446 netdev_warn(dev->net, "Error reading WUCSR\n");
1447 goto done;
1448 }
1086 1449
1087 val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_); 1450 val &= ~(WUCSR_MPEN_ | WUCSR_WAKE_EN_);
1088 1451
1089 ret = smsc95xx_write_reg(dev, WUCSR, val); 1452 ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
1090 check_warn_return(ret, "Error writing WUCSR"); 1453 if (ret < 0) {
1454 netdev_warn(dev->net, "Error writing WUCSR\n");
1455 goto done;
1456 }
1091 1457
1092 ret = smsc95xx_read_reg(dev, PM_CTRL, &val); 1458 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1093 check_warn_return(ret, "Error reading PM_CTRL"); 1459 if (ret < 0) {
1460 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1461 goto done;
1462 }
1094 1463
1095 val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_); 1464 val &= ~(PM_CTL_ED_EN_ | PM_CTL_WOL_EN_);
1096 1465
1097 ret = smsc95xx_write_reg(dev, PM_CTRL, val); 1466 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1098 check_warn_return(ret, "Error writing PM_CTRL"); 1467 if (ret < 0) {
1468 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1469 goto done;
1470 }
1099 1471
1100 /* enter suspend2 mode */ 1472 ret = smsc95xx_enter_suspend2(dev);
1101 ret = smsc95xx_read_reg(dev, PM_CTRL, &val); 1473 goto done;
1102 check_warn_return(ret, "Error reading PM_CTRL"); 1474 }
1103 1475
1104 val &= ~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_); 1476 if (pdata->wolopts & WAKE_PHY) {
1105 val |= PM_CTL_SUS_MODE_2; 1477 ret = smsc95xx_enable_phy_wakeup_interrupts(dev,
1478 (PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_LINK_DOWN_));
1479 if (ret < 0) {
1480 netdev_warn(dev->net, "error enabling PHY wakeup ints\n");
1481 goto done;
1482 }
1483
1484 /* if link is down then configure EDPD and enter SUSPEND1,
1485 * otherwise enter SUSPEND0 below
1486 */
1487 if (!link_up) {
1488 netdev_info(dev->net, "entering SUSPEND1 mode\n");
1489 ret = smsc95xx_enter_suspend1(dev);
1490 goto done;
1491 }
1492 }
1106 1493
1107 ret = smsc95xx_write_reg(dev, PM_CTRL, val); 1494 if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
1108 check_warn_return(ret, "Error writing PM_CTRL"); 1495 u32 *filter_mask = kzalloc(sizeof(u32) * 32, GFP_KERNEL);
1496 u32 command[2];
1497 u32 offset[2];
1498 u32 crc[4];
1499 int wuff_filter_count =
1500 (pdata->features & FEATURE_8_WAKEUP_FILTERS) ?
1501 LAN9500A_WUFF_NUM : LAN9500_WUFF_NUM;
1502 int i, filter = 0;
1503
1504 if (!filter_mask) {
1505 netdev_warn(dev->net, "Unable to allocate filter_mask\n");
1506 ret = -ENOMEM;
1507 goto done;
1508 }
1109 1509
1110 return 0; 1510 memset(command, 0, sizeof(command));
1511 memset(offset, 0, sizeof(offset));
1512 memset(crc, 0, sizeof(crc));
1513
1514 if (pdata->wolopts & WAKE_BCAST) {
1515 const u8 bcast[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1516 netdev_info(dev->net, "enabling broadcast detection\n");
1517 filter_mask[filter * 4] = 0x003F;
1518 filter_mask[filter * 4 + 1] = 0x00;
1519 filter_mask[filter * 4 + 2] = 0x00;
1520 filter_mask[filter * 4 + 3] = 0x00;
1521 command[filter/4] |= 0x05UL << ((filter % 4) * 8);
1522 offset[filter/4] |= 0x00 << ((filter % 4) * 8);
1523 crc[filter/2] |= smsc_crc(bcast, 6, filter);
1524 filter++;
1525 }
1526
1527 if (pdata->wolopts & WAKE_MCAST) {
1528 const u8 mcast[] = {0x01, 0x00, 0x5E};
1529 netdev_info(dev->net, "enabling multicast detection\n");
1530 filter_mask[filter * 4] = 0x0007;
1531 filter_mask[filter * 4 + 1] = 0x00;
1532 filter_mask[filter * 4 + 2] = 0x00;
1533 filter_mask[filter * 4 + 3] = 0x00;
1534 command[filter/4] |= 0x09UL << ((filter % 4) * 8);
1535 offset[filter/4] |= 0x00 << ((filter % 4) * 8);
1536 crc[filter/2] |= smsc_crc(mcast, 3, filter);
1537 filter++;
1538 }
1539
1540 if (pdata->wolopts & WAKE_ARP) {
1541 const u8 arp[] = {0x08, 0x06};
1542 netdev_info(dev->net, "enabling ARP detection\n");
1543 filter_mask[filter * 4] = 0x0003;
1544 filter_mask[filter * 4 + 1] = 0x00;
1545 filter_mask[filter * 4 + 2] = 0x00;
1546 filter_mask[filter * 4 + 3] = 0x00;
1547 command[filter/4] |= 0x05UL << ((filter % 4) * 8);
1548 offset[filter/4] |= 0x0C << ((filter % 4) * 8);
1549 crc[filter/2] |= smsc_crc(arp, 2, filter);
1550 filter++;
1551 }
1552
1553 if (pdata->wolopts & WAKE_UCAST) {
1554 netdev_info(dev->net, "enabling unicast detection\n");
1555 filter_mask[filter * 4] = 0x003F;
1556 filter_mask[filter * 4 + 1] = 0x00;
1557 filter_mask[filter * 4 + 2] = 0x00;
1558 filter_mask[filter * 4 + 3] = 0x00;
1559 command[filter/4] |= 0x01UL << ((filter % 4) * 8);
1560 offset[filter/4] |= 0x00 << ((filter % 4) * 8);
1561 crc[filter/2] |= smsc_crc(dev->net->dev_addr, ETH_ALEN, filter);
1562 filter++;
1563 }
1564
1565 for (i = 0; i < (wuff_filter_count * 4); i++) {
1566 ret = smsc95xx_write_reg_nopm(dev, WUFF, filter_mask[i]);
1567 if (ret < 0) {
1568 netdev_warn(dev->net, "Error writing WUFF\n");
1569 kfree(filter_mask);
1570 goto done;
1571 }
1572 }
1573 kfree(filter_mask);
1574
1575 for (i = 0; i < (wuff_filter_count / 4); i++) {
1576 ret = smsc95xx_write_reg_nopm(dev, WUFF, command[i]);
1577 if (ret < 0) {
1578 netdev_warn(dev->net, "Error writing WUFF\n");
1579 goto done;
1580 }
1581 }
1582
1583 for (i = 0; i < (wuff_filter_count / 4); i++) {
1584 ret = smsc95xx_write_reg_nopm(dev, WUFF, offset[i]);
1585 if (ret < 0) {
1586 netdev_warn(dev->net, "Error writing WUFF\n");
1587 goto done;
1588 }
1589 }
1590
1591 for (i = 0; i < (wuff_filter_count / 2); i++) {
1592 ret = smsc95xx_write_reg_nopm(dev, WUFF, crc[i]);
1593 if (ret < 0) {
1594 netdev_warn(dev->net, "Error writing WUFF\n");
1595 goto done;
1596 }
1597 }
1598
1599 /* clear any pending pattern match packet status */
1600 ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
1601 if (ret < 0) {
1602 netdev_warn(dev->net, "Error reading WUCSR\n");
1603 goto done;
1604 }
1605
1606 val |= WUCSR_WUFR_;
1607
1608 ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
1609 if (ret < 0) {
1610 netdev_warn(dev->net, "Error writing WUCSR\n");
1611 goto done;
1612 }
1111 } 1613 }
1112 1614
1113 if (pdata->wolopts & WAKE_MAGIC) { 1615 if (pdata->wolopts & WAKE_MAGIC) {
1114 /* clear any pending magic packet status */ 1616 /* clear any pending magic packet status */
1115 ret = smsc95xx_read_reg(dev, WUCSR, &val); 1617 ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
1116 check_warn_return(ret, "Error reading WUCSR"); 1618 if (ret < 0) {
1619 netdev_warn(dev->net, "Error reading WUCSR\n");
1620 goto done;
1621 }
1117 1622
1118 val |= WUCSR_MPR_; 1623 val |= WUCSR_MPR_;
1119 1624
1120 ret = smsc95xx_write_reg(dev, WUCSR, val); 1625 ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
1121 check_warn_return(ret, "Error writing WUCSR"); 1626 if (ret < 0) {
1627 netdev_warn(dev->net, "Error writing WUCSR\n");
1628 goto done;
1629 }
1630 }
1631
1632 /* enable/disable wakeup sources */
1633 ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
1634 if (ret < 0) {
1635 netdev_warn(dev->net, "Error reading WUCSR\n");
1636 goto done;
1122 } 1637 }
1123 1638
1124 /* enable/disable magic packup wake */ 1639 if (pdata->wolopts & (WAKE_BCAST | WAKE_MCAST | WAKE_ARP | WAKE_UCAST)) {
1125 ret = smsc95xx_read_reg(dev, WUCSR, &val); 1640 netdev_info(dev->net, "enabling pattern match wakeup\n");
1126 check_warn_return(ret, "Error reading WUCSR"); 1641 val |= WUCSR_WAKE_EN_;
1642 } else {
1643 netdev_info(dev->net, "disabling pattern match wakeup\n");
1644 val &= ~WUCSR_WAKE_EN_;
1645 }
1127 1646
1128 if (pdata->wolopts & WAKE_MAGIC) { 1647 if (pdata->wolopts & WAKE_MAGIC) {
1129 netdev_info(dev->net, "enabling magic packet wakeup"); 1648 netdev_info(dev->net, "enabling magic packet wakeup\n");
1130 val |= WUCSR_MPEN_; 1649 val |= WUCSR_MPEN_;
1131 } else { 1650 } else {
1132 netdev_info(dev->net, "disabling magic packet wakeup"); 1651 netdev_info(dev->net, "disabling magic packet wakeup\n");
1133 val &= ~WUCSR_MPEN_; 1652 val &= ~WUCSR_MPEN_;
1134 } 1653 }
1135 1654
1136 ret = smsc95xx_write_reg(dev, WUCSR, val); 1655 ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
1137 check_warn_return(ret, "Error writing WUCSR"); 1656 if (ret < 0) {
1657 netdev_warn(dev->net, "Error writing WUCSR\n");
1658 goto done;
1659 }
1138 1660
1139 /* enable wol wakeup source */ 1661 /* enable wol wakeup source */
1140 ret = smsc95xx_read_reg(dev, PM_CTRL, &val); 1662 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1141 check_warn_return(ret, "Error reading PM_CTRL"); 1663 if (ret < 0) {
1664 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1665 goto done;
1666 }
1142 1667
1143 val |= PM_CTL_WOL_EN_; 1668 val |= PM_CTL_WOL_EN_;
1144 1669
1145 ret = smsc95xx_write_reg(dev, PM_CTRL, val); 1670 /* phy energy detect wakeup source */
1146 check_warn_return(ret, "Error writing PM_CTRL"); 1671 if (pdata->wolopts & WAKE_PHY)
1147 1672 val |= PM_CTL_ED_EN_;
1148 /* enable receiver */
1149 smsc95xx_start_rx_path(dev);
1150
1151 /* some wol options are enabled, so enter SUSPEND0 */
1152 netdev_info(dev->net, "entering SUSPEND0 mode");
1153
1154 ret = smsc95xx_read_reg(dev, PM_CTRL, &val);
1155 check_warn_return(ret, "Error reading PM_CTRL");
1156
1157 val &= (~(PM_CTL_SUS_MODE_ | PM_CTL_WUPS_ | PM_CTL_PHY_RST_));
1158 val |= PM_CTL_SUS_MODE_0;
1159
1160 ret = smsc95xx_write_reg(dev, PM_CTRL, val);
1161 check_warn_return(ret, "Error writing PM_CTRL");
1162 1673
1163 /* clear wol status */ 1674 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1164 val &= ~PM_CTL_WUPS_; 1675 if (ret < 0) {
1165 val |= PM_CTL_WUPS_WOL_; 1676 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1166 ret = smsc95xx_write_reg(dev, PM_CTRL, val); 1677 goto done;
1167 check_warn_return(ret, "Error writing PM_CTRL"); 1678 }
1168 1679
1169 /* read back PM_CTRL */ 1680 /* enable receiver to enable frame reception */
1170 ret = smsc95xx_read_reg(dev, PM_CTRL, &val); 1681 smsc95xx_start_rx_path(dev, 1);
1171 check_warn_return(ret, "Error reading PM_CTRL");
1172 1682
1173 smsc95xx_set_feature(dev, USB_DEVICE_REMOTE_WAKEUP); 1683 /* some wol options are enabled, so enter SUSPEND0 */
1684 netdev_info(dev->net, "entering SUSPEND0 mode\n");
1685 ret = smsc95xx_enter_suspend0(dev);
1174 1686
1175 return 0; 1687done:
1688 if (ret)
1689 usbnet_resume(intf);
1690 return ret;
1176} 1691}
1177 1692
1178static int smsc95xx_resume(struct usb_interface *intf) 1693static int smsc95xx_resume(struct usb_interface *intf)
@@ -1184,33 +1699,44 @@ static int smsc95xx_resume(struct usb_interface *intf)
1184 1699
1185 BUG_ON(!dev); 1700 BUG_ON(!dev);
1186 1701
1187 if (pdata->wolopts & WAKE_MAGIC) { 1702 if (pdata->wolopts) {
1188 smsc95xx_clear_feature(dev, USB_DEVICE_REMOTE_WAKEUP); 1703 /* clear wake-up sources */
1189 1704 ret = smsc95xx_read_reg_nopm(dev, WUCSR, &val);
1190 /* Disable magic packup wake */ 1705 if (ret < 0) {
1191 ret = smsc95xx_read_reg(dev, WUCSR, &val); 1706 netdev_warn(dev->net, "Error reading WUCSR\n");
1192 check_warn_return(ret, "Error reading WUCSR"); 1707 return ret;
1708 }
1193 1709
1194 val &= ~WUCSR_MPEN_; 1710 val &= ~(WUCSR_WAKE_EN_ | WUCSR_MPEN_);
1195 1711
1196 ret = smsc95xx_write_reg(dev, WUCSR, val); 1712 ret = smsc95xx_write_reg_nopm(dev, WUCSR, val);
1197 check_warn_return(ret, "Error writing WUCSR"); 1713 if (ret < 0) {
1714 netdev_warn(dev->net, "Error writing WUCSR\n");
1715 return ret;
1716 }
1198 1717
1199 /* clear wake-up status */ 1718 /* clear wake-up status */
1200 ret = smsc95xx_read_reg(dev, PM_CTRL, &val); 1719 ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
1201 check_warn_return(ret, "Error reading PM_CTRL"); 1720 if (ret < 0) {
1721 netdev_warn(dev->net, "Error reading PM_CTRL\n");
1722 return ret;
1723 }
1202 1724
1203 val &= ~PM_CTL_WOL_EN_; 1725 val &= ~PM_CTL_WOL_EN_;
1204 val |= PM_CTL_WUPS_; 1726 val |= PM_CTL_WUPS_;
1205 1727
1206 ret = smsc95xx_write_reg(dev, PM_CTRL, val); 1728 ret = smsc95xx_write_reg_nopm(dev, PM_CTRL, val);
1207 check_warn_return(ret, "Error writing PM_CTRL"); 1729 if (ret < 0) {
1730 netdev_warn(dev->net, "Error writing PM_CTRL\n");
1731 return ret;
1732 }
1208 } 1733 }
1209 1734
1210 return usbnet_resume(intf); 1735 ret = usbnet_resume(intf);
1211 check_warn_return(ret, "usbnet_resume error"); 1736 if (ret < 0)
1737 netdev_warn(dev->net, "usbnet_resume error\n");
1212 1738
1213 return 0; 1739 return ret;
1214} 1740}
1215 1741
1216static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1742static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
diff --git a/drivers/net/usb/smsc95xx.h b/drivers/net/usb/smsc95xx.h
index 2ff9815aa27c..f360ee372554 100644
--- a/drivers/net/usb/smsc95xx.h
+++ b/drivers/net/usb/smsc95xx.h
@@ -53,6 +53,11 @@
53#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) 53#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000)
54#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) 54#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
55#define ID_REV_CHIP_ID_9500_ (0x9500) 55#define ID_REV_CHIP_ID_9500_ (0x9500)
56#define ID_REV_CHIP_ID_9500A_ (0x9E00)
57#define ID_REV_CHIP_ID_9512_ (0xEC00)
58#define ID_REV_CHIP_ID_9530_ (0x9530)
59#define ID_REV_CHIP_ID_89530_ (0x9E08)
60#define ID_REV_CHIP_ID_9730_ (0x9730)
56 61
57#define INT_STS (0x08) 62#define INT_STS (0x08)
58#define INT_STS_TX_STOP_ (0x00020000) 63#define INT_STS_TX_STOP_ (0x00020000)
@@ -203,8 +208,11 @@
203#define VLAN2 (0x124) 208#define VLAN2 (0x124)
204 209
205#define WUFF (0x128) 210#define WUFF (0x128)
211#define LAN9500_WUFF_NUM (4)
212#define LAN9500A_WUFF_NUM (8)
206 213
207#define WUCSR (0x12C) 214#define WUCSR (0x12C)
215#define WUCSR_WFF_PTR_RST_ (0x80000000)
208#define WUCSR_GUE_ (0x00000200) 216#define WUCSR_GUE_ (0x00000200)
209#define WUCSR_WUFR_ (0x00000040) 217#define WUCSR_WUFR_ (0x00000040)
210#define WUCSR_MPR_ (0x00000020) 218#define WUCSR_MPR_ (0x00000020)
@@ -218,6 +226,23 @@
218 226
219/* Vendor-specific PHY Definitions */ 227/* Vendor-specific PHY Definitions */
220 228
229/* EDPD NLP / crossover time configuration (LAN9500A only) */
230#define PHY_EDPD_CONFIG (16)
231#define PHY_EDPD_CONFIG_TX_NLP_EN_ ((u16)0x8000)
232#define PHY_EDPD_CONFIG_TX_NLP_1000_ ((u16)0x0000)
233#define PHY_EDPD_CONFIG_TX_NLP_768_ ((u16)0x2000)
234#define PHY_EDPD_CONFIG_TX_NLP_512_ ((u16)0x4000)
235#define PHY_EDPD_CONFIG_TX_NLP_256_ ((u16)0x6000)
236#define PHY_EDPD_CONFIG_RX_1_NLP_ ((u16)0x1000)
237#define PHY_EDPD_CONFIG_RX_NLP_64_ ((u16)0x0000)
238#define PHY_EDPD_CONFIG_RX_NLP_256_ ((u16)0x0400)
239#define PHY_EDPD_CONFIG_RX_NLP_512_ ((u16)0x0800)
240#define PHY_EDPD_CONFIG_RX_NLP_1000_ ((u16)0x0C00)
241#define PHY_EDPD_CONFIG_EXT_CROSSOVER_ ((u16)0x0001)
242#define PHY_EDPD_CONFIG_DEFAULT (PHY_EDPD_CONFIG_TX_NLP_EN_ | \
243 PHY_EDPD_CONFIG_TX_NLP_768_ | \
244 PHY_EDPD_CONFIG_RX_1_NLP_)
245
221/* Mode Control/Status Register */ 246/* Mode Control/Status Register */
222#define PHY_MODE_CTRL_STS (17) 247#define PHY_MODE_CTRL_STS (17)
223#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000) 248#define MODE_CTRL_STS_EDPWRDOWN_ ((u16)0x2000)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index edb81ed06950..c04110ba677f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1616,6 +1616,202 @@ void usbnet_device_suggests_idle(struct usbnet *dev)
1616EXPORT_SYMBOL(usbnet_device_suggests_idle); 1616EXPORT_SYMBOL(usbnet_device_suggests_idle);
1617 1617
1618/*-------------------------------------------------------------------------*/ 1618/*-------------------------------------------------------------------------*/
1619static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1620 u16 value, u16 index, void *data, u16 size)
1621{
1622 void *buf = NULL;
1623 int err = -ENOMEM;
1624
1625 netdev_dbg(dev->net, "usbnet_read_cmd cmd=0x%02x reqtype=%02x"
1626 " value=0x%04x index=0x%04x size=%d\n",
1627 cmd, reqtype, value, index, size);
1628
1629 if (data) {
1630 buf = kmalloc(size, GFP_KERNEL);
1631 if (!buf)
1632 goto out;
1633 }
1634
1635 err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
1636 cmd, reqtype, value, index, buf, size,
1637 USB_CTRL_GET_TIMEOUT);
1638 if (err > 0 && err <= size)
1639 memcpy(data, buf, err);
1640 kfree(buf);
1641out:
1642 return err;
1643}
1644
1645static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1646 u16 value, u16 index, const void *data,
1647 u16 size)
1648{
1649 void *buf = NULL;
1650 int err = -ENOMEM;
1651
1652 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
1653 " value=0x%04x index=0x%04x size=%d\n",
1654 cmd, reqtype, value, index, size);
1655
1656 if (data) {
1657 buf = kmemdup(data, size, GFP_KERNEL);
1658 if (!buf)
1659 goto out;
1660 }
1661
1662 err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
1663 cmd, reqtype, value, index, buf, size,
1664 USB_CTRL_SET_TIMEOUT);
1665 kfree(buf);
1666
1667out:
1668 return err;
1669}
1670
1671/*
1672 * The function can't be called inside suspend/resume callback,
1673 * otherwise deadlock will be caused.
1674 */
1675int usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1676 u16 value, u16 index, void *data, u16 size)
1677{
1678 int ret;
1679
1680 if (usb_autopm_get_interface(dev->intf) < 0)
1681 return -ENODEV;
1682 ret = __usbnet_read_cmd(dev, cmd, reqtype, value, index,
1683 data, size);
1684 usb_autopm_put_interface(dev->intf);
1685 return ret;
1686}
1687EXPORT_SYMBOL_GPL(usbnet_read_cmd);
1688
1689/*
1690 * The function can't be called inside suspend/resume callback,
1691 * otherwise deadlock will be caused.
1692 */
1693int usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
1694 u16 value, u16 index, const void *data, u16 size)
1695{
1696 int ret;
1697
1698 if (usb_autopm_get_interface(dev->intf) < 0)
1699 return -ENODEV;
1700 ret = __usbnet_write_cmd(dev, cmd, reqtype, value, index,
1701 data, size);
1702 usb_autopm_put_interface(dev->intf);
1703 return ret;
1704}
1705EXPORT_SYMBOL_GPL(usbnet_write_cmd);
1706
1707/*
1708 * The function can be called inside suspend/resume callback safely
1709 * and should only be called by suspend/resume callback generally.
1710 */
1711int usbnet_read_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
1712 u16 value, u16 index, void *data, u16 size)
1713{
1714 return __usbnet_read_cmd(dev, cmd, reqtype, value, index,
1715 data, size);
1716}
1717EXPORT_SYMBOL_GPL(usbnet_read_cmd_nopm);
1718
1719/*
1720 * The function can be called inside suspend/resume callback safely
1721 * and should only be called by suspend/resume callback generally.
1722 */
1723int usbnet_write_cmd_nopm(struct usbnet *dev, u8 cmd, u8 reqtype,
1724 u16 value, u16 index, const void *data,
1725 u16 size)
1726{
1727 return __usbnet_write_cmd(dev, cmd, reqtype, value, index,
1728 data, size);
1729}
1730EXPORT_SYMBOL_GPL(usbnet_write_cmd_nopm);
1731
1732static void usbnet_async_cmd_cb(struct urb *urb)
1733{
1734 struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
1735 int status = urb->status;
1736
1737 if (status < 0)
1738 dev_dbg(&urb->dev->dev, "%s failed with %d",
1739 __func__, status);
1740
1741 kfree(req);
1742 usb_free_urb(urb);
1743}
1744
1745/*
1746 * The caller must make sure that device can't be put into suspend
1747 * state until the control URB completes.
1748 */
1749int usbnet_write_cmd_async(struct usbnet *dev, u8 cmd, u8 reqtype,
1750 u16 value, u16 index, const void *data, u16 size)
1751{
1752 struct usb_ctrlrequest *req = NULL;
1753 struct urb *urb;
1754 int err = -ENOMEM;
1755 void *buf = NULL;
1756
1757 netdev_dbg(dev->net, "usbnet_write_cmd cmd=0x%02x reqtype=%02x"
1758 " value=0x%04x index=0x%04x size=%d\n",
1759 cmd, reqtype, value, index, size);
1760
1761 urb = usb_alloc_urb(0, GFP_ATOMIC);
1762 if (!urb) {
1763 netdev_err(dev->net, "Error allocating URB in"
1764 " %s!\n", __func__);
1765 goto fail;
1766 }
1767
1768 if (data) {
1769 buf = kmemdup(data, size, GFP_ATOMIC);
1770 if (!buf) {
1771 netdev_err(dev->net, "Error allocating buffer"
1772 " in %s!\n", __func__);
1773 goto fail_free;
1774 }
1775 }
1776
1777 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
1778 if (!req) {
1779 netdev_err(dev->net, "Failed to allocate memory for %s\n",
1780 __func__);
1781 goto fail_free_buf;
1782 }
1783
1784 req->bRequestType = reqtype;
1785 req->bRequest = cmd;
1786 req->wValue = cpu_to_le16(value);
1787 req->wIndex = cpu_to_le16(index);
1788 req->wLength = cpu_to_le16(size);
1789
1790 usb_fill_control_urb(urb, dev->udev,
1791 usb_sndctrlpipe(dev->udev, 0),
1792 (void *)req, buf, size,
1793 usbnet_async_cmd_cb, req);
1794 urb->transfer_flags |= URB_FREE_BUFFER;
1795
1796 err = usb_submit_urb(urb, GFP_ATOMIC);
1797 if (err < 0) {
1798 netdev_err(dev->net, "Error submitting the control"
1799 " message: status=%d\n", err);
1800 goto fail_free;
1801 }
1802 return 0;
1803
1804fail_free_buf:
1805 kfree(buf);
1806fail_free:
1807 kfree(req);
1808 usb_free_urb(urb);
1809fail:
1810 return err;
1811
1812}
1813EXPORT_SYMBOL_GPL(usbnet_write_cmd_async);
1814/*-------------------------------------------------------------------------*/
1619 1815
1620static int __init usbnet_init(void) 1816static int __init usbnet_init(void)
1621{ 1817{
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e522ff70444c..95814d9747ef 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -264,6 +264,7 @@ static void veth_setup(struct net_device *dev)
264 ether_setup(dev); 264 ether_setup(dev);
265 265
266 dev->priv_flags &= ~IFF_TX_SKB_SHARING; 266 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
267 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
267 268
268 dev->netdev_ops = &veth_netdev_ops; 269 dev->netdev_ops = &veth_netdev_ops;
269 dev->ethtool_ops = &veth_ethtool_ops; 270 dev->ethtool_ops = &veth_ethtool_ops;
@@ -339,7 +340,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
339 if (IS_ERR(net)) 340 if (IS_ERR(net))
340 return PTR_ERR(net); 341 return PTR_ERR(net);
341 342
342 peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp); 343 peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp);
343 if (IS_ERR(peer)) { 344 if (IS_ERR(peer)) {
344 put_net(net); 345 put_net(net);
345 return PTR_ERR(peer); 346 return PTR_ERR(peer);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cbf8b0625352..68d64f0313ea 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -51,15 +51,51 @@ struct virtnet_stats {
51 u64 rx_packets; 51 u64 rx_packets;
52}; 52};
53 53
54/* Internal representation of a send virtqueue */
55struct send_queue {
56 /* Virtqueue associated with this send _queue */
57 struct virtqueue *vq;
58
59 /* TX: fragments + linear part + virtio header */
60 struct scatterlist sg[MAX_SKB_FRAGS + 2];
61
62 /* Name of the send queue: output.$index */
63 char name[40];
64};
65
66/* Internal representation of a receive virtqueue */
67struct receive_queue {
68 /* Virtqueue associated with this receive_queue */
69 struct virtqueue *vq;
70
71 struct napi_struct napi;
72
73 /* Number of input buffers, and max we've ever had. */
74 unsigned int num, max;
75
76 /* Chain pages by the private ptr. */
77 struct page *pages;
78
79 /* RX: fragments + linear part + virtio header */
80 struct scatterlist sg[MAX_SKB_FRAGS + 2];
81
82 /* Name of this receive queue: input.$index */
83 char name[40];
84};
85
54struct virtnet_info { 86struct virtnet_info {
55 struct virtio_device *vdev; 87 struct virtio_device *vdev;
56 struct virtqueue *rvq, *svq, *cvq; 88 struct virtqueue *cvq;
57 struct net_device *dev; 89 struct net_device *dev;
58 struct napi_struct napi; 90 struct send_queue *sq;
91 struct receive_queue *rq;
59 unsigned int status; 92 unsigned int status;
60 93
61 /* Number of input buffers, and max we've ever had. */ 94 /* Max # of queue pairs supported by the device */
62 unsigned int num, max; 95 u16 max_queue_pairs;
96
97 /* # of queue pairs currently used by the driver */
98 u16 curr_queue_pairs;
63 99
64 /* I like... big packets and I cannot lie! */ 100 /* I like... big packets and I cannot lie! */
65 bool big_packets; 101 bool big_packets;
@@ -67,6 +103,9 @@ struct virtnet_info {
67 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 103 /* Host will merge rx buffers for big packets (shake it! shake it!) */
68 bool mergeable_rx_bufs; 104 bool mergeable_rx_bufs;
69 105
106 /* Has control virtqueue */
107 bool has_cvq;
108
70 /* enable config space updates */ 109 /* enable config space updates */
71 bool config_enable; 110 bool config_enable;
72 111
@@ -82,12 +121,8 @@ struct virtnet_info {
82 /* Lock for config space updates */ 121 /* Lock for config space updates */
83 struct mutex config_lock; 122 struct mutex config_lock;
84 123
85 /* Chain pages by the private ptr. */ 124 /* Does the affinity hint is set for virtqueues? */
86 struct page *pages; 125 bool affinity_hint_set;
87
88 /* fragments + linear part + virtio header */
89 struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
90 struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
91}; 126};
92 127
93struct skb_vnet_hdr { 128struct skb_vnet_hdr {
@@ -108,6 +143,29 @@ struct padded_vnet_hdr {
108 char padding[6]; 143 char padding[6];
109}; 144};
110 145
146/* Converting between virtqueue no. and kernel tx/rx queue no.
147 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
148 */
149static int vq2txq(struct virtqueue *vq)
150{
151 return (virtqueue_get_queue_index(vq) - 1) / 2;
152}
153
154static int txq2vq(int txq)
155{
156 return txq * 2 + 1;
157}
158
159static int vq2rxq(struct virtqueue *vq)
160{
161 return virtqueue_get_queue_index(vq) / 2;
162}
163
164static int rxq2vq(int rxq)
165{
166 return rxq * 2;
167}
168
111static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) 169static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
112{ 170{
113 return (struct skb_vnet_hdr *)skb->cb; 171 return (struct skb_vnet_hdr *)skb->cb;
@@ -117,22 +175,22 @@ static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
117 * private is used to chain pages for big packets, put the whole 175 * private is used to chain pages for big packets, put the whole
118 * most recent used list in the beginning for reuse 176 * most recent used list in the beginning for reuse
119 */ 177 */
120static void give_pages(struct virtnet_info *vi, struct page *page) 178static void give_pages(struct receive_queue *rq, struct page *page)
121{ 179{
122 struct page *end; 180 struct page *end;
123 181
124 /* Find end of list, sew whole thing into vi->pages. */ 182 /* Find end of list, sew whole thing into vi->rq.pages. */
125 for (end = page; end->private; end = (struct page *)end->private); 183 for (end = page; end->private; end = (struct page *)end->private);
126 end->private = (unsigned long)vi->pages; 184 end->private = (unsigned long)rq->pages;
127 vi->pages = page; 185 rq->pages = page;
128} 186}
129 187
130static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask) 188static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
131{ 189{
132 struct page *p = vi->pages; 190 struct page *p = rq->pages;
133 191
134 if (p) { 192 if (p) {
135 vi->pages = (struct page *)p->private; 193 rq->pages = (struct page *)p->private;
136 /* clear private here, it is used to chain pages */ 194 /* clear private here, it is used to chain pages */
137 p->private = 0; 195 p->private = 0;
138 } else 196 } else
@@ -140,15 +198,15 @@ static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
140 return p; 198 return p;
141} 199}
142 200
143static void skb_xmit_done(struct virtqueue *svq) 201static void skb_xmit_done(struct virtqueue *vq)
144{ 202{
145 struct virtnet_info *vi = svq->vdev->priv; 203 struct virtnet_info *vi = vq->vdev->priv;
146 204
147 /* Suppress further interrupts. */ 205 /* Suppress further interrupts. */
148 virtqueue_disable_cb(svq); 206 virtqueue_disable_cb(vq);
149 207
150 /* We were probably waiting for more output buffers. */ 208 /* We were probably waiting for more output buffers. */
151 netif_wake_queue(vi->dev); 209 netif_wake_subqueue(vi->dev, vq2txq(vq));
152} 210}
153 211
154static void set_skb_frag(struct sk_buff *skb, struct page *page, 212static void set_skb_frag(struct sk_buff *skb, struct page *page,
@@ -167,9 +225,10 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
167} 225}
168 226
169/* Called from bottom half context */ 227/* Called from bottom half context */
170static struct sk_buff *page_to_skb(struct virtnet_info *vi, 228static struct sk_buff *page_to_skb(struct receive_queue *rq,
171 struct page *page, unsigned int len) 229 struct page *page, unsigned int len)
172{ 230{
231 struct virtnet_info *vi = rq->vq->vdev->priv;
173 struct sk_buff *skb; 232 struct sk_buff *skb;
174 struct skb_vnet_hdr *hdr; 233 struct skb_vnet_hdr *hdr;
175 unsigned int copy, hdr_len, offset; 234 unsigned int copy, hdr_len, offset;
@@ -212,8 +271,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
212 * the case of a broken device. 271 * the case of a broken device.
213 */ 272 */
214 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { 273 if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
215 if (net_ratelimit()) 274 net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
216 pr_debug("%s: too much data\n", skb->dev->name);
217 dev_kfree_skb(skb); 275 dev_kfree_skb(skb);
218 return NULL; 276 return NULL;
219 } 277 }
@@ -225,12 +283,12 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
225 } 283 }
226 284
227 if (page) 285 if (page)
228 give_pages(vi, page); 286 give_pages(rq, page);
229 287
230 return skb; 288 return skb;
231} 289}
232 290
233static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) 291static int receive_mergeable(struct receive_queue *rq, struct sk_buff *skb)
234{ 292{
235 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 293 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
236 struct page *page; 294 struct page *page;
@@ -244,7 +302,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
244 skb->dev->stats.rx_length_errors++; 302 skb->dev->stats.rx_length_errors++;
245 return -EINVAL; 303 return -EINVAL;
246 } 304 }
247 page = virtqueue_get_buf(vi->rvq, &len); 305 page = virtqueue_get_buf(rq->vq, &len);
248 if (!page) { 306 if (!page) {
249 pr_debug("%s: rx error: %d buffers missing\n", 307 pr_debug("%s: rx error: %d buffers missing\n",
250 skb->dev->name, hdr->mhdr.num_buffers); 308 skb->dev->name, hdr->mhdr.num_buffers);
@@ -257,14 +315,15 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
257 315
258 set_skb_frag(skb, page, 0, &len); 316 set_skb_frag(skb, page, 0, &len);
259 317
260 --vi->num; 318 --rq->num;
261 } 319 }
262 return 0; 320 return 0;
263} 321}
264 322
265static void receive_buf(struct net_device *dev, void *buf, unsigned int len) 323static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
266{ 324{
267 struct virtnet_info *vi = netdev_priv(dev); 325 struct virtnet_info *vi = rq->vq->vdev->priv;
326 struct net_device *dev = vi->dev;
268 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 327 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
269 struct sk_buff *skb; 328 struct sk_buff *skb;
270 struct page *page; 329 struct page *page;
@@ -274,7 +333,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
274 pr_debug("%s: short packet %i\n", dev->name, len); 333 pr_debug("%s: short packet %i\n", dev->name, len);
275 dev->stats.rx_length_errors++; 334 dev->stats.rx_length_errors++;
276 if (vi->mergeable_rx_bufs || vi->big_packets) 335 if (vi->mergeable_rx_bufs || vi->big_packets)
277 give_pages(vi, buf); 336 give_pages(rq, buf);
278 else 337 else
279 dev_kfree_skb(buf); 338 dev_kfree_skb(buf);
280 return; 339 return;
@@ -286,14 +345,14 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
286 skb_trim(skb, len); 345 skb_trim(skb, len);
287 } else { 346 } else {
288 page = buf; 347 page = buf;
289 skb = page_to_skb(vi, page, len); 348 skb = page_to_skb(rq, page, len);
290 if (unlikely(!skb)) { 349 if (unlikely(!skb)) {
291 dev->stats.rx_dropped++; 350 dev->stats.rx_dropped++;
292 give_pages(vi, page); 351 give_pages(rq, page);
293 return; 352 return;
294 } 353 }
295 if (vi->mergeable_rx_bufs) 354 if (vi->mergeable_rx_bufs)
296 if (receive_mergeable(vi, skb)) { 355 if (receive_mergeable(rq, skb)) {
297 dev_kfree_skb(skb); 356 dev_kfree_skb(skb);
298 return; 357 return;
299 } 358 }
@@ -333,9 +392,8 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
333 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 392 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
334 break; 393 break;
335 default: 394 default:
336 if (net_ratelimit()) 395 net_warn_ratelimited("%s: bad gso type %u.\n",
337 printk(KERN_WARNING "%s: bad gso type %u.\n", 396 dev->name, hdr->hdr.gso_type);
338 dev->name, hdr->hdr.gso_type);
339 goto frame_err; 397 goto frame_err;
340 } 398 }
341 399
@@ -344,9 +402,7 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
344 402
345 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; 403 skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
346 if (skb_shinfo(skb)->gso_size == 0) { 404 if (skb_shinfo(skb)->gso_size == 0) {
347 if (net_ratelimit()) 405 net_warn_ratelimited("%s: zero gso size.\n", dev->name);
348 printk(KERN_WARNING "%s: zero gso size.\n",
349 dev->name);
350 goto frame_err; 406 goto frame_err;
351 } 407 }
352 408
@@ -363,8 +419,9 @@ frame_err:
363 dev_kfree_skb(skb); 419 dev_kfree_skb(skb);
364} 420}
365 421
366static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp) 422static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
367{ 423{
424 struct virtnet_info *vi = rq->vq->vdev->priv;
368 struct sk_buff *skb; 425 struct sk_buff *skb;
369 struct skb_vnet_hdr *hdr; 426 struct skb_vnet_hdr *hdr;
370 int err; 427 int err;
@@ -376,77 +433,77 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
376 skb_put(skb, MAX_PACKET_LEN); 433 skb_put(skb, MAX_PACKET_LEN);
377 434
378 hdr = skb_vnet_hdr(skb); 435 hdr = skb_vnet_hdr(skb);
379 sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr); 436 sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
380 437
381 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); 438 skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
382 439
383 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp); 440 err = virtqueue_add_buf(rq->vq, rq->sg, 0, 2, skb, gfp);
384 if (err < 0) 441 if (err < 0)
385 dev_kfree_skb(skb); 442 dev_kfree_skb(skb);
386 443
387 return err; 444 return err;
388} 445}
389 446
390static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) 447static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
391{ 448{
392 struct page *first, *list = NULL; 449 struct page *first, *list = NULL;
393 char *p; 450 char *p;
394 int i, err, offset; 451 int i, err, offset;
395 452
396 /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */ 453 /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
397 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 454 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
398 first = get_a_page(vi, gfp); 455 first = get_a_page(rq, gfp);
399 if (!first) { 456 if (!first) {
400 if (list) 457 if (list)
401 give_pages(vi, list); 458 give_pages(rq, list);
402 return -ENOMEM; 459 return -ENOMEM;
403 } 460 }
404 sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE); 461 sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
405 462
406 /* chain new page in list head to match sg */ 463 /* chain new page in list head to match sg */
407 first->private = (unsigned long)list; 464 first->private = (unsigned long)list;
408 list = first; 465 list = first;
409 } 466 }
410 467
411 first = get_a_page(vi, gfp); 468 first = get_a_page(rq, gfp);
412 if (!first) { 469 if (!first) {
413 give_pages(vi, list); 470 give_pages(rq, list);
414 return -ENOMEM; 471 return -ENOMEM;
415 } 472 }
416 p = page_address(first); 473 p = page_address(first);
417 474
418 /* vi->rx_sg[0], vi->rx_sg[1] share the same page */ 475 /* rq->sg[0], rq->sg[1] share the same page */
419 /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */ 476 /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */
420 sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr)); 477 sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));
421 478
422 /* vi->rx_sg[1] for data packet, from offset */ 479 /* rq->sg[1] for data packet, from offset */
423 offset = sizeof(struct padded_vnet_hdr); 480 offset = sizeof(struct padded_vnet_hdr);
424 sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset); 481 sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
425 482
426 /* chain first in list head */ 483 /* chain first in list head */
427 first->private = (unsigned long)list; 484 first->private = (unsigned long)list;
428 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 485 err = virtqueue_add_buf(rq->vq, rq->sg, 0, MAX_SKB_FRAGS + 2,
429 first, gfp); 486 first, gfp);
430 if (err < 0) 487 if (err < 0)
431 give_pages(vi, first); 488 give_pages(rq, first);
432 489
433 return err; 490 return err;
434} 491}
435 492
436static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) 493static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
437{ 494{
438 struct page *page; 495 struct page *page;
439 int err; 496 int err;
440 497
441 page = get_a_page(vi, gfp); 498 page = get_a_page(rq, gfp);
442 if (!page) 499 if (!page)
443 return -ENOMEM; 500 return -ENOMEM;
444 501
445 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); 502 sg_init_one(rq->sg, page_address(page), PAGE_SIZE);
446 503
447 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp); 504 err = virtqueue_add_buf(rq->vq, rq->sg, 0, 1, page, gfp);
448 if (err < 0) 505 if (err < 0)
449 give_pages(vi, page); 506 give_pages(rq, page);
450 507
451 return err; 508 return err;
452} 509}
@@ -458,97 +515,108 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
458 * before we're receiving packets, or from refill_work which is 515 * before we're receiving packets, or from refill_work which is
459 * careful to disable receiving (using napi_disable). 516 * careful to disable receiving (using napi_disable).
460 */ 517 */
461static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) 518static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
462{ 519{
520 struct virtnet_info *vi = rq->vq->vdev->priv;
463 int err; 521 int err;
464 bool oom; 522 bool oom;
465 523
466 do { 524 do {
467 if (vi->mergeable_rx_bufs) 525 if (vi->mergeable_rx_bufs)
468 err = add_recvbuf_mergeable(vi, gfp); 526 err = add_recvbuf_mergeable(rq, gfp);
469 else if (vi->big_packets) 527 else if (vi->big_packets)
470 err = add_recvbuf_big(vi, gfp); 528 err = add_recvbuf_big(rq, gfp);
471 else 529 else
472 err = add_recvbuf_small(vi, gfp); 530 err = add_recvbuf_small(rq, gfp);
473 531
474 oom = err == -ENOMEM; 532 oom = err == -ENOMEM;
475 if (err < 0) 533 if (err < 0)
476 break; 534 break;
477 ++vi->num; 535 ++rq->num;
478 } while (err > 0); 536 } while (err > 0);
479 if (unlikely(vi->num > vi->max)) 537 if (unlikely(rq->num > rq->max))
480 vi->max = vi->num; 538 rq->max = rq->num;
481 virtqueue_kick(vi->rvq); 539 virtqueue_kick(rq->vq);
482 return !oom; 540 return !oom;
483} 541}
484 542
485static void skb_recv_done(struct virtqueue *rvq) 543static void skb_recv_done(struct virtqueue *rvq)
486{ 544{
487 struct virtnet_info *vi = rvq->vdev->priv; 545 struct virtnet_info *vi = rvq->vdev->priv;
546 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
547
488 /* Schedule NAPI, Suppress further interrupts if successful. */ 548 /* Schedule NAPI, Suppress further interrupts if successful. */
489 if (napi_schedule_prep(&vi->napi)) { 549 if (napi_schedule_prep(&rq->napi)) {
490 virtqueue_disable_cb(rvq); 550 virtqueue_disable_cb(rvq);
491 __napi_schedule(&vi->napi); 551 __napi_schedule(&rq->napi);
492 } 552 }
493} 553}
494 554
495static void virtnet_napi_enable(struct virtnet_info *vi) 555static void virtnet_napi_enable(struct receive_queue *rq)
496{ 556{
497 napi_enable(&vi->napi); 557 napi_enable(&rq->napi);
498 558
499 /* If all buffers were filled by other side before we napi_enabled, we 559 /* If all buffers were filled by other side before we napi_enabled, we
500 * won't get another interrupt, so process any outstanding packets 560 * won't get another interrupt, so process any outstanding packets
501 * now. virtnet_poll wants re-enable the queue, so we disable here. 561 * now. virtnet_poll wants re-enable the queue, so we disable here.
502 * We synchronize against interrupts via NAPI_STATE_SCHED */ 562 * We synchronize against interrupts via NAPI_STATE_SCHED */
503 if (napi_schedule_prep(&vi->napi)) { 563 if (napi_schedule_prep(&rq->napi)) {
504 virtqueue_disable_cb(vi->rvq); 564 virtqueue_disable_cb(rq->vq);
505 local_bh_disable(); 565 local_bh_disable();
506 __napi_schedule(&vi->napi); 566 __napi_schedule(&rq->napi);
507 local_bh_enable(); 567 local_bh_enable();
508 } 568 }
509} 569}
510 570
511static void refill_work(struct work_struct *work) 571static void refill_work(struct work_struct *work)
512{ 572{
513 struct virtnet_info *vi; 573 struct virtnet_info *vi =
574 container_of(work, struct virtnet_info, refill.work);
514 bool still_empty; 575 bool still_empty;
576 int i;
515 577
516 vi = container_of(work, struct virtnet_info, refill.work); 578 for (i = 0; i < vi->max_queue_pairs; i++) {
517 napi_disable(&vi->napi); 579 struct receive_queue *rq = &vi->rq[i];
518 still_empty = !try_fill_recv(vi, GFP_KERNEL);
519 virtnet_napi_enable(vi);
520 580
521 /* In theory, this can happen: if we don't get any buffers in 581 napi_disable(&rq->napi);
522 * we will *never* try to fill again. */ 582 still_empty = !try_fill_recv(rq, GFP_KERNEL);
523 if (still_empty) 583 virtnet_napi_enable(rq);
524 schedule_delayed_work(&vi->refill, HZ/2); 584
585 /* In theory, this can happen: if we don't get any buffers in
586 * we will *never* try to fill again.
587 */
588 if (still_empty)
589 schedule_delayed_work(&vi->refill, HZ/2);
590 }
525} 591}
526 592
527static int virtnet_poll(struct napi_struct *napi, int budget) 593static int virtnet_poll(struct napi_struct *napi, int budget)
528{ 594{
529 struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); 595 struct receive_queue *rq =
596 container_of(napi, struct receive_queue, napi);
597 struct virtnet_info *vi = rq->vq->vdev->priv;
530 void *buf; 598 void *buf;
531 unsigned int len, received = 0; 599 unsigned int len, received = 0;
532 600
533again: 601again:
534 while (received < budget && 602 while (received < budget &&
535 (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { 603 (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
536 receive_buf(vi->dev, buf, len); 604 receive_buf(rq, buf, len);
537 --vi->num; 605 --rq->num;
538 received++; 606 received++;
539 } 607 }
540 608
541 if (vi->num < vi->max / 2) { 609 if (rq->num < rq->max / 2) {
542 if (!try_fill_recv(vi, GFP_ATOMIC)) 610 if (!try_fill_recv(rq, GFP_ATOMIC))
543 schedule_delayed_work(&vi->refill, 0); 611 schedule_delayed_work(&vi->refill, 0);
544 } 612 }
545 613
546 /* Out of packets? */ 614 /* Out of packets? */
547 if (received < budget) { 615 if (received < budget) {
548 napi_complete(napi); 616 napi_complete(napi);
549 if (unlikely(!virtqueue_enable_cb(vi->rvq)) && 617 if (unlikely(!virtqueue_enable_cb(rq->vq)) &&
550 napi_schedule_prep(napi)) { 618 napi_schedule_prep(napi)) {
551 virtqueue_disable_cb(vi->rvq); 619 virtqueue_disable_cb(rq->vq);
552 __napi_schedule(napi); 620 __napi_schedule(napi);
553 goto again; 621 goto again;
554 } 622 }
@@ -557,13 +625,29 @@ again:
557 return received; 625 return received;
558} 626}
559 627
560static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) 628static int virtnet_open(struct net_device *dev)
629{
630 struct virtnet_info *vi = netdev_priv(dev);
631 int i;
632
633 for (i = 0; i < vi->max_queue_pairs; i++) {
634 /* Make sure we have some buffers: if oom use wq. */
635 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
636 schedule_delayed_work(&vi->refill, 0);
637 virtnet_napi_enable(&vi->rq[i]);
638 }
639
640 return 0;
641}
642
643static unsigned int free_old_xmit_skbs(struct send_queue *sq)
561{ 644{
562 struct sk_buff *skb; 645 struct sk_buff *skb;
563 unsigned int len, tot_sgs = 0; 646 unsigned int len, tot_sgs = 0;
647 struct virtnet_info *vi = sq->vq->vdev->priv;
564 struct virtnet_stats *stats = this_cpu_ptr(vi->stats); 648 struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
565 649
566 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 650 while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
567 pr_debug("Sent skb %p\n", skb); 651 pr_debug("Sent skb %p\n", skb);
568 652
569 u64_stats_update_begin(&stats->tx_syncp); 653 u64_stats_update_begin(&stats->tx_syncp);
@@ -577,10 +661,11 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
577 return tot_sgs; 661 return tot_sgs;
578} 662}
579 663
580static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) 664static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
581{ 665{
582 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 666 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
583 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 667 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
668 struct virtnet_info *vi = sq->vq->vdev->priv;
584 669
585 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 670 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
586 671
@@ -615,44 +700,47 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
615 700
616 /* Encode metadata header at front. */ 701 /* Encode metadata header at front. */
617 if (vi->mergeable_rx_bufs) 702 if (vi->mergeable_rx_bufs)
618 sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr); 703 sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
619 else 704 else
620 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); 705 sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
621 706
622 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; 707 hdr->num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
623 return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, 708 return virtqueue_add_buf(sq->vq, sq->sg, hdr->num_sg,
624 0, skb, GFP_ATOMIC); 709 0, skb, GFP_ATOMIC);
625} 710}
626 711
627static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 712static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
628{ 713{
629 struct virtnet_info *vi = netdev_priv(dev); 714 struct virtnet_info *vi = netdev_priv(dev);
715 int qnum = skb_get_queue_mapping(skb);
716 struct send_queue *sq = &vi->sq[qnum];
630 int capacity; 717 int capacity;
631 718
632 /* Free up any pending old buffers before queueing new ones. */ 719 /* Free up any pending old buffers before queueing new ones. */
633 free_old_xmit_skbs(vi); 720 free_old_xmit_skbs(sq);
634 721
635 /* Try to transmit */ 722 /* Try to transmit */
636 capacity = xmit_skb(vi, skb); 723 capacity = xmit_skb(sq, skb);
637 724
638 /* This can happen with OOM and indirect buffers. */ 725 /* This can happen with OOM and indirect buffers. */
639 if (unlikely(capacity < 0)) { 726 if (unlikely(capacity < 0)) {
640 if (likely(capacity == -ENOMEM)) { 727 if (likely(capacity == -ENOMEM)) {
641 if (net_ratelimit()) 728 if (net_ratelimit())
642 dev_warn(&dev->dev, 729 dev_warn(&dev->dev,
643 "TX queue failure: out of memory\n"); 730 "TXQ (%d) failure: out of memory\n",
731 qnum);
644 } else { 732 } else {
645 dev->stats.tx_fifo_errors++; 733 dev->stats.tx_fifo_errors++;
646 if (net_ratelimit()) 734 if (net_ratelimit())
647 dev_warn(&dev->dev, 735 dev_warn(&dev->dev,
648 "Unexpected TX queue failure: %d\n", 736 "Unexpected TXQ (%d) failure: %d\n",
649 capacity); 737 qnum, capacity);
650 } 738 }
651 dev->stats.tx_dropped++; 739 dev->stats.tx_dropped++;
652 kfree_skb(skb); 740 kfree_skb(skb);
653 return NETDEV_TX_OK; 741 return NETDEV_TX_OK;
654 } 742 }
655 virtqueue_kick(vi->svq); 743 virtqueue_kick(sq->vq);
656 744
657 /* Don't wait up for transmitted skbs to be freed. */ 745 /* Don't wait up for transmitted skbs to be freed. */
658 skb_orphan(skb); 746 skb_orphan(skb);
@@ -661,13 +749,13 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
661 /* Apparently nice girls don't return TX_BUSY; stop the queue 749 /* Apparently nice girls don't return TX_BUSY; stop the queue
662 * before it gets out of hand. Naturally, this wastes entries. */ 750 * before it gets out of hand. Naturally, this wastes entries. */
663 if (capacity < 2+MAX_SKB_FRAGS) { 751 if (capacity < 2+MAX_SKB_FRAGS) {
664 netif_stop_queue(dev); 752 netif_stop_subqueue(dev, qnum);
665 if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) { 753 if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
666 /* More just got used, free them then recheck. */ 754 /* More just got used, free them then recheck. */
667 capacity += free_old_xmit_skbs(vi); 755 capacity += free_old_xmit_skbs(sq);
668 if (capacity >= 2+MAX_SKB_FRAGS) { 756 if (capacity >= 2+MAX_SKB_FRAGS) {
669 netif_start_queue(dev); 757 netif_start_subqueue(dev, qnum);
670 virtqueue_disable_cb(vi->svq); 758 virtqueue_disable_cb(sq->vq);
671 } 759 }
672 } 760 }
673 } 761 }
@@ -734,23 +822,13 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
734static void virtnet_netpoll(struct net_device *dev) 822static void virtnet_netpoll(struct net_device *dev)
735{ 823{
736 struct virtnet_info *vi = netdev_priv(dev); 824 struct virtnet_info *vi = netdev_priv(dev);
825 int i;
737 826
738 napi_schedule(&vi->napi); 827 for (i = 0; i < vi->curr_queue_pairs; i++)
828 napi_schedule(&vi->rq[i].napi);
739} 829}
740#endif 830#endif
741 831
742static int virtnet_open(struct net_device *dev)
743{
744 struct virtnet_info *vi = netdev_priv(dev);
745
746 /* Make sure we have some buffers: if oom use wq. */
747 if (!try_fill_recv(vi, GFP_KERNEL))
748 schedule_delayed_work(&vi->refill, 0);
749
750 virtnet_napi_enable(vi);
751 return 0;
752}
753
754/* 832/*
755 * Send command via the control virtqueue and check status. Commands 833 * Send command via the control virtqueue and check status. Commands
756 * supported by the hypervisor, as indicated by feature bits, should 834 * supported by the hypervisor, as indicated by feature bits, should
@@ -806,13 +884,39 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
806 rtnl_unlock(); 884 rtnl_unlock();
807} 885}
808 886
887static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
888{
889 struct scatterlist sg;
890 struct virtio_net_ctrl_mq s;
891 struct net_device *dev = vi->dev;
892
893 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
894 return 0;
895
896 s.virtqueue_pairs = queue_pairs;
897 sg_init_one(&sg, &s, sizeof(s));
898
899 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
900 VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, 1, 0)){
901 dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n",
902 queue_pairs);
903 return -EINVAL;
904 } else
905 vi->curr_queue_pairs = queue_pairs;
906
907 return 0;
908}
909
809static int virtnet_close(struct net_device *dev) 910static int virtnet_close(struct net_device *dev)
810{ 911{
811 struct virtnet_info *vi = netdev_priv(dev); 912 struct virtnet_info *vi = netdev_priv(dev);
913 int i;
812 914
813 /* Make sure refill_work doesn't re-enable napi! */ 915 /* Make sure refill_work doesn't re-enable napi! */
814 cancel_delayed_work_sync(&vi->refill); 916 cancel_delayed_work_sync(&vi->refill);
815 napi_disable(&vi->napi); 917
918 for (i = 0; i < vi->max_queue_pairs; i++)
919 napi_disable(&vi->rq[i].napi);
816 920
817 return 0; 921 return 0;
818} 922}
@@ -919,16 +1023,43 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
919 return 0; 1023 return 0;
920} 1024}
921 1025
1026static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
1027{
1028 int i;
1029
1030 /* In multiqueue mode, when the number of cpu is equal to the number of
1031 * queue pairs, we let the queue pairs to be private to one cpu by
1032 * setting the affinity hint to eliminate the contention.
1033 */
1034 if ((vi->curr_queue_pairs == 1 ||
1035 vi->max_queue_pairs != num_online_cpus()) && set) {
1036 if (vi->affinity_hint_set)
1037 set = false;
1038 else
1039 return;
1040 }
1041
1042 for (i = 0; i < vi->max_queue_pairs; i++) {
1043 int cpu = set ? i : -1;
1044 virtqueue_set_affinity(vi->rq[i].vq, cpu);
1045 virtqueue_set_affinity(vi->sq[i].vq, cpu);
1046 }
1047
1048 if (set)
1049 vi->affinity_hint_set = true;
1050 else
1051 vi->affinity_hint_set = false;
1052}
1053
922static void virtnet_get_ringparam(struct net_device *dev, 1054static void virtnet_get_ringparam(struct net_device *dev,
923 struct ethtool_ringparam *ring) 1055 struct ethtool_ringparam *ring)
924{ 1056{
925 struct virtnet_info *vi = netdev_priv(dev); 1057 struct virtnet_info *vi = netdev_priv(dev);
926 1058
927 ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq); 1059 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
928 ring->tx_max_pending = virtqueue_get_vring_size(vi->svq); 1060 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
929 ring->rx_pending = ring->rx_max_pending; 1061 ring->rx_pending = ring->rx_max_pending;
930 ring->tx_pending = ring->tx_max_pending; 1062 ring->tx_pending = ring->tx_max_pending;
931
932} 1063}
933 1064
934 1065
@@ -944,10 +1075,53 @@ static void virtnet_get_drvinfo(struct net_device *dev,
944 1075
945} 1076}
946 1077
1078/* TODO: Eliminate OOO packets during switching */
1079static int virtnet_set_channels(struct net_device *dev,
1080 struct ethtool_channels *channels)
1081{
1082 struct virtnet_info *vi = netdev_priv(dev);
1083 u16 queue_pairs = channels->combined_count;
1084 int err;
1085
1086 /* We don't support separate rx/tx channels.
1087 * We don't allow setting 'other' channels.
1088 */
1089 if (channels->rx_count || channels->tx_count || channels->other_count)
1090 return -EINVAL;
1091
1092 if (queue_pairs > vi->max_queue_pairs)
1093 return -EINVAL;
1094
1095 err = virtnet_set_queues(vi, queue_pairs);
1096 if (!err) {
1097 netif_set_real_num_tx_queues(dev, queue_pairs);
1098 netif_set_real_num_rx_queues(dev, queue_pairs);
1099
1100 virtnet_set_affinity(vi, true);
1101 }
1102
1103 return err;
1104}
1105
1106static void virtnet_get_channels(struct net_device *dev,
1107 struct ethtool_channels *channels)
1108{
1109 struct virtnet_info *vi = netdev_priv(dev);
1110
1111 channels->combined_count = vi->curr_queue_pairs;
1112 channels->max_combined = vi->max_queue_pairs;
1113 channels->max_other = 0;
1114 channels->rx_count = 0;
1115 channels->tx_count = 0;
1116 channels->other_count = 0;
1117}
1118
947static const struct ethtool_ops virtnet_ethtool_ops = { 1119static const struct ethtool_ops virtnet_ethtool_ops = {
948 .get_drvinfo = virtnet_get_drvinfo, 1120 .get_drvinfo = virtnet_get_drvinfo,
949 .get_link = ethtool_op_get_link, 1121 .get_link = ethtool_op_get_link,
950 .get_ringparam = virtnet_get_ringparam, 1122 .get_ringparam = virtnet_get_ringparam,
1123 .set_channels = virtnet_set_channels,
1124 .get_channels = virtnet_get_channels,
951}; 1125};
952 1126
953#define MIN_MTU 68 1127#define MIN_MTU 68
@@ -961,6 +1135,21 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
961 return 0; 1135 return 0;
962} 1136}
963 1137
1138/* To avoid contending a lock hold by a vcpu who would exit to host, select the
1139 * txq based on the processor id.
1140 * TODO: handle cpu hotplug.
1141 */
1142static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
1143{
1144 int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
1145 smp_processor_id();
1146
1147 while (unlikely(txq >= dev->real_num_tx_queues))
1148 txq -= dev->real_num_tx_queues;
1149
1150 return txq;
1151}
1152
964static const struct net_device_ops virtnet_netdev = { 1153static const struct net_device_ops virtnet_netdev = {
965 .ndo_open = virtnet_open, 1154 .ndo_open = virtnet_open,
966 .ndo_stop = virtnet_close, 1155 .ndo_stop = virtnet_close,
@@ -972,6 +1161,7 @@ static const struct net_device_ops virtnet_netdev = {
972 .ndo_get_stats64 = virtnet_stats, 1161 .ndo_get_stats64 = virtnet_stats,
973 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, 1162 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
974 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, 1163 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1164 .ndo_select_queue = virtnet_select_queue,
975#ifdef CONFIG_NET_POLL_CONTROLLER 1165#ifdef CONFIG_NET_POLL_CONTROLLER
976 .ndo_poll_controller = virtnet_netpoll, 1166 .ndo_poll_controller = virtnet_netpoll,
977#endif 1167#endif
@@ -1007,10 +1197,10 @@ static void virtnet_config_changed_work(struct work_struct *work)
1007 1197
1008 if (vi->status & VIRTIO_NET_S_LINK_UP) { 1198 if (vi->status & VIRTIO_NET_S_LINK_UP) {
1009 netif_carrier_on(vi->dev); 1199 netif_carrier_on(vi->dev);
1010 netif_wake_queue(vi->dev); 1200 netif_tx_wake_all_queues(vi->dev);
1011 } else { 1201 } else {
1012 netif_carrier_off(vi->dev); 1202 netif_carrier_off(vi->dev);
1013 netif_stop_queue(vi->dev); 1203 netif_tx_stop_all_queues(vi->dev);
1014 } 1204 }
1015done: 1205done:
1016 mutex_unlock(&vi->config_lock); 1206 mutex_unlock(&vi->config_lock);
@@ -1023,41 +1213,203 @@ static void virtnet_config_changed(struct virtio_device *vdev)
1023 schedule_work(&vi->config_work); 1213 schedule_work(&vi->config_work);
1024} 1214}
1025 1215
1026static int init_vqs(struct virtnet_info *vi) 1216static void virtnet_free_queues(struct virtnet_info *vi)
1027{ 1217{
1028 struct virtqueue *vqs[3]; 1218 kfree(vi->rq);
1029 vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; 1219 kfree(vi->sq);
1030 const char *names[] = { "input", "output", "control" }; 1220}
1031 int nvqs, err;
1032 1221
1033 /* We expect two virtqueues, receive then send, 1222static void free_receive_bufs(struct virtnet_info *vi)
1034 * and optionally control. */ 1223{
1035 nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; 1224 int i;
1036 1225
1037 err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names); 1226 for (i = 0; i < vi->max_queue_pairs; i++) {
1038 if (err) 1227 while (vi->rq[i].pages)
1039 return err; 1228 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1229 }
1230}
1040 1231
1041 vi->rvq = vqs[0]; 1232static void free_unused_bufs(struct virtnet_info *vi)
1042 vi->svq = vqs[1]; 1233{
1234 void *buf;
1235 int i;
1043 1236
1044 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { 1237 for (i = 0; i < vi->max_queue_pairs; i++) {
1045 vi->cvq = vqs[2]; 1238 struct virtqueue *vq = vi->sq[i].vq;
1239 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
1240 dev_kfree_skb(buf);
1241 }
1046 1242
1243 for (i = 0; i < vi->max_queue_pairs; i++) {
1244 struct virtqueue *vq = vi->rq[i].vq;
1245
1246 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1247 if (vi->mergeable_rx_bufs || vi->big_packets)
1248 give_pages(&vi->rq[i], buf);
1249 else
1250 dev_kfree_skb(buf);
1251 --vi->rq[i].num;
1252 }
1253 BUG_ON(vi->rq[i].num != 0);
1254 }
1255}
1256
1257static void virtnet_del_vqs(struct virtnet_info *vi)
1258{
1259 struct virtio_device *vdev = vi->vdev;
1260
1261 virtnet_set_affinity(vi, false);
1262
1263 vdev->config->del_vqs(vdev);
1264
1265 virtnet_free_queues(vi);
1266}
1267
1268static int virtnet_find_vqs(struct virtnet_info *vi)
1269{
1270 vq_callback_t **callbacks;
1271 struct virtqueue **vqs;
1272 int ret = -ENOMEM;
1273 int i, total_vqs;
1274 const char **names;
1275
1276 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1277 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
1278 * possible control vq.
1279 */
1280 total_vqs = vi->max_queue_pairs * 2 +
1281 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1282
1283 /* Allocate space for find_vqs parameters */
1284 vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1285 if (!vqs)
1286 goto err_vq;
1287 callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1288 if (!callbacks)
1289 goto err_callback;
1290 names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1291 if (!names)
1292 goto err_names;
1293
1294 /* Parameters for control virtqueue, if any */
1295 if (vi->has_cvq) {
1296 callbacks[total_vqs - 1] = NULL;
1297 names[total_vqs - 1] = "control";
1298 }
1299
1300 /* Allocate/initialize parameters for send/receive virtqueues */
1301 for (i = 0; i < vi->max_queue_pairs; i++) {
1302 callbacks[rxq2vq(i)] = skb_recv_done;
1303 callbacks[txq2vq(i)] = skb_xmit_done;
1304 sprintf(vi->rq[i].name, "input.%d", i);
1305 sprintf(vi->sq[i].name, "output.%d", i);
1306 names[rxq2vq(i)] = vi->rq[i].name;
1307 names[txq2vq(i)] = vi->sq[i].name;
1308 }
1309
1310 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
1311 names);
1312 if (ret)
1313 goto err_find;
1314
1315 if (vi->has_cvq) {
1316 vi->cvq = vqs[total_vqs - 1];
1047 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) 1317 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
1048 vi->dev->features |= NETIF_F_HW_VLAN_FILTER; 1318 vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
1049 } 1319 }
1320
1321 for (i = 0; i < vi->max_queue_pairs; i++) {
1322 vi->rq[i].vq = vqs[rxq2vq(i)];
1323 vi->sq[i].vq = vqs[txq2vq(i)];
1324 }
1325
1326 kfree(names);
1327 kfree(callbacks);
1328 kfree(vqs);
1329
1330 return 0;
1331
1332err_find:
1333 kfree(names);
1334err_names:
1335 kfree(callbacks);
1336err_callback:
1337 kfree(vqs);
1338err_vq:
1339 return ret;
1340}
1341
1342static int virtnet_alloc_queues(struct virtnet_info *vi)
1343{
1344 int i;
1345
1346 vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
1347 if (!vi->sq)
1348 goto err_sq;
1349 vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
1350 if (!vi->rq)
1351 goto err_rq;
1352
1353 INIT_DELAYED_WORK(&vi->refill, refill_work);
1354 for (i = 0; i < vi->max_queue_pairs; i++) {
1355 vi->rq[i].pages = NULL;
1356 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
1357 napi_weight);
1358
1359 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
1360 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
1361 }
1362
1050 return 0; 1363 return 0;
1364
1365err_rq:
1366 kfree(vi->sq);
1367err_sq:
1368 return -ENOMEM;
1369}
1370
1371static int init_vqs(struct virtnet_info *vi)
1372{
1373 int ret;
1374
1375 /* Allocate send & receive queues */
1376 ret = virtnet_alloc_queues(vi);
1377 if (ret)
1378 goto err;
1379
1380 ret = virtnet_find_vqs(vi);
1381 if (ret)
1382 goto err_free;
1383
1384 virtnet_set_affinity(vi, true);
1385 return 0;
1386
1387err_free:
1388 virtnet_free_queues(vi);
1389err:
1390 return ret;
1051} 1391}
1052 1392
1053static int virtnet_probe(struct virtio_device *vdev) 1393static int virtnet_probe(struct virtio_device *vdev)
1054{ 1394{
1055 int err; 1395 int i, err;
1056 struct net_device *dev; 1396 struct net_device *dev;
1057 struct virtnet_info *vi; 1397 struct virtnet_info *vi;
1398 u16 max_queue_pairs;
1399
1400 /* Find if host supports multiqueue virtio_net device */
1401 err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
1402 offsetof(struct virtio_net_config,
1403 max_virtqueue_pairs), &max_queue_pairs);
1404
1405 /* We need at least 2 queue's */
1406 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1407 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1408 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1409 max_queue_pairs = 1;
1058 1410
1059 /* Allocate ourselves a network device with room for our info */ 1411 /* Allocate ourselves a network device with room for our info */
1060 dev = alloc_etherdev(sizeof(struct virtnet_info)); 1412 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
1061 if (!dev) 1413 if (!dev)
1062 return -ENOMEM; 1414 return -ENOMEM;
1063 1415
@@ -1103,22 +1455,17 @@ static int virtnet_probe(struct virtio_device *vdev)
1103 1455
1104 /* Set up our device-specific information */ 1456 /* Set up our device-specific information */
1105 vi = netdev_priv(dev); 1457 vi = netdev_priv(dev);
1106 netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
1107 vi->dev = dev; 1458 vi->dev = dev;
1108 vi->vdev = vdev; 1459 vi->vdev = vdev;
1109 vdev->priv = vi; 1460 vdev->priv = vi;
1110 vi->pages = NULL;
1111 vi->stats = alloc_percpu(struct virtnet_stats); 1461 vi->stats = alloc_percpu(struct virtnet_stats);
1112 err = -ENOMEM; 1462 err = -ENOMEM;
1113 if (vi->stats == NULL) 1463 if (vi->stats == NULL)
1114 goto free; 1464 goto free;
1115 1465
1116 INIT_DELAYED_WORK(&vi->refill, refill_work);
1117 mutex_init(&vi->config_lock); 1466 mutex_init(&vi->config_lock);
1118 vi->config_enable = true; 1467 vi->config_enable = true;
1119 INIT_WORK(&vi->config_work, virtnet_config_changed_work); 1468 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1120 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
1121 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
1122 1469
1123 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1470 /* If we can receive ANY GSO packets, we must allocate large ones. */
1124 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1471 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1129,10 +1476,21 @@ static int virtnet_probe(struct virtio_device *vdev)
1129 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1476 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
1130 vi->mergeable_rx_bufs = true; 1477 vi->mergeable_rx_bufs = true;
1131 1478
1479 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
1480 vi->has_cvq = true;
1481
1482 /* Use single tx/rx queue pair as default */
1483 vi->curr_queue_pairs = 1;
1484 vi->max_queue_pairs = max_queue_pairs;
1485
1486 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
1132 err = init_vqs(vi); 1487 err = init_vqs(vi);
1133 if (err) 1488 if (err)
1134 goto free_stats; 1489 goto free_stats;
1135 1490
1491 netif_set_real_num_tx_queues(dev, 1);
1492 netif_set_real_num_rx_queues(dev, 1);
1493
1136 err = register_netdev(dev); 1494 err = register_netdev(dev);
1137 if (err) { 1495 if (err) {
1138 pr_debug("virtio_net: registering device failed\n"); 1496 pr_debug("virtio_net: registering device failed\n");
@@ -1140,12 +1498,15 @@ static int virtnet_probe(struct virtio_device *vdev)
1140 } 1498 }
1141 1499
1142 /* Last of all, set up some receive buffers. */ 1500 /* Last of all, set up some receive buffers. */
1143 try_fill_recv(vi, GFP_KERNEL); 1501 for (i = 0; i < vi->max_queue_pairs; i++) {
1144 1502 try_fill_recv(&vi->rq[i], GFP_KERNEL);
1145 /* If we didn't even get one input buffer, we're useless. */ 1503
1146 if (vi->num == 0) { 1504 /* If we didn't even get one input buffer, we're useless. */
1147 err = -ENOMEM; 1505 if (vi->rq[i].num == 0) {
1148 goto unregister; 1506 free_unused_bufs(vi);
1507 err = -ENOMEM;
1508 goto free_recv_bufs;
1509 }
1149 } 1510 }
1150 1511
1151 /* Assume link up if device can't report link status, 1512 /* Assume link up if device can't report link status,
@@ -1158,13 +1519,17 @@ static int virtnet_probe(struct virtio_device *vdev)
1158 netif_carrier_on(dev); 1519 netif_carrier_on(dev);
1159 } 1520 }
1160 1521
1161 pr_debug("virtnet: registered device %s\n", dev->name); 1522 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
1523 dev->name, max_queue_pairs);
1524
1162 return 0; 1525 return 0;
1163 1526
1164unregister: 1527free_recv_bufs:
1528 free_receive_bufs(vi);
1165 unregister_netdev(dev); 1529 unregister_netdev(dev);
1166free_vqs: 1530free_vqs:
1167 vdev->config->del_vqs(vdev); 1531 cancel_delayed_work_sync(&vi->refill);
1532 virtnet_del_vqs(vi);
1168free_stats: 1533free_stats:
1169 free_percpu(vi->stats); 1534 free_percpu(vi->stats);
1170free: 1535free:
@@ -1172,28 +1537,6 @@ free:
1172 return err; 1537 return err;
1173} 1538}
1174 1539
1175static void free_unused_bufs(struct virtnet_info *vi)
1176{
1177 void *buf;
1178 while (1) {
1179 buf = virtqueue_detach_unused_buf(vi->svq);
1180 if (!buf)
1181 break;
1182 dev_kfree_skb(buf);
1183 }
1184 while (1) {
1185 buf = virtqueue_detach_unused_buf(vi->rvq);
1186 if (!buf)
1187 break;
1188 if (vi->mergeable_rx_bufs || vi->big_packets)
1189 give_pages(vi, buf);
1190 else
1191 dev_kfree_skb(buf);
1192 --vi->num;
1193 }
1194 BUG_ON(vi->num != 0);
1195}
1196
1197static void remove_vq_common(struct virtnet_info *vi) 1540static void remove_vq_common(struct virtnet_info *vi)
1198{ 1541{
1199 vi->vdev->config->reset(vi->vdev); 1542 vi->vdev->config->reset(vi->vdev);
@@ -1201,13 +1544,12 @@ static void remove_vq_common(struct virtnet_info *vi)
1201 /* Free unused buffers in both send and recv, if any. */ 1544 /* Free unused buffers in both send and recv, if any. */
1202 free_unused_bufs(vi); 1545 free_unused_bufs(vi);
1203 1546
1204 vi->vdev->config->del_vqs(vi->vdev); 1547 free_receive_bufs(vi);
1205 1548
1206 while (vi->pages) 1549 virtnet_del_vqs(vi);
1207 __free_pages(get_a_page(vi, GFP_KERNEL), 0);
1208} 1550}
1209 1551
1210static void __devexit virtnet_remove(struct virtio_device *vdev) 1552static void virtnet_remove(struct virtio_device *vdev)
1211{ 1553{
1212 struct virtnet_info *vi = vdev->priv; 1554 struct virtnet_info *vi = vdev->priv;
1213 1555
@@ -1230,6 +1572,7 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
1230static int virtnet_freeze(struct virtio_device *vdev) 1572static int virtnet_freeze(struct virtio_device *vdev)
1231{ 1573{
1232 struct virtnet_info *vi = vdev->priv; 1574 struct virtnet_info *vi = vdev->priv;
1575 int i;
1233 1576
1234 /* Prevent config work handler from accessing the device */ 1577 /* Prevent config work handler from accessing the device */
1235 mutex_lock(&vi->config_lock); 1578 mutex_lock(&vi->config_lock);
@@ -1240,7 +1583,10 @@ static int virtnet_freeze(struct virtio_device *vdev)
1240 cancel_delayed_work_sync(&vi->refill); 1583 cancel_delayed_work_sync(&vi->refill);
1241 1584
1242 if (netif_running(vi->dev)) 1585 if (netif_running(vi->dev))
1243 napi_disable(&vi->napi); 1586 for (i = 0; i < vi->max_queue_pairs; i++) {
1587 napi_disable(&vi->rq[i].napi);
1588 netif_napi_del(&vi->rq[i].napi);
1589 }
1244 1590
1245 remove_vq_common(vi); 1591 remove_vq_common(vi);
1246 1592
@@ -1252,24 +1598,28 @@ static int virtnet_freeze(struct virtio_device *vdev)
1252static int virtnet_restore(struct virtio_device *vdev) 1598static int virtnet_restore(struct virtio_device *vdev)
1253{ 1599{
1254 struct virtnet_info *vi = vdev->priv; 1600 struct virtnet_info *vi = vdev->priv;
1255 int err; 1601 int err, i;
1256 1602
1257 err = init_vqs(vi); 1603 err = init_vqs(vi);
1258 if (err) 1604 if (err)
1259 return err; 1605 return err;
1260 1606
1261 if (netif_running(vi->dev)) 1607 if (netif_running(vi->dev))
1262 virtnet_napi_enable(vi); 1608 for (i = 0; i < vi->max_queue_pairs; i++)
1609 virtnet_napi_enable(&vi->rq[i]);
1263 1610
1264 netif_device_attach(vi->dev); 1611 netif_device_attach(vi->dev);
1265 1612
1266 if (!try_fill_recv(vi, GFP_KERNEL)) 1613 for (i = 0; i < vi->max_queue_pairs; i++)
1267 schedule_delayed_work(&vi->refill, 0); 1614 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
1615 schedule_delayed_work(&vi->refill, 0);
1268 1616
1269 mutex_lock(&vi->config_lock); 1617 mutex_lock(&vi->config_lock);
1270 vi->config_enable = true; 1618 vi->config_enable = true;
1271 mutex_unlock(&vi->config_lock); 1619 mutex_unlock(&vi->config_lock);
1272 1620
1621 virtnet_set_queues(vi, vi->curr_queue_pairs);
1622
1273 return 0; 1623 return 0;
1274} 1624}
1275#endif 1625#endif
@@ -1287,7 +1637,7 @@ static unsigned int features[] = {
1287 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1637 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1288 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1638 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1289 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1639 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1290 VIRTIO_NET_F_GUEST_ANNOUNCE, 1640 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
1291}; 1641};
1292 1642
1293static struct virtio_driver virtio_net_driver = { 1643static struct virtio_driver virtio_net_driver = {
@@ -1297,7 +1647,7 @@ static struct virtio_driver virtio_net_driver = {
1297 .driver.owner = THIS_MODULE, 1647 .driver.owner = THIS_MODULE,
1298 .id_table = id_table, 1648 .id_table = id_table,
1299 .probe = virtnet_probe, 1649 .probe = virtnet_probe,
1300 .remove = __devexit_p(virtnet_remove), 1650 .remove = virtnet_remove,
1301 .config_changed = virtnet_config_changed, 1651 .config_changed = virtnet_config_changed,
1302#ifdef CONFIG_PM 1652#ifdef CONFIG_PM
1303 .freeze = virtnet_freeze, 1653 .freeze = virtnet_freeze,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0ae1bcc6da73..dc8913c6238c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1094,10 +1094,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1094{ 1094{
1095 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1095 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1096 1096
1097 BUG_ON(skb->queue_mapping > adapter->num_tx_queues); 1097 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1098 return vmxnet3_tq_xmit(skb, 1098 return vmxnet3_tq_xmit(skb,
1099 &adapter->tx_queue[skb->queue_mapping], 1099 &adapter->tx_queue[skb->queue_mapping],
1100 adapter, netdev); 1100 adapter, netdev);
1101} 1101}
1102 1102
1103 1103
@@ -1243,8 +1243,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1243 skb_reserve(new_skb, NET_IP_ALIGN); 1243 skb_reserve(new_skb, NET_IP_ALIGN);
1244 rbi->skb = new_skb; 1244 rbi->skb = new_skb;
1245 rbi->dma_addr = pci_map_single(adapter->pdev, 1245 rbi->dma_addr = pci_map_single(adapter->pdev,
1246 rbi->skb->data, rbi->len, 1246 rbi->skb->data, rbi->len,
1247 PCI_DMA_FROMDEVICE); 1247 PCI_DMA_FROMDEVICE);
1248 rxd->addr = cpu_to_le64(rbi->dma_addr); 1248 rxd->addr = cpu_to_le64(rbi->dma_addr);
1249 rxd->len = rbi->len; 1249 rxd->len = rbi->len;
1250 1250
@@ -1331,14 +1331,14 @@ rcd_done:
1331 /* if needed, update the register */ 1331 /* if needed, update the register */
1332 if (unlikely(rq->shared->updateRxProd)) { 1332 if (unlikely(rq->shared->updateRxProd)) {
1333 VMXNET3_WRITE_BAR0_REG(adapter, 1333 VMXNET3_WRITE_BAR0_REG(adapter,
1334 rxprod_reg[ring_idx] + rq->qid * 8, 1334 rxprod_reg[ring_idx] + rq->qid * 8,
1335 ring->next2fill); 1335 ring->next2fill);
1336 rq->uncommitted[ring_idx] = 0; 1336 rq->uncommitted[ring_idx] = 0;
1337 } 1337 }
1338 1338
1339 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1339 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1340 vmxnet3_getRxComp(rcd, 1340 vmxnet3_getRxComp(rcd,
1341 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp); 1341 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1342 } 1342 }
1343 1343
1344 return num_rxd; 1344 return num_rxd;
@@ -1922,7 +1922,7 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1922 free_irq(adapter->pdev->irq, adapter->netdev); 1922 free_irq(adapter->pdev->irq, adapter->netdev);
1923 break; 1923 break;
1924 default: 1924 default:
1925 BUG_ON(true); 1925 BUG();
1926 } 1926 }
1927} 1927}
1928 1928
@@ -2885,7 +2885,7 @@ vmxnet3_reset_work(struct work_struct *data)
2885} 2885}
2886 2886
2887 2887
2888static int __devinit 2888static int
2889vmxnet3_probe_device(struct pci_dev *pdev, 2889vmxnet3_probe_device(struct pci_dev *pdev,
2890 const struct pci_device_id *id) 2890 const struct pci_device_id *id)
2891{ 2891{
@@ -2949,11 +2949,11 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2949 2949
2950 spin_lock_init(&adapter->cmd_lock); 2950 spin_lock_init(&adapter->cmd_lock);
2951 adapter->shared = pci_alloc_consistent(adapter->pdev, 2951 adapter->shared = pci_alloc_consistent(adapter->pdev,
2952 sizeof(struct Vmxnet3_DriverShared), 2952 sizeof(struct Vmxnet3_DriverShared),
2953 &adapter->shared_pa); 2953 &adapter->shared_pa);
2954 if (!adapter->shared) { 2954 if (!adapter->shared) {
2955 printk(KERN_ERR "Failed to allocate memory for %s\n", 2955 printk(KERN_ERR "Failed to allocate memory for %s\n",
2956 pci_name(pdev)); 2956 pci_name(pdev));
2957 err = -ENOMEM; 2957 err = -ENOMEM;
2958 goto err_alloc_shared; 2958 goto err_alloc_shared;
2959 } 2959 }
@@ -2964,16 +2964,16 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2964 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2964 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2965 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2965 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2966 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size, 2966 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2967 &adapter->queue_desc_pa); 2967 &adapter->queue_desc_pa);
2968 2968
2969 if (!adapter->tqd_start) { 2969 if (!adapter->tqd_start) {
2970 printk(KERN_ERR "Failed to allocate memory for %s\n", 2970 printk(KERN_ERR "Failed to allocate memory for %s\n",
2971 pci_name(pdev)); 2971 pci_name(pdev));
2972 err = -ENOMEM; 2972 err = -ENOMEM;
2973 goto err_alloc_queue_desc; 2973 goto err_alloc_queue_desc;
2974 } 2974 }
2975 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + 2975 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2976 adapter->num_tx_queues); 2976 adapter->num_tx_queues);
2977 2977
2978 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2978 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2979 if (adapter->pm_conf == NULL) { 2979 if (adapter->pm_conf == NULL) {
@@ -3019,7 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3019 3019
3020 adapter->dev_number = atomic_read(&devices_found); 3020 adapter->dev_number = atomic_read(&devices_found);
3021 3021
3022 adapter->share_intr = irq_share_mode; 3022 adapter->share_intr = irq_share_mode;
3023 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE && 3023 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
3024 adapter->num_tx_queues != adapter->num_rx_queues) 3024 adapter->num_tx_queues != adapter->num_rx_queues)
3025 adapter->share_intr = VMXNET3_INTR_DONTSHARE; 3025 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
@@ -3065,7 +3065,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
3065 3065
3066 if (err) { 3066 if (err) {
3067 printk(KERN_ERR "Failed to register adapter %s\n", 3067 printk(KERN_ERR "Failed to register adapter %s\n",
3068 pci_name(pdev)); 3068 pci_name(pdev));
3069 goto err_register; 3069 goto err_register;
3070 } 3070 }
3071 3071
@@ -3096,7 +3096,7 @@ err_alloc_shared:
3096} 3096}
3097 3097
3098 3098
3099static void __devexit 3099static void
3100vmxnet3_remove_device(struct pci_dev *pdev) 3100vmxnet3_remove_device(struct pci_dev *pdev)
3101{ 3101{
3102 struct net_device *netdev = pci_get_drvdata(pdev); 3102 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3302,7 +3302,7 @@ static struct pci_driver vmxnet3_driver = {
3302 .name = vmxnet3_driver_name, 3302 .name = vmxnet3_driver_name,
3303 .id_table = vmxnet3_pciid_table, 3303 .id_table = vmxnet3_pciid_table,
3304 .probe = vmxnet3_probe_device, 3304 .probe = vmxnet3_probe_device,
3305 .remove = __devexit_p(vmxnet3_remove_device), 3305 .remove = vmxnet3_remove_device,
3306#ifdef CONFIG_PM 3306#ifdef CONFIG_PM
3307 .driver.pm = &vmxnet3_pm_ops, 3307 .driver.pm = &vmxnet3_pm_ops,
3308#endif 3308#endif
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 8b5c61917076..3b3fdf648ea7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -29,6 +29,8 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/hash.h> 31#include <linux/hash.h>
32#include <net/arp.h>
33#include <net/ndisc.h>
32#include <net/ip.h> 34#include <net/ip.h>
33#include <net/icmp.h> 35#include <net/icmp.h>
34#include <net/udp.h> 36#include <net/udp.h>
@@ -110,18 +112,23 @@ struct vxlan_dev {
110 __u16 port_max; 112 __u16 port_max;
111 __u8 tos; /* TOS override */ 113 __u8 tos; /* TOS override */
112 __u8 ttl; 114 __u8 ttl;
113 bool learn; 115 u32 flags; /* VXLAN_F_* below */
114 116
115 unsigned long age_interval; 117 unsigned long age_interval;
116 struct timer_list age_timer; 118 struct timer_list age_timer;
117 spinlock_t hash_lock; 119 spinlock_t hash_lock;
118 unsigned int addrcnt; 120 unsigned int addrcnt;
119 unsigned int addrmax; 121 unsigned int addrmax;
120 unsigned int addrexceeded;
121 122
122 struct hlist_head fdb_head[FDB_HASH_SIZE]; 123 struct hlist_head fdb_head[FDB_HASH_SIZE];
123}; 124};
124 125
126#define VXLAN_F_LEARN 0x01
127#define VXLAN_F_PROXY 0x02
128#define VXLAN_F_RSC 0x04
129#define VXLAN_F_L2MISS 0x08
130#define VXLAN_F_L3MISS 0x10
131
125/* salt for hash table */ 132/* salt for hash table */
126static u32 vxlan_salt __read_mostly; 133static u32 vxlan_salt __read_mostly;
127 134
@@ -155,6 +162,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
155 struct nda_cacheinfo ci; 162 struct nda_cacheinfo ci;
156 struct nlmsghdr *nlh; 163 struct nlmsghdr *nlh;
157 struct ndmsg *ndm; 164 struct ndmsg *ndm;
165 bool send_ip, send_eth;
158 166
159 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags); 167 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
160 if (nlh == NULL) 168 if (nlh == NULL)
@@ -162,16 +170,24 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
162 170
163 ndm = nlmsg_data(nlh); 171 ndm = nlmsg_data(nlh);
164 memset(ndm, 0, sizeof(*ndm)); 172 memset(ndm, 0, sizeof(*ndm));
165 ndm->ndm_family = AF_BRIDGE; 173
174 send_eth = send_ip = true;
175
176 if (type == RTM_GETNEIGH) {
177 ndm->ndm_family = AF_INET;
178 send_ip = fdb->remote_ip != 0;
179 send_eth = !is_zero_ether_addr(fdb->eth_addr);
180 } else
181 ndm->ndm_family = AF_BRIDGE;
166 ndm->ndm_state = fdb->state; 182 ndm->ndm_state = fdb->state;
167 ndm->ndm_ifindex = vxlan->dev->ifindex; 183 ndm->ndm_ifindex = vxlan->dev->ifindex;
168 ndm->ndm_flags = NTF_SELF; 184 ndm->ndm_flags = NTF_SELF;
169 ndm->ndm_type = NDA_DST; 185 ndm->ndm_type = NDA_DST;
170 186
171 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 187 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
172 goto nla_put_failure; 188 goto nla_put_failure;
173 189
174 if (nla_put_be32(skb, NDA_DST, fdb->remote_ip)) 190 if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
175 goto nla_put_failure; 191 goto nla_put_failure;
176 192
177 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 193 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -223,6 +239,29 @@ errout:
223 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 239 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
224} 240}
225 241
242static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
243{
244 struct vxlan_dev *vxlan = netdev_priv(dev);
245 struct vxlan_fdb f;
246
247 memset(&f, 0, sizeof f);
248 f.state = NUD_STALE;
249 f.remote_ip = ipa; /* goes to NDA_DST */
250
251 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
252}
253
254static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
255{
256 struct vxlan_fdb f;
257
258 memset(&f, 0, sizeof f);
259 f.state = NUD_STALE;
260 memcpy(f.eth_addr, eth_addr, ETH_ALEN);
261
262 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
263}
264
226/* Hash Ethernet address */ 265/* Hash Ethernet address */
227static u32 eth_hash(const unsigned char *addr) 266static u32 eth_hash(const unsigned char *addr)
228{ 267{
@@ -552,6 +591,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
552 goto drop; 591 goto drop;
553 } 592 }
554 593
594 skb_reset_mac_header(skb);
595
555 /* Re-examine inner Ethernet packet */ 596 /* Re-examine inner Ethernet packet */
556 oip = ip_hdr(skb); 597 oip = ip_hdr(skb);
557 skb->protocol = eth_type_trans(skb, vxlan->dev); 598 skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -561,12 +602,22 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
561 vxlan->dev->dev_addr) == 0) 602 vxlan->dev->dev_addr) == 0)
562 goto drop; 603 goto drop;
563 604
564 if (vxlan->learn) 605 if (vxlan->flags & VXLAN_F_LEARN)
565 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); 606 vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
566 607
567 __skb_tunnel_rx(skb, vxlan->dev); 608 __skb_tunnel_rx(skb, vxlan->dev);
568 skb_reset_network_header(skb); 609 skb_reset_network_header(skb);
569 skb->ip_summed = CHECKSUM_NONE; 610
611 /* If the NIC driver gave us an encapsulated packet with
612 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
613 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
614 * for us. Otherwise force the upper layers to verify it.
615 */
616 if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
617 !(vxlan->dev->features & NETIF_F_RXCSUM))
618 skb->ip_summed = CHECKSUM_NONE;
619
620 skb->encapsulation = 0;
570 621
571 err = IP_ECN_decapsulate(oip, skb); 622 err = IP_ECN_decapsulate(oip, skb);
572 if (unlikely(err)) { 623 if (unlikely(err)) {
@@ -600,6 +651,117 @@ drop:
600 return 0; 651 return 0;
601} 652}
602 653
654static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
655{
656 struct vxlan_dev *vxlan = netdev_priv(dev);
657 struct arphdr *parp;
658 u8 *arpptr, *sha;
659 __be32 sip, tip;
660 struct neighbour *n;
661
662 if (dev->flags & IFF_NOARP)
663 goto out;
664
665 if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
666 dev->stats.tx_dropped++;
667 goto out;
668 }
669 parp = arp_hdr(skb);
670
671 if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
672 parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
673 parp->ar_pro != htons(ETH_P_IP) ||
674 parp->ar_op != htons(ARPOP_REQUEST) ||
675 parp->ar_hln != dev->addr_len ||
676 parp->ar_pln != 4)
677 goto out;
678 arpptr = (u8 *)parp + sizeof(struct arphdr);
679 sha = arpptr;
680 arpptr += dev->addr_len; /* sha */
681 memcpy(&sip, arpptr, sizeof(sip));
682 arpptr += sizeof(sip);
683 arpptr += dev->addr_len; /* tha */
684 memcpy(&tip, arpptr, sizeof(tip));
685
686 if (ipv4_is_loopback(tip) ||
687 ipv4_is_multicast(tip))
688 goto out;
689
690 n = neigh_lookup(&arp_tbl, &tip, dev);
691
692 if (n) {
693 struct vxlan_dev *vxlan = netdev_priv(dev);
694 struct vxlan_fdb *f;
695 struct sk_buff *reply;
696
697 if (!(n->nud_state & NUD_CONNECTED)) {
698 neigh_release(n);
699 goto out;
700 }
701
702 f = vxlan_find_mac(vxlan, n->ha);
703 if (f && f->remote_ip == 0) {
704 /* bridge-local neighbor */
705 neigh_release(n);
706 goto out;
707 }
708
709 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
710 n->ha, sha);
711
712 neigh_release(n);
713
714 skb_reset_mac_header(reply);
715 __skb_pull(reply, skb_network_offset(reply));
716 reply->ip_summed = CHECKSUM_UNNECESSARY;
717 reply->pkt_type = PACKET_HOST;
718
719 if (netif_rx_ni(reply) == NET_RX_DROP)
720 dev->stats.rx_dropped++;
721 } else if (vxlan->flags & VXLAN_F_L3MISS)
722 vxlan_ip_miss(dev, tip);
723out:
724 consume_skb(skb);
725 return NETDEV_TX_OK;
726}
727
728static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
729{
730 struct vxlan_dev *vxlan = netdev_priv(dev);
731 struct neighbour *n;
732 struct iphdr *pip;
733
734 if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
735 return false;
736
737 n = NULL;
738 switch (ntohs(eth_hdr(skb)->h_proto)) {
739 case ETH_P_IP:
740 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
741 return false;
742 pip = ip_hdr(skb);
743 n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
744 break;
745 default:
746 return false;
747 }
748
749 if (n) {
750 bool diff;
751
752 diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
753 if (diff) {
754 memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
755 dev->addr_len);
756 memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
757 }
758 neigh_release(n);
759 return diff;
760 } else if (vxlan->flags & VXLAN_F_L3MISS)
761 vxlan_ip_miss(dev, pip->daddr);
762 return false;
763}
764
603/* Extract dsfield from inner protocol */ 765/* Extract dsfield from inner protocol */
604static inline u8 vxlan_get_dsfield(const struct iphdr *iph, 766static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
605 const struct sk_buff *skb) 767 const struct sk_buff *skb)
@@ -622,22 +784,6 @@ static inline u8 vxlan_ecn_encap(u8 tos,
622 return INET_ECN_encapsulate(tos, inner); 784 return INET_ECN_encapsulate(tos, inner);
623} 785}
624 786
625static __be32 vxlan_find_dst(struct vxlan_dev *vxlan, struct sk_buff *skb)
626{
627 const struct ethhdr *eth = (struct ethhdr *) skb->data;
628 const struct vxlan_fdb *f;
629
630 if (is_multicast_ether_addr(eth->h_dest))
631 return vxlan->gaddr;
632
633 f = vxlan_find_mac(vxlan, eth->h_dest);
634 if (f)
635 return f->remote_ip;
636 else
637 return vxlan->gaddr;
638
639}
640
641static void vxlan_sock_free(struct sk_buff *skb) 787static void vxlan_sock_free(struct sk_buff *skb)
642{ 788{
643 sock_put(skb->sk); 789 sock_put(skb->sk);
@@ -684,6 +830,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
684 struct vxlan_dev *vxlan = netdev_priv(dev); 830 struct vxlan_dev *vxlan = netdev_priv(dev);
685 struct rtable *rt; 831 struct rtable *rt;
686 const struct iphdr *old_iph; 832 const struct iphdr *old_iph;
833 struct ethhdr *eth;
687 struct iphdr *iph; 834 struct iphdr *iph;
688 struct vxlanhdr *vxh; 835 struct vxlanhdr *vxh;
689 struct udphdr *uh; 836 struct udphdr *uh;
@@ -694,10 +841,55 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
694 __be16 df = 0; 841 __be16 df = 0;
695 __u8 tos, ttl; 842 __u8 tos, ttl;
696 int err; 843 int err;
844 bool did_rsc = false;
845 const struct vxlan_fdb *f;
846
847 skb_reset_mac_header(skb);
848 eth = eth_hdr(skb);
697 849
698 dst = vxlan_find_dst(vxlan, skb); 850 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
699 if (!dst) 851 return arp_reduce(dev, skb);
852 else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
853 did_rsc = route_shortcircuit(dev, skb);
854
855 f = vxlan_find_mac(vxlan, eth->h_dest);
856 if (f == NULL) {
857 did_rsc = false;
858 dst = vxlan->gaddr;
859 if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
860 !is_multicast_ether_addr(eth->h_dest))
861 vxlan_fdb_miss(vxlan, eth->h_dest);
862 } else
863 dst = f->remote_ip;
864
865 if (!dst) {
866 if (did_rsc) {
867 __skb_pull(skb, skb_network_offset(skb));
868 skb->ip_summed = CHECKSUM_NONE;
869 skb->pkt_type = PACKET_HOST;
870
871 /* short-circuited back to local bridge */
872 if (netif_rx(skb) == NET_RX_SUCCESS) {
873 struct vxlan_stats *stats =
874 this_cpu_ptr(vxlan->stats);
875
876 u64_stats_update_begin(&stats->syncp);
877 stats->tx_packets++;
878 stats->tx_bytes += pkt_len;
879 u64_stats_update_end(&stats->syncp);
880 } else {
881 dev->stats.tx_errors++;
882 dev->stats.tx_aborted_errors++;
883 }
884 return NETDEV_TX_OK;
885 }
700 goto drop; 886 goto drop;
887 }
888
889 if (!skb->encapsulation) {
890 skb_reset_inner_headers(skb);
891 skb->encapsulation = 1;
892 }
701 893
702 /* Need space for new headers (invalidates iph ptr) */ 894 /* Need space for new headers (invalidates iph ptr) */
703 if (skb_cow_head(skb, VXLAN_HEADROOM)) 895 if (skb_cow_head(skb, VXLAN_HEADROOM))
@@ -769,8 +961,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
769 961
770 vxlan_set_owner(dev, skb); 962 vxlan_set_owner(dev, skb);
771 963
772 /* See __IPTUNNEL_XMIT */ 964 /* See iptunnel_xmit() */
773 skb->ip_summed = CHECKSUM_NONE; 965 if (skb->ip_summed != CHECKSUM_PARTIAL)
966 skb->ip_summed = CHECKSUM_NONE;
774 ip_select_ident(iph, &rt->dst, NULL); 967 ip_select_ident(iph, &rt->dst, NULL);
775 968
776 err = ip_local_out(skb); 969 err = ip_local_out(skb);
@@ -991,6 +1184,10 @@ static void vxlan_setup(struct net_device *dev)
991 dev->tx_queue_len = 0; 1184 dev->tx_queue_len = 0;
992 dev->features |= NETIF_F_LLTX; 1185 dev->features |= NETIF_F_LLTX;
993 dev->features |= NETIF_F_NETNS_LOCAL; 1186 dev->features |= NETIF_F_NETNS_LOCAL;
1187 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1188 dev->features |= NETIF_F_RXCSUM;
1189
1190 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
994 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1191 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
995 1192
996 spin_lock_init(&vxlan->hash_lock); 1193 spin_lock_init(&vxlan->hash_lock);
@@ -1020,6 +1217,10 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
1020 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, 1217 [IFLA_VXLAN_AGEING] = { .type = NLA_U32 },
1021 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, 1218 [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 },
1022 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) }, 1219 [IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
1220 [IFLA_VXLAN_PROXY] = { .type = NLA_U8 },
1221 [IFLA_VXLAN_RSC] = { .type = NLA_U8 },
1222 [IFLA_VXLAN_L2MISS] = { .type = NLA_U8 },
1223 [IFLA_VXLAN_L3MISS] = { .type = NLA_U8 },
1023}; 1224};
1024 1225
1025static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) 1226static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1111,14 +1312,29 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
1111 if (data[IFLA_VXLAN_TOS]) 1312 if (data[IFLA_VXLAN_TOS])
1112 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]); 1313 vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
1113 1314
1315 if (data[IFLA_VXLAN_TTL])
1316 vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
1317
1114 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) 1318 if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
1115 vxlan->learn = true; 1319 vxlan->flags |= VXLAN_F_LEARN;
1116 1320
1117 if (data[IFLA_VXLAN_AGEING]) 1321 if (data[IFLA_VXLAN_AGEING])
1118 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); 1322 vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
1119 else 1323 else
1120 vxlan->age_interval = FDB_AGE_DEFAULT; 1324 vxlan->age_interval = FDB_AGE_DEFAULT;
1121 1325
1326 if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
1327 vxlan->flags |= VXLAN_F_PROXY;
1328
1329 if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
1330 vxlan->flags |= VXLAN_F_RSC;
1331
1332 if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
1333 vxlan->flags |= VXLAN_F_L2MISS;
1334
1335 if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
1336 vxlan->flags |= VXLAN_F_L3MISS;
1337
1122 if (data[IFLA_VXLAN_LIMIT]) 1338 if (data[IFLA_VXLAN_LIMIT])
1123 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]); 1339 vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
1124 1340
@@ -1155,6 +1371,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
1155 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ 1371 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
1156 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ 1372 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
1157 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ 1373 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
1374 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
1375 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */
1376 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L2MISS */
1377 nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_L3MISS */
1158 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */ 1378 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
1159 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */ 1379 nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
1160 nla_total_size(sizeof(struct ifla_vxlan_port_range)) + 1380 nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -1183,7 +1403,15 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
1183 1403
1184 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) || 1404 if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
1185 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) || 1405 nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
1186 nla_put_u8(skb, IFLA_VXLAN_LEARNING, vxlan->learn) || 1406 nla_put_u8(skb, IFLA_VXLAN_LEARNING,
1407 !!(vxlan->flags & VXLAN_F_LEARN)) ||
1408 nla_put_u8(skb, IFLA_VXLAN_PROXY,
1409 !!(vxlan->flags & VXLAN_F_PROXY)) ||
1410 nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
1411 nla_put_u8(skb, IFLA_VXLAN_L2MISS,
1412 !!(vxlan->flags & VXLAN_F_L2MISS)) ||
1413 nla_put_u8(skb, IFLA_VXLAN_L3MISS,
1414 !!(vxlan->flags & VXLAN_F_L3MISS)) ||
1187 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) || 1415 nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
1188 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax)) 1416 nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
1189 goto nla_put_failure; 1417 goto nla_put_failure;
diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
index eac709bed7ae..df70248e2fda 100644
--- a/drivers/net/wan/Makefile
+++ b/drivers/net/wan/Makefile
@@ -52,9 +52,9 @@ endif
52 52
53quiet_cmd_build_wanxlfw = BLD FW $@ 53quiet_cmd_build_wanxlfw = BLD FW $@
54 cmd_build_wanxlfw = \ 54 cmd_build_wanxlfw = \
55 $(CPP) -Wp,-MD,$(depfile) -I$(srctree)/include $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \ 55 $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
56 $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \ 56 $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
57 hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \ 57 hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
58 rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o 58 rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
59 59
60$(obj)/wanxlfw.inc: $(src)/wanxlfw.S 60$(obj)/wanxlfw.inc: $(src)/wanxlfw.S
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index ef36cafd44b7..851dc7b7e8b0 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -707,8 +707,7 @@ static void dscc4_free1(struct pci_dev *pdev)
707 kfree(ppriv); 707 kfree(ppriv);
708} 708}
709 709
710static int __devinit dscc4_init_one(struct pci_dev *pdev, 710static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
711 const struct pci_device_id *ent)
712{ 711{
713 struct dscc4_pci_priv *priv; 712 struct dscc4_pci_priv *priv;
714 struct dscc4_dev_priv *dpriv; 713 struct dscc4_dev_priv *dpriv;
@@ -1968,7 +1967,7 @@ err_out:
1968 return -ENOMEM; 1967 return -ENOMEM;
1969} 1968}
1970 1969
1971static void __devexit dscc4_remove_one(struct pci_dev *pdev) 1970static void dscc4_remove_one(struct pci_dev *pdev)
1972{ 1971{
1973 struct dscc4_pci_priv *ppriv; 1972 struct dscc4_pci_priv *ppriv;
1974 struct dscc4_dev_priv *root; 1973 struct dscc4_dev_priv *root;
@@ -2053,7 +2052,7 @@ static struct pci_driver dscc4_driver = {
2053 .name = DRV_NAME, 2052 .name = DRV_NAME,
2054 .id_table = dscc4_pci_tbl, 2053 .id_table = dscc4_pci_tbl,
2055 .probe = dscc4_init_one, 2054 .probe = dscc4_init_one,
2056 .remove = __devexit_p(dscc4_remove_one), 2055 .remove = dscc4_remove_one,
2057}; 2056};
2058 2057
2059module_pci_driver(dscc4_driver); 2058module_pci_driver(dscc4_driver);
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index b6271325f803..56941d6547eb 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2361,7 +2361,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
2361 * via a printk and leave the corresponding interface and all that follow 2361 * via a printk and leave the corresponding interface and all that follow
2362 * disabled. 2362 * disabled.
2363 */ 2363 */
2364static char *type_strings[] __devinitdata = { 2364static char *type_strings[] = {
2365 "no hardware", /* Should never be seen */ 2365 "no hardware", /* Should never be seen */
2366 "FarSync T2P", 2366 "FarSync T2P",
2367 "FarSync T4P", 2367 "FarSync T4P",
@@ -2371,7 +2371,7 @@ static char *type_strings[] __devinitdata = {
2371 "FarSync TE1" 2371 "FarSync TE1"
2372}; 2372};
2373 2373
2374static void __devinit 2374static void
2375fst_init_card(struct fst_card_info *card) 2375fst_init_card(struct fst_card_info *card)
2376{ 2376{
2377 int i; 2377 int i;
@@ -2415,7 +2415,7 @@ static const struct net_device_ops fst_ops = {
2415 * Initialise card when detected. 2415 * Initialise card when detected.
2416 * Returns 0 to indicate success, or errno otherwise. 2416 * Returns 0 to indicate success, or errno otherwise.
2417 */ 2417 */
2418static int __devinit 2418static int
2419fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2419fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2420{ 2420{
2421 static int no_of_cards_added = 0; 2421 static int no_of_cards_added = 0;
@@ -2615,7 +2615,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2615/* 2615/*
2616 * Cleanup and close down a card 2616 * Cleanup and close down a card
2617 */ 2617 */
2618static void __devexit 2618static void
2619fst_remove_one(struct pci_dev *pdev) 2619fst_remove_one(struct pci_dev *pdev)
2620{ 2620{
2621 struct fst_card_info *card; 2621 struct fst_card_info *card;
@@ -2652,7 +2652,7 @@ static struct pci_driver fst_driver = {
2652 .name = FST_NAME, 2652 .name = FST_NAME,
2653 .id_table = fst_pci_dev_id, 2653 .id_table = fst_pci_dev_id,
2654 .probe = fst_add_one, 2654 .probe = fst_add_one,
2655 .remove = __devexit_p(fst_remove_one), 2655 .remove = fst_remove_one,
2656 .suspend = NULL, 2656 .suspend = NULL,
2657 .resume = NULL, 2657 .resume = NULL,
2658}; 2658};
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index cf4903355a34..62f01b74cbd6 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -676,8 +676,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
676 676
677 677
678#ifdef NEED_DETECT_RAM 678#ifdef NEED_DETECT_RAM
679static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, 679static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
680 u32 ramsize)
681{ 680{
682 /* Round RAM size to 32 bits, fill from end to start */ 681 /* Round RAM size to 32 bits, fill from end to start */
683 u32 i = ramsize &= ~3; 682 u32 i = ramsize &= ~3;
@@ -705,7 +704,7 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
705#endif /* NEED_DETECT_RAM */ 704#endif /* NEED_DETECT_RAM */
706 705
707 706
708static void __devinit sca_init(card_t *card, int wait_states) 707static void sca_init(card_t *card, int wait_states)
709{ 708{
710 sca_out(wait_states, WCRL, card); /* Wait Control */ 709 sca_out(wait_states, WCRL, card); /* Wait Control */
711 sca_out(wait_states, WCRM, card); 710 sca_out(wait_states, WCRM, card);
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index e2779faa6c4f..6269a09c7369 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -605,8 +605,7 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
605} 605}
606 606
607 607
608static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase, 608static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
609 u32 ramsize)
610{ 609{
611 /* Round RAM size to 32 bits, fill from end to start */ 610 /* Round RAM size to 32 bits, fill from end to start */
612 u32 i = ramsize &= ~3; 611 u32 i = ramsize &= ~3;
@@ -625,7 +624,7 @@ static u32 __devinit sca_detect_ram(card_t *card, u8 __iomem *rambase,
625} 624}
626 625
627 626
628static void __devinit sca_init(card_t *card, int wait_states) 627static void sca_init(card_t *card, int wait_states)
629{ 628{
630 sca_out(wait_states, WCRL, card); /* Wait Control */ 629 sca_out(wait_states, WCRL, card); /* Wait Control */
631 sca_out(wait_states, WCRM, card); 630 sca_out(wait_states, WCRM, card);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 760776b3d66c..fc9d11d74d60 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -1326,7 +1326,7 @@ static const struct net_device_ops hss_hdlc_ops = {
1326 .ndo_do_ioctl = hss_hdlc_ioctl, 1326 .ndo_do_ioctl = hss_hdlc_ioctl,
1327}; 1327};
1328 1328
1329static int __devinit hss_init_one(struct platform_device *pdev) 1329static int hss_init_one(struct platform_device *pdev)
1330{ 1330{
1331 struct port *port; 1331 struct port *port;
1332 struct net_device *dev; 1332 struct net_device *dev;
@@ -1377,7 +1377,7 @@ err_free:
1377 return err; 1377 return err;
1378} 1378}
1379 1379
1380static int __devexit hss_remove_one(struct platform_device *pdev) 1380static int hss_remove_one(struct platform_device *pdev)
1381{ 1381{
1382 struct port *port = platform_get_drvdata(pdev); 1382 struct port *port = platform_get_drvdata(pdev);
1383 1383
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index f5d533a706ea..7ef435bab425 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -816,8 +816,7 @@ static const struct net_device_ops lmc_ops = {
816 .ndo_get_stats = lmc_get_stats, 816 .ndo_get_stats = lmc_get_stats,
817}; 817};
818 818
819static int __devinit lmc_init_one(struct pci_dev *pdev, 819static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
820 const struct pci_device_id *ent)
821{ 820{
822 lmc_softc_t *sc; 821 lmc_softc_t *sc;
823 struct net_device *dev; 822 struct net_device *dev;
@@ -986,7 +985,7 @@ err_req_io:
986/* 985/*
987 * Called from pci when removing module. 986 * Called from pci when removing module.
988 */ 987 */
989static void __devexit lmc_remove_one(struct pci_dev *pdev) 988static void lmc_remove_one(struct pci_dev *pdev)
990{ 989{
991 struct net_device *dev = pci_get_drvdata(pdev); 990 struct net_device *dev = pci_get_drvdata(pdev);
992 991
@@ -1733,7 +1732,7 @@ static struct pci_driver lmc_driver = {
1733 .name = "lmc", 1732 .name = "lmc",
1734 .id_table = lmc_pci_tbl, 1733 .id_table = lmc_pci_tbl,
1735 .probe = lmc_init_one, 1734 .probe = lmc_init_one,
1736 .remove = __devexit_p(lmc_remove_one), 1735 .remove = lmc_remove_one,
1737}; 1736};
1738 1737
1739module_pci_driver(lmc_driver); 1738module_pci_driver(lmc_driver);
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 5fe246e060d7..53efc57fcace 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -297,8 +297,8 @@ static const struct net_device_ops pc300_ops = {
297 .ndo_do_ioctl = pc300_ioctl, 297 .ndo_do_ioctl = pc300_ioctl,
298}; 298};
299 299
300static int __devinit pc300_pci_init_one(struct pci_dev *pdev, 300static int pc300_pci_init_one(struct pci_dev *pdev,
301 const struct pci_device_id *ent) 301 const struct pci_device_id *ent)
302{ 302{
303 card_t *card; 303 card_t *card;
304 u32 __iomem *p; 304 u32 __iomem *p;
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 9659fcaa34ed..ddbce54040e2 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -276,8 +276,8 @@ static const struct net_device_ops pci200_ops = {
276 .ndo_do_ioctl = pci200_ioctl, 276 .ndo_do_ioctl = pci200_ioctl,
277}; 277};
278 278
279static int __devinit pci200_pci_init_one(struct pci_dev *pdev, 279static int pci200_pci_init_one(struct pci_dev *pdev,
280 const struct pci_device_id *ent) 280 const struct pci_device_id *ent)
281{ 281{
282 card_t *card; 282 card_t *card;
283 u32 __iomem *p; 283 u32 __iomem *p;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index feb7541b33fb..6a24a5a70cc7 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -557,8 +557,8 @@ static const struct net_device_ops wanxl_ops = {
557 .ndo_get_stats = wanxl_get_stats, 557 .ndo_get_stats = wanxl_get_stats,
558}; 558};
559 559
560static int __devinit wanxl_pci_init_one(struct pci_dev *pdev, 560static int wanxl_pci_init_one(struct pci_dev *pdev,
561 const struct pci_device_id *ent) 561 const struct pci_device_id *ent)
562{ 562{
563 card_t *card; 563 card_t *card;
564 u32 ramsize, stat; 564 u32 ramsize, stat;
diff --git a/drivers/net/wan/wanxlfw.S b/drivers/net/wan/wanxlfw.S
index 73aae2bf2f1c..21565d59ec7b 100644
--- a/drivers/net/wan/wanxlfw.S
+++ b/drivers/net/wan/wanxlfw.S
@@ -35,6 +35,7 @@
35*/ 35*/
36 36
37#include <linux/hdlc.h> 37#include <linux/hdlc.h>
38#include <linux/hdlc/ioctl.h>
38#include "wanxl.h" 39#include "wanxl.h"
39 40
40/* memory addresses and offsets */ 41/* memory addresses and offsets */
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 154a4965be4f..3d339e04efb7 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1761,7 +1761,7 @@ static const struct ieee80211_ops adm8211_ops = {
1761 .get_tsf = adm8211_get_tsft 1761 .get_tsf = adm8211_get_tsft
1762}; 1762};
1763 1763
1764static int __devinit adm8211_probe(struct pci_dev *pdev, 1764static int adm8211_probe(struct pci_dev *pdev,
1765 const struct pci_device_id *id) 1765 const struct pci_device_id *id)
1766{ 1766{
1767 struct ieee80211_hw *dev; 1767 struct ieee80211_hw *dev;
@@ -1935,7 +1935,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1935} 1935}
1936 1936
1937 1937
1938static void __devexit adm8211_remove(struct pci_dev *pdev) 1938static void adm8211_remove(struct pci_dev *pdev)
1939{ 1939{
1940 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 1940 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1941 struct adm8211_priv *priv; 1941 struct adm8211_priv *priv;
@@ -1985,7 +1985,7 @@ static struct pci_driver adm8211_driver = {
1985 .name = "adm8211", 1985 .name = "adm8211",
1986 .id_table = adm8211_pci_id_table, 1986 .id_table = adm8211_pci_id_table,
1987 .probe = adm8211_probe, 1987 .probe = adm8211_probe,
1988 .remove = __devexit_p(adm8211_remove), 1988 .remove = adm8211_remove,
1989#ifdef CONFIG_PM 1989#ifdef CONFIG_PM
1990 .suspend = adm8211_suspend, 1990 .suspend = adm8211_suspend,
1991 .resume = adm8211_resume, 1991 .resume = adm8211_resume,
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 3cd05a7173f6..53295418f576 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -78,7 +78,7 @@ static struct pci_driver airo_driver = {
78 .name = DRV_NAME, 78 .name = DRV_NAME,
79 .id_table = card_ids, 79 .id_table = card_ids,
80 .probe = airo_pci_probe, 80 .probe = airo_pci_probe,
81 .remove = __devexit_p(airo_pci_remove), 81 .remove = airo_pci_remove,
82 .suspend = airo_pci_suspend, 82 .suspend = airo_pci_suspend,
83 .resume = airo_pci_resume, 83 .resume = airo_pci_resume,
84}; 84};
@@ -5584,7 +5584,7 @@ static void timer_func( struct net_device *dev ) {
5584} 5584}
5585 5585
5586#ifdef CONFIG_PCI 5586#ifdef CONFIG_PCI
5587static int __devinit airo_pci_probe(struct pci_dev *pdev, 5587static int airo_pci_probe(struct pci_dev *pdev,
5588 const struct pci_device_id *pent) 5588 const struct pci_device_id *pent)
5589{ 5589{
5590 struct net_device *dev; 5590 struct net_device *dev;
@@ -5606,7 +5606,7 @@ static int __devinit airo_pci_probe(struct pci_dev *pdev,
5606 return 0; 5606 return 0;
5607} 5607}
5608 5608
5609static void __devexit airo_pci_remove(struct pci_dev *pdev) 5609static void airo_pci_remove(struct pci_dev *pdev)
5610{ 5610{
5611 struct net_device *dev = pci_get_drvdata(pdev); 5611 struct net_device *dev = pci_get_drvdata(pdev);
5612 5612
@@ -7433,7 +7433,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
7433 num_null_ies++; 7433 num_null_ies++;
7434 break; 7434 break;
7435 7435
7436 case WLAN_EID_GENERIC: 7436 case WLAN_EID_VENDOR_SPECIFIC:
7437 if (ie[1] >= 4 && 7437 if (ie[1] >= 4 &&
7438 ie[2] == 0x00 && 7438 ie[2] == 0x00 &&
7439 ie[3] == 0x50 && 7439 ie[3] == 0x50 &&
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 99b9ddf21273..77fa4286e5e9 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -379,7 +379,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
379 manifest_sync_timeout); 379 manifest_sync_timeout);
380 380
381 if (!size) { 381 if (!size) {
382 dev_printk(KERN_ERR, &udev->dev, "FW buffer length invalid!\n"); 382 dev_err(&udev->dev, "FW buffer length invalid!\n");
383 return -EINVAL; 383 return -EINVAL;
384 } 384 }
385 385
@@ -391,8 +391,8 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
391 if (need_dfu_state) { 391 if (need_dfu_state) {
392 ret = at76_dfu_get_state(udev, &dfu_state); 392 ret = at76_dfu_get_state(udev, &dfu_state);
393 if (ret < 0) { 393 if (ret < 0) {
394 dev_printk(KERN_ERR, &udev->dev, 394 dev_err(&udev->dev,
395 "cannot get DFU state: %d\n", ret); 395 "cannot get DFU state: %d\n", ret);
396 goto exit; 396 goto exit;
397 } 397 }
398 need_dfu_state = 0; 398 need_dfu_state = 0;
@@ -407,9 +407,9 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
407 dfu_timeout = at76_get_timeout(&dfu_stat_buf); 407 dfu_timeout = at76_get_timeout(&dfu_stat_buf);
408 need_dfu_state = 0; 408 need_dfu_state = 0;
409 } else 409 } else
410 dev_printk(KERN_ERR, &udev->dev, 410 dev_err(&udev->dev,
411 "at76_dfu_get_status returned %d\n", 411 "at76_dfu_get_status returned %d\n",
412 ret); 412 ret);
413 break; 413 break;
414 414
415 case STATE_DFU_DOWNLOAD_BUSY: 415 case STATE_DFU_DOWNLOAD_BUSY:
@@ -438,9 +438,9 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
438 blockno++; 438 blockno++;
439 439
440 if (ret != bsize) 440 if (ret != bsize)
441 dev_printk(KERN_ERR, &udev->dev, 441 dev_err(&udev->dev,
442 "at76_load_int_fw_block " 442 "at76_load_int_fw_block returned %d\n",
443 "returned %d\n", ret); 443 ret);
444 need_dfu_state = 1; 444 need_dfu_state = 1;
445 break; 445 break;
446 446
@@ -1255,8 +1255,7 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
1255 at76_dbg(DBG_DEVSTART, "opmode %d", op_mode); 1255 at76_dbg(DBG_DEVSTART, "opmode %d", op_mode);
1256 1256
1257 if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) { 1257 if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
1258 dev_printk(KERN_ERR, &udev->dev, "unexpected opmode %d\n", 1258 dev_err(&udev->dev, "unexpected opmode %d\n", op_mode);
1259 op_mode);
1260 return -EINVAL; 1259 return -EINVAL;
1261 } 1260 }
1262 1261
@@ -1275,9 +1274,9 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
1275 size, bsize, blockno); 1274 size, bsize, blockno);
1276 ret = at76_load_ext_fw_block(udev, blockno, block, bsize); 1275 ret = at76_load_ext_fw_block(udev, blockno, block, bsize);
1277 if (ret != bsize) { 1276 if (ret != bsize) {
1278 dev_printk(KERN_ERR, &udev->dev, 1277 dev_err(&udev->dev,
1279 "loading %dth firmware block failed: %d\n", 1278 "loading %dth firmware block failed: %d\n",
1280 blockno, ret); 1279 blockno, ret);
1281 goto exit; 1280 goto exit;
1282 } 1281 }
1283 buf += bsize; 1282 buf += bsize;
@@ -1293,8 +1292,8 @@ static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
1293exit: 1292exit:
1294 kfree(block); 1293 kfree(block);
1295 if (ret < 0) 1294 if (ret < 0)
1296 dev_printk(KERN_ERR, &udev->dev, 1295 dev_err(&udev->dev,
1297 "downloading external firmware failed: %d\n", ret); 1296 "downloading external firmware failed: %d\n", ret);
1298 return ret; 1297 return ret;
1299} 1298}
1300 1299
@@ -1308,8 +1307,8 @@ static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe)
1308 need_remap ? 0 : 2 * HZ); 1307 need_remap ? 0 : 2 * HZ);
1309 1308
1310 if (ret < 0) { 1309 if (ret < 0) {
1311 dev_printk(KERN_ERR, &udev->dev, 1310 dev_err(&udev->dev,
1312 "downloading internal fw failed with %d\n", ret); 1311 "downloading internal fw failed with %d\n", ret);
1313 goto exit; 1312 goto exit;
1314 } 1313 }
1315 1314
@@ -1319,8 +1318,8 @@ static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe)
1319 if (need_remap) { 1318 if (need_remap) {
1320 ret = at76_remap(udev); 1319 ret = at76_remap(udev);
1321 if (ret < 0) { 1320 if (ret < 0) {
1322 dev_printk(KERN_ERR, &udev->dev, 1321 dev_err(&udev->dev,
1323 "sending REMAP failed with %d\n", ret); 1322 "sending REMAP failed with %d\n", ret);
1324 goto exit; 1323 goto exit;
1325 } 1324 }
1326 } 1325 }
@@ -1555,11 +1554,10 @@ static struct fwentry *at76_load_firmware(struct usb_device *udev,
1555 at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname); 1554 at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
1556 ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev); 1555 ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
1557 if (ret < 0) { 1556 if (ret < 0) {
1558 dev_printk(KERN_ERR, &udev->dev, "firmware %s not found!\n", 1557 dev_err(&udev->dev, "firmware %s not found!\n",
1559 fwe->fwname); 1558 fwe->fwname);
1560 dev_printk(KERN_ERR, &udev->dev, 1559 dev_err(&udev->dev,
1561 "you may need to download the firmware from " 1560 "you may need to download the firmware from http://developer.berlios.de/projects/at76c503a/\n");
1562 "http://developer.berlios.de/projects/at76c503a/\n");
1563 goto exit; 1561 goto exit;
1564 } 1562 }
1565 1563
@@ -1567,17 +1565,17 @@ static struct fwentry *at76_load_firmware(struct usb_device *udev,
1567 fwh = (struct at76_fw_header *)(fwe->fw->data); 1565 fwh = (struct at76_fw_header *)(fwe->fw->data);
1568 1566
1569 if (fwe->fw->size <= sizeof(*fwh)) { 1567 if (fwe->fw->size <= sizeof(*fwh)) {
1570 dev_printk(KERN_ERR, &udev->dev, 1568 dev_err(&udev->dev,
1571 "firmware is too short (0x%zx)\n", fwe->fw->size); 1569 "firmware is too short (0x%zx)\n", fwe->fw->size);
1572 goto exit; 1570 goto exit;
1573 } 1571 }
1574 1572
1575 /* CRC currently not checked */ 1573 /* CRC currently not checked */
1576 fwe->board_type = le32_to_cpu(fwh->board_type); 1574 fwe->board_type = le32_to_cpu(fwh->board_type);
1577 if (fwe->board_type != board_type) { 1575 if (fwe->board_type != board_type) {
1578 dev_printk(KERN_ERR, &udev->dev, 1576 dev_err(&udev->dev,
1579 "board type mismatch, requested %u, got %u\n", 1577 "board type mismatch, requested %u, got %u\n",
1580 board_type, fwe->board_type); 1578 board_type, fwe->board_type);
1581 goto exit; 1579 goto exit;
1582 } 1580 }
1583 1581
@@ -2150,8 +2148,7 @@ static int at76_alloc_urbs(struct at76_priv *priv,
2150 } 2148 }
2151 2149
2152 if (!ep_in || !ep_out) { 2150 if (!ep_in || !ep_out) {
2153 dev_printk(KERN_ERR, &interface->dev, 2151 dev_err(&interface->dev, "bulk endpoints missing\n");
2154 "bulk endpoints missing\n");
2155 return -ENXIO; 2152 return -ENXIO;
2156 } 2153 }
2157 2154
@@ -2161,15 +2158,14 @@ static int at76_alloc_urbs(struct at76_priv *priv,
2161 priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 2158 priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
2162 priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 2159 priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
2163 if (!priv->rx_urb || !priv->tx_urb) { 2160 if (!priv->rx_urb || !priv->tx_urb) {
2164 dev_printk(KERN_ERR, &interface->dev, "cannot allocate URB\n"); 2161 dev_err(&interface->dev, "cannot allocate URB\n");
2165 return -ENOMEM; 2162 return -ENOMEM;
2166 } 2163 }
2167 2164
2168 buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE; 2165 buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
2169 priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); 2166 priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
2170 if (!priv->bulk_out_buffer) { 2167 if (!priv->bulk_out_buffer) {
2171 dev_printk(KERN_ERR, &interface->dev, 2168 dev_err(&interface->dev, "cannot allocate output buffer\n");
2172 "cannot allocate output buffer\n");
2173 return -ENOMEM; 2169 return -ENOMEM;
2174 } 2170 }
2175 2171
@@ -2230,8 +2226,7 @@ static int at76_init_new_device(struct at76_priv *priv,
2230 /* MAC address */ 2226 /* MAC address */
2231 ret = at76_get_hw_config(priv); 2227 ret = at76_get_hw_config(priv);
2232 if (ret < 0) { 2228 if (ret < 0) {
2233 dev_printk(KERN_ERR, &interface->dev, 2229 dev_err(&interface->dev, "cannot get MAC address\n");
2234 "cannot get MAC address\n");
2235 goto exit; 2230 goto exit;
2236 } 2231 }
2237 2232
@@ -2358,8 +2353,8 @@ static int at76_probe(struct usb_interface *interface,
2358 we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */ 2353 we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
2359 2354
2360 if (op_mode == OPMODE_HW_CONFIG_MODE) { 2355 if (op_mode == OPMODE_HW_CONFIG_MODE) {
2361 dev_printk(KERN_ERR, &interface->dev, 2356 dev_err(&interface->dev,
2362 "cannot handle a device in HW_CONFIG_MODE\n"); 2357 "cannot handle a device in HW_CONFIG_MODE\n");
2363 ret = -EBUSY; 2358 ret = -EBUSY;
2364 goto error; 2359 goto error;
2365 } 2360 }
@@ -2371,9 +2366,9 @@ static int at76_probe(struct usb_interface *interface,
2371 "downloading internal firmware\n"); 2366 "downloading internal firmware\n");
2372 ret = at76_load_internal_fw(udev, fwe); 2367 ret = at76_load_internal_fw(udev, fwe);
2373 if (ret < 0) { 2368 if (ret < 0) {
2374 dev_printk(KERN_ERR, &interface->dev, 2369 dev_err(&interface->dev,
2375 "error %d downloading internal firmware\n", 2370 "error %d downloading internal firmware\n",
2376 ret); 2371 ret);
2377 goto error; 2372 goto error;
2378 } 2373 }
2379 usb_put_dev(udev); 2374 usb_put_dev(udev);
@@ -2408,8 +2403,8 @@ static int at76_probe(struct usb_interface *interface,
2408 /* Re-check firmware version */ 2403 /* Re-check firmware version */
2409 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv)); 2404 ret = at76_get_mib(udev, MIB_FW_VERSION, &fwv, sizeof(fwv));
2410 if (ret < 0) { 2405 if (ret < 0) {
2411 dev_printk(KERN_ERR, &interface->dev, 2406 dev_err(&interface->dev,
2412 "error %d getting firmware version\n", ret); 2407 "error %d getting firmware version\n", ret);
2413 goto error; 2408 goto error;
2414 } 2409 }
2415 } 2410 }
@@ -2449,7 +2444,7 @@ static void at76_disconnect(struct usb_interface *interface)
2449 2444
2450 wiphy_info(priv->hw->wiphy, "disconnecting\n"); 2445 wiphy_info(priv->hw->wiphy, "disconnecting\n");
2451 at76_delete_device(priv); 2446 at76_delete_device(priv);
2452 dev_printk(KERN_INFO, &interface->dev, "disconnected\n"); 2447 dev_info(&interface->dev, "disconnected\n");
2453} 2448}
2454 2449
2455/* Structure for registering this driver with the USB subsystem */ 2450/* Structure for registering this driver with the USB subsystem */
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 09602241901b..1a67a4f829fe 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,4 +1,7 @@
1menuconfig ATH_COMMON 1config ATH_COMMON
2 tristate
3
4menuconfig ATH_CARDS
2 tristate "Atheros Wireless Cards" 5 tristate "Atheros Wireless Cards"
3 depends on CFG80211 && (!UML || BROKEN) 6 depends on CFG80211 && (!UML || BROKEN)
4 ---help--- 7 ---help---
@@ -14,7 +17,7 @@ menuconfig ATH_COMMON
14 17
15 http://wireless.kernel.org/en/users/Drivers/Atheros 18 http://wireless.kernel.org/en/users/Drivers/Atheros
16 19
17if ATH_COMMON 20if ATH_CARDS
18 21
19config ATH_DEBUG 22config ATH_DEBUG
20 bool "Atheros wireless debugging" 23 bool "Atheros wireless debugging"
@@ -26,5 +29,6 @@ source "drivers/net/wireless/ath/ath5k/Kconfig"
26source "drivers/net/wireless/ath/ath9k/Kconfig" 29source "drivers/net/wireless/ath/ath9k/Kconfig"
27source "drivers/net/wireless/ath/carl9170/Kconfig" 30source "drivers/net/wireless/ath/carl9170/Kconfig"
28source "drivers/net/wireless/ath/ath6kl/Kconfig" 31source "drivers/net/wireless/ath/ath6kl/Kconfig"
32source "drivers/net/wireless/ath/ar5523/Kconfig"
29 33
30endif 34endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d716b748e574..1e18621326dc 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K_HW) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_CARL9170) += carl9170/ 3obj-$(CONFIG_CARL9170) += carl9170/
4obj-$(CONFIG_ATH6KL) += ath6kl/ 4obj-$(CONFIG_ATH6KL) += ath6kl/
5obj-$(CONFIG_AR5523) += ar5523/
5 6
6obj-$(CONFIG_ATH_COMMON) += ath.o 7obj-$(CONFIG_ATH_COMMON) += ath.o
7 8
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 000000000000..0d320cc7769b
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,8 @@
1config AR5523
2 tristate "Atheros AR5523 wireless driver support"
3 depends on MAC80211 && USB
4 select ATH_COMMON
5 select FW_LOADER
6 ---help---
7 This module adds support for AR5523 based USB dongles such as D-Link
8 DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 000000000000..ebf7f3bf0a33
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 000000000000..7157f7d311c5
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1798 @@
1/*
2 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2006 Sam Leffler, Errno Consulting
4 * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
5 * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
6 * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
7 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/*
22 * This driver is based on the uath driver written by Damien Bergamini for
23 * OpenBSD, who did black-box analysis of the Windows binary driver to find
24 * out how the hardware works. It contains a lot of magic numbers because of
25 * that and only has minimal functionality.
26 */
27#include <linux/compiler.h>
28#include <linux/init.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/list.h>
32#include <linux/completion.h>
33#include <linux/firmware.h>
34#include <linux/skbuff.h>
35#include <linux/usb.h>
36#include <net/mac80211.h>
37
38#include "ar5523.h"
39#include "ar5523_hw.h"
40
41/*
42 * Various supported device vendors/products.
43 * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
44 */
45
46static int ar5523_submit_rx_cmd(struct ar5523 *ar);
47static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
48
49static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
50 struct ar5523_tx_cmd *cmd)
51{
52 int dlen, olen;
53 __be32 *rp;
54
55 dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);
56
57 if (dlen < 0) {
58 WARN_ON(1);
59 goto out;
60 }
61
62 ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
63 dlen);
64
65 rp = (__be32 *)(hdr + 1);
66 if (dlen >= sizeof(u32)) {
67 olen = be32_to_cpu(rp[0]);
68 dlen -= sizeof(u32);
69 if (olen == 0) {
70 /* convention is 0 =>'s one word */
71 olen = sizeof(u32);
72 }
73 } else
74 olen = 0;
75
76 if (cmd->odata) {
77 if (cmd->olen < olen) {
78 ar5523_err(ar, "olen to small %d < %d\n",
79 cmd->olen, olen);
80 cmd->olen = 0;
81 cmd->res = -EOVERFLOW;
82 } else {
83 cmd->olen = olen;
84 memcpy(cmd->odata, &rp[1], olen);
85 cmd->res = 0;
86 }
87 }
88
89out:
90 complete(&cmd->done);
91}
92
93static void ar5523_cmd_rx_cb(struct urb *urb)
94{
95 struct ar5523 *ar = urb->context;
96 struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
97 struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
98 int dlen;
99 u32 code, hdrlen;
100
101 if (urb->status) {
102 if (urb->status != -ESHUTDOWN)
103 ar5523_err(ar, "RX USB error %d.\n", urb->status);
104 goto skip;
105 }
106
107 if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
108 ar5523_err(ar, "RX USB to short.\n");
109 goto skip;
110 }
111
112 ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
113 be32_to_cpu(hdr->code) & 0xff, hdr->priv);
114
115 code = be32_to_cpu(hdr->code);
116 hdrlen = be32_to_cpu(hdr->len);
117
118 switch (code & 0xff) {
119 default:
120 /* reply to a read command */
121 if (hdr->priv != AR5523_CMD_ID) {
122 ar5523_err(ar, "Unexpected command id: %02x\n",
123 code & 0xff);
124 goto skip;
125 }
126 ar5523_read_reply(ar, hdr, cmd);
127 break;
128
129 case WDCMSG_DEVICE_AVAIL:
130 ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
131 cmd->res = 0;
132 cmd->olen = 0;
133 complete(&cmd->done);
134 break;
135
136 case WDCMSG_SEND_COMPLETE:
137 ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
138 atomic_read(&ar->tx_nr_pending));
139 if (!test_bit(AR5523_HW_UP, &ar->flags))
140 ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
141 else {
142 mod_timer(&ar->tx_wd_timer,
143 jiffies + AR5523_TX_WD_TIMEOUT);
144 ar5523_data_tx_pkt_put(ar);
145
146 }
147 break;
148
149 case WDCMSG_TARGET_START:
150 /* This command returns a bogus id so it needs special
151 handling */
152 dlen = hdrlen - sizeof(*hdr);
153 if (dlen != (int)sizeof(u32)) {
154 ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
155 return;
156 }
157 memcpy(cmd->odata, hdr + 1, sizeof(u32));
158 cmd->olen = sizeof(u32);
159 cmd->res = 0;
160 complete(&cmd->done);
161 break;
162
163 case WDCMSG_STATS_UPDATE:
164 ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
165 break;
166 }
167
168skip:
169 ar5523_submit_rx_cmd(ar);
170}
171
172static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
173{
174 ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
175 if (!ar->rx_cmd_urb)
176 return -ENOMEM;
177
178 ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
179 GFP_KERNEL,
180 &ar->rx_cmd_urb->transfer_dma);
181 if (!ar->rx_cmd_buf) {
182 usb_free_urb(ar->rx_cmd_urb);
183 return -ENOMEM;
184 }
185 return 0;
186}
187
188static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
189{
190 usb_kill_urb(ar->rx_cmd_urb);
191}
192
193static void ar5523_free_rx_cmd(struct ar5523 *ar)
194{
195 usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
196 ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
197 usb_free_urb(ar->rx_cmd_urb);
198}
199
200static int ar5523_submit_rx_cmd(struct ar5523 *ar)
201{
202 int error;
203
204 usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
205 ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
206 AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
207 ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
208
209 error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
210 if (error) {
211 if (error != -ENODEV)
212 ar5523_err(ar, "error %d when submitting rx urb\n",
213 error);
214 return error;
215 }
216 return 0;
217}
218
219/*
220 * Command submitted cb
221 */
222static void ar5523_cmd_tx_cb(struct urb *urb)
223{
224 struct ar5523_tx_cmd *cmd = urb->context;
225 struct ar5523 *ar = cmd->ar;
226
227 if (urb->status) {
228 ar5523_err(ar, "Failed to TX command. Status = %d\n",
229 urb->status);
230 cmd->res = urb->status;
231 complete(&cmd->done);
232 return;
233 }
234
235 if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
236 cmd->res = 0;
237 complete(&cmd->done);
238 }
239}
240
241static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
242 int ilen, void *odata, int olen, int flags)
243{
244 struct ar5523_cmd_hdr *hdr;
245 struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
246 int xferlen, error;
247
248 /* always bulk-out a multiple of 4 bytes */
249 xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
250
251 hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
252 memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
253 hdr->len = cpu_to_be32(xferlen);
254 hdr->code = cpu_to_be32(code);
255 hdr->priv = AR5523_CMD_ID;
256
257 if (flags & AR5523_CMD_FLAG_MAGIC)
258 hdr->magic = cpu_to_be32(1 << 24);
259 memcpy(hdr + 1, idata, ilen);
260
261 cmd->odata = odata;
262 cmd->olen = olen;
263 cmd->flags = flags;
264
265 ar5523_dbg(ar, "do cmd %02x\n", code);
266
267 usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
268 cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
269 cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
270
271 error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
272 if (error) {
273 ar5523_err(ar, "could not send command 0x%x, error=%d\n",
274 code, error);
275 return error;
276 }
277
278 if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
279 cmd->odata = NULL;
280 ar5523_err(ar, "timeout waiting for command %02x reply\n",
281 code);
282 cmd->res = -ETIMEDOUT;
283 }
284 return cmd->res;
285}
286
287static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
288 int len, int flags)
289{
290 flags &= ~AR5523_CMD_FLAG_READ;
291 return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
292}
293
294static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
295 int ilen, void *odata, int olen, int flags)
296{
297 flags |= AR5523_CMD_FLAG_READ;
298 return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
299}
300
301static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
302{
303 struct ar5523_write_mac write;
304 int error;
305
306 write.reg = cpu_to_be32(reg);
307 write.len = cpu_to_be32(0); /* 0 = single write */
308 *(__be32 *)write.data = cpu_to_be32(val);
309
310 error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
311 3 * sizeof(u32), 0);
312 if (error != 0)
313 ar5523_err(ar, "could not write register 0x%02x\n", reg);
314 return error;
315}
316
317static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
318 int len)
319{
320 struct ar5523_write_mac write;
321 int error;
322
323 write.reg = cpu_to_be32(reg);
324 write.len = cpu_to_be32(len);
325 memcpy(write.data, data, len);
326
327 /* properly handle the case where len is zero (reset) */
328 error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
329 (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
330 if (error != 0)
331 ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
332 len, reg);
333 return error;
334}
335
336static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
337 int olen)
338{
339 int error;
340 __be32 which_be;
341
342 which_be = cpu_to_be32(which);
343 error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
344 &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
345 if (error != 0)
346 ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which);
347 return error;
348}
349
350static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
351{
352 int error;
353 __be32 cap_be, val_be;
354
355 cap_be = cpu_to_be32(cap);
356 error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be,
357 sizeof(cap_be), &val_be, sizeof(__be32),
358 AR5523_CMD_FLAG_MAGIC);
359 if (error != 0) {
360 ar5523_err(ar, "could not read capability %u\n", cap);
361 return error;
362 }
363 *val = be32_to_cpu(val_be);
364 return error;
365}
366
367static int ar5523_get_devcap(struct ar5523 *ar)
368{
369#define GETCAP(x) do { \
370 error = ar5523_get_capability(ar, x, &cap); \
371 if (error != 0) \
372 return error; \
373 ar5523_info(ar, "Cap: " \
374 "%s=0x%08x\n", #x, cap); \
375} while (0)
376 int error;
377 u32 cap;
378
379 /* collect device capabilities */
380 GETCAP(CAP_TARGET_VERSION);
381 GETCAP(CAP_TARGET_REVISION);
382 GETCAP(CAP_MAC_VERSION);
383 GETCAP(CAP_MAC_REVISION);
384 GETCAP(CAP_PHY_REVISION);
385 GETCAP(CAP_ANALOG_5GHz_REVISION);
386 GETCAP(CAP_ANALOG_2GHz_REVISION);
387
388 GETCAP(CAP_REG_DOMAIN);
389 GETCAP(CAP_REG_CAP_BITS);
390 GETCAP(CAP_WIRELESS_MODES);
391 GETCAP(CAP_CHAN_SPREAD_SUPPORT);
392 GETCAP(CAP_COMPRESS_SUPPORT);
393 GETCAP(CAP_BURST_SUPPORT);
394 GETCAP(CAP_FAST_FRAMES_SUPPORT);
395 GETCAP(CAP_CHAP_TUNING_SUPPORT);
396 GETCAP(CAP_TURBOG_SUPPORT);
397 GETCAP(CAP_TURBO_PRIME_SUPPORT);
398 GETCAP(CAP_DEVICE_TYPE);
399 GETCAP(CAP_WME_SUPPORT);
400 GETCAP(CAP_TOTAL_QUEUES);
401 GETCAP(CAP_CONNECTION_ID_MAX);
402
403 GETCAP(CAP_LOW_5GHZ_CHAN);
404 GETCAP(CAP_HIGH_5GHZ_CHAN);
405 GETCAP(CAP_LOW_2GHZ_CHAN);
406 GETCAP(CAP_HIGH_2GHZ_CHAN);
407 GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
408 GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
409
410 GETCAP(CAP_CIPHER_AES_CCM);
411 GETCAP(CAP_CIPHER_TKIP);
412 GETCAP(CAP_MIC_TKIP);
413 return 0;
414}
415
416static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
417{
418 struct ar5523_cmd_ledsteady led;
419
420 led.lednum = cpu_to_be32(lednum);
421 led.ledmode = cpu_to_be32(ledmode);
422
423 ar5523_dbg(ar, "set %s led %s (steady)\n",
424 (lednum == UATH_LED_LINK) ? "link" : "activity",
425 ledmode ? "on" : "off");
426 return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
427 0);
428}
429
430static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
431{
432 struct ar5523_cmd_rx_filter rxfilter;
433
434 rxfilter.bits = cpu_to_be32(bits);
435 rxfilter.op = cpu_to_be32(op);
436
437 ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
438 return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
439 sizeof(rxfilter), 0);
440}
441
442static int ar5523_reset_tx_queues(struct ar5523 *ar)
443{
444 __be32 qid = cpu_to_be32(0);
445
446 ar5523_dbg(ar, "resetting Tx queue\n");
447 return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
448 &qid, sizeof(qid), 0);
449}
450
451static int ar5523_set_chan(struct ar5523 *ar)
452{
453 struct ieee80211_conf *conf = &ar->hw->conf;
454
455 struct ar5523_cmd_reset reset;
456
457 memset(&reset, 0, sizeof(reset));
458 reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
459 reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
460 reset.freq = cpu_to_be32(conf->channel->center_freq);
461 reset.maxrdpower = cpu_to_be32(50); /* XXX */
462 reset.channelchange = cpu_to_be32(1);
463 reset.keeprccontent = cpu_to_be32(0);
464
465 ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
466 be32_to_cpu(reset.flags),
467 conf->channel->center_freq);
468 return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
469}
470
471static int ar5523_queue_init(struct ar5523 *ar)
472{
473 struct ar5523_cmd_txq_setup qinfo;
474
475 ar5523_dbg(ar, "setting up Tx queue\n");
476 qinfo.qid = cpu_to_be32(0);
477 qinfo.len = cpu_to_be32(sizeof(qinfo.attr));
478 qinfo.attr.priority = cpu_to_be32(0); /* XXX */
479 qinfo.attr.aifs = cpu_to_be32(3);
480 qinfo.attr.logcwmin = cpu_to_be32(4);
481 qinfo.attr.logcwmax = cpu_to_be32(10);
482 qinfo.attr.bursttime = cpu_to_be32(0);
483 qinfo.attr.mode = cpu_to_be32(0);
484 qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */
485 return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
486 sizeof(qinfo), 0);
487}
488
/*
 * Full channel switch: tune the radio, reset the TX queue and
 * reprogram its WME parameters.  Returns the first error seen.
 */
static int ar5523_switch_chan(struct ar5523 *ar)
{
	int ret;

	ret = ar5523_set_chan(ar);
	if (ret) {
		ar5523_err(ar, "could not set chan, error %d\n", ret);
		return ret;
	}

	/* reset Tx rings */
	ret = ar5523_reset_tx_queues(ar);
	if (ret) {
		ar5523_err(ar, "could not reset Tx queues, error %d\n",
			   ret);
		return ret;
	}

	/* set Tx rings WME properties */
	ret = ar5523_queue_init(ar);
	if (ret)
		ar5523_err(ar, "could not init wme, error %d\n", ret);

	return ret;
}
514
515static void ar5523_rx_data_put(struct ar5523 *ar,
516 struct ar5523_rx_data *data)
517{
518 unsigned long flags;
519 spin_lock_irqsave(&ar->rx_data_list_lock, flags);
520 list_move(&data->list, &ar->rx_data_free);
521 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
522}
523
524static void ar5523_data_rx_cb(struct urb *urb)
525{
526 struct ar5523_rx_data *data = urb->context;
527 struct ar5523 *ar = data->ar;
528 struct ar5523_rx_desc *desc;
529 struct ar5523_chunk *chunk;
530 struct ieee80211_hw *hw = ar->hw;
531 struct ieee80211_rx_status *rx_status;
532 u32 rxlen;
533 int usblen = urb->actual_length;
534 int hdrlen, pad;
535
536 ar5523_dbg(ar, "%s\n", __func__);
537 /* sync/async unlink faults aren't errors */
538 if (urb->status) {
539 if (urb->status != -ESHUTDOWN)
540 ar5523_err(ar, "%s: USB err: %d\n", __func__,
541 urb->status);
542 goto skip;
543 }
544
545 if (usblen < AR5523_MIN_RXBUFSZ) {
546 ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
547 goto skip;
548 }
549
550 chunk = (struct ar5523_chunk *) data->skb->data;
551
552 if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
553 chunk->seqnum != 0) {
554 ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
555 chunk->seqnum, chunk->flags,
556 be16_to_cpu(chunk->length));
557 goto skip;
558 }
559
560 /* Rx descriptor is located at the end, 32-bit aligned */
561 desc = (struct ar5523_rx_desc *)
562 (data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
563
564 rxlen = be32_to_cpu(desc->len);
565 if (rxlen > ar->rxbufsz) {
566 ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
567 be32_to_cpu(desc->len));
568 goto skip;
569 }
570
571 if (!rxlen) {
572 ar5523_dbg(ar, "RX: rxlen is 0\n");
573 goto skip;
574 }
575
576 if (be32_to_cpu(desc->status) != 0) {
577 ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
578 be32_to_cpu(desc->status), be32_to_cpu(desc->len));
579 goto skip;
580 }
581
582 skb_reserve(data->skb, sizeof(*chunk));
583 skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
584
585 hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
586 if (!IS_ALIGNED(hdrlen, 4)) {
587 ar5523_dbg(ar, "eek, alignment workaround activated\n");
588 pad = ALIGN(hdrlen, 4) - hdrlen;
589 memmove(data->skb->data + pad, data->skb->data, hdrlen);
590 skb_pull(data->skb, pad);
591 skb_put(data->skb, pad);
592 }
593
594 rx_status = IEEE80211_SKB_RXCB(data->skb);
595 memset(rx_status, 0, sizeof(*rx_status));
596 rx_status->freq = be32_to_cpu(desc->channel);
597 rx_status->band = hw->conf.channel->band;
598 rx_status->signal = -95 + be32_to_cpu(desc->rssi);
599
600 ieee80211_rx_irqsafe(hw, data->skb);
601 data->skb = NULL;
602
603skip:
604 if (data->skb) {
605 dev_kfree_skb_irq(data->skb);
606 data->skb = NULL;
607 }
608
609 ar5523_rx_data_put(ar, data);
610 if (atomic_inc_return(&ar->rx_data_free_cnt) >=
611 AR5523_RX_DATA_REFILL_COUNT &&
612 test_bit(AR5523_HW_UP, &ar->flags))
613 queue_work(ar->wq, &ar->rx_refill_work);
614}
615
616static void ar5523_rx_refill_work(struct work_struct *work)
617{
618 struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
619 struct ar5523_rx_data *data;
620 unsigned long flags;
621 int error;
622
623 ar5523_dbg(ar, "%s\n", __func__);
624 do {
625 spin_lock_irqsave(&ar->rx_data_list_lock, flags);
626
627 if (!list_empty(&ar->rx_data_free))
628 data = (struct ar5523_rx_data *) ar->rx_data_free.next;
629 else
630 data = NULL;
631 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
632
633 if (!data)
634 goto done;
635
636 data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
637 if (!data->skb) {
638 ar5523_err(ar, "could not allocate rx skbuff\n");
639 return;
640 }
641
642 usb_fill_bulk_urb(data->urb, ar->dev,
643 ar5523_data_rx_pipe(ar->dev), data->skb->data,
644 ar->rxbufsz, ar5523_data_rx_cb, data);
645
646 spin_lock_irqsave(&ar->rx_data_list_lock, flags);
647 list_move(&data->list, &ar->rx_data_used);
648 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
649 atomic_dec(&ar->rx_data_free_cnt);
650
651 error = usb_submit_urb(data->urb, GFP_KERNEL);
652 if (error) {
653 kfree_skb(data->skb);
654 if (error != -ENODEV)
655 ar5523_err(ar, "Err sending rx data urb %d\n",
656 error);
657 ar5523_rx_data_put(ar, data);
658 atomic_inc(&ar->rx_data_free_cnt);
659 return;
660 }
661
662 } while (true);
663done:
664 return;
665}
666
667static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
668{
669 struct ar5523_rx_data *data;
670 unsigned long flags;
671
672 do {
673 spin_lock_irqsave(&ar->rx_data_list_lock, flags);
674 if (!list_empty(&ar->rx_data_used))
675 data = (struct ar5523_rx_data *) ar->rx_data_used.next;
676 else
677 data = NULL;
678 spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
679
680 if (!data)
681 break;
682
683 usb_kill_urb(data->urb);
684 list_move(&data->list, &ar->rx_data_free);
685 atomic_inc(&ar->rx_data_free_cnt);
686 } while (data);
687}
688
689static void ar5523_free_rx_bufs(struct ar5523 *ar)
690{
691 struct ar5523_rx_data *data;
692
693 ar5523_cancel_rx_bufs(ar);
694 while (!list_empty(&ar->rx_data_free)) {
695 data = (struct ar5523_rx_data *) ar->rx_data_free.next;
696 list_del(&data->list);
697 usb_free_urb(data->urb);
698 }
699}
700
701static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
702{
703 int i;
704
705 for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
706 struct ar5523_rx_data *data = &ar->rx_data[i];
707
708 data->ar = ar;
709 data->urb = usb_alloc_urb(0, GFP_KERNEL);
710 if (!data->urb) {
711 ar5523_err(ar, "could not allocate rx data urb\n");
712 goto err;
713 }
714 list_add_tail(&data->list, &ar->rx_data_free);
715 atomic_inc(&ar->rx_data_free_cnt);
716 }
717 return 0;
718
719err:
720 ar5523_free_rx_bufs(ar);
721 return -ENOMEM;
722}
723
724static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
725{
726 atomic_dec(&ar->tx_nr_total);
727 if (!atomic_dec_return(&ar->tx_nr_pending)) {
728 del_timer(&ar->tx_wd_timer);
729 wake_up(&ar->tx_flush_waitq);
730 }
731
732 if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
733 ar5523_dbg(ar, "restart tx queue\n");
734 ieee80211_wake_queues(ar->hw);
735 }
736}
737
738static void ar5523_data_tx_cb(struct urb *urb)
739{
740 struct sk_buff *skb = urb->context;
741 struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
742 struct ar5523_tx_data *data = (struct ar5523_tx_data *)
743 txi->driver_data;
744 struct ar5523 *ar = data->ar;
745 unsigned long flags;
746
747 ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
748
749 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
750 list_del(&data->list);
751 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
752
753 if (urb->status) {
754 ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
755 ar5523_data_tx_pkt_put(ar);
756 ieee80211_free_txskb(ar->hw, skb);
757 } else {
758 skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
759 ieee80211_tx_status_irqsafe(ar->hw, skb);
760 }
761 usb_free_urb(urb);
762}
763
764static void ar5523_tx(struct ieee80211_hw *hw,
765 struct ieee80211_tx_control *control,
766 struct sk_buff *skb)
767{
768 struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
769 struct ar5523_tx_data *data = (struct ar5523_tx_data *)
770 txi->driver_data;
771 struct ar5523 *ar = hw->priv;
772 unsigned long flags;
773
774 ar5523_dbg(ar, "tx called\n");
775 if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
776 ar5523_dbg(ar, "tx queue full\n");
777 ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
778 atomic_read(&ar->tx_nr_total),
779 atomic_read(&ar->tx_nr_pending));
780 ieee80211_stop_queues(hw);
781 }
782
783 data->skb = skb;
784
785 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
786 list_add_tail(&data->list, &ar->tx_queue_pending);
787 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
788
789 ieee80211_queue_work(ar->hw, &ar->tx_work);
790}
791
792static void ar5523_tx_work_locked(struct ar5523 *ar)
793{
794 struct ar5523_tx_data *data;
795 struct ar5523_tx_desc *desc;
796 struct ar5523_chunk *chunk;
797 struct ieee80211_tx_info *txi;
798 struct urb *urb;
799 struct sk_buff *skb;
800 int error = 0, paylen;
801 u32 txqid;
802 unsigned long flags;
803
804 BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
805 IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
806
807 ar5523_dbg(ar, "%s\n", __func__);
808 do {
809 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
810 if (!list_empty(&ar->tx_queue_pending)) {
811 data = (struct ar5523_tx_data *)
812 ar->tx_queue_pending.next;
813 list_del(&data->list);
814 } else
815 data = NULL;
816 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
817
818 if (!data)
819 break;
820
821 skb = data->skb;
822 txqid = 0;
823 txi = IEEE80211_SKB_CB(skb);
824 paylen = skb->len;
825 urb = usb_alloc_urb(0, GFP_KERNEL);
826 if (!urb) {
827 ar5523_err(ar, "Failed to allocate TX urb\n");
828 ieee80211_free_txskb(ar->hw, skb);
829 continue;
830 }
831
832 data->ar = ar;
833 data->urb = urb;
834
835 desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
836 chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));
837
838 chunk->seqnum = 0;
839 chunk->flags = UATH_CFLAGS_FINAL;
840 chunk->length = cpu_to_be16(skb->len);
841
842 desc->msglen = cpu_to_be32(skb->len);
843 desc->msgid = AR5523_DATA_ID;
844 desc->buflen = cpu_to_be32(paylen);
845 desc->type = cpu_to_be32(WDCMSG_SEND);
846 desc->flags = cpu_to_be32(UATH_TX_NOTIFY);
847
848 if (test_bit(AR5523_CONNECTED, &ar->flags))
849 desc->connid = cpu_to_be32(AR5523_ID_BSS);
850 else
851 desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
852
853 if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
854 txqid |= UATH_TXQID_MINRATE;
855
856 desc->txqid = cpu_to_be32(txqid);
857
858 urb->transfer_flags = URB_ZERO_PACKET;
859 usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
860 skb->data, skb->len, ar5523_data_tx_cb, skb);
861
862 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
863 list_add_tail(&data->list, &ar->tx_queue_submitted);
864 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
865 mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
866 atomic_inc(&ar->tx_nr_pending);
867
868 ar5523_dbg(ar, "TX Frame (%d pending)\n",
869 atomic_read(&ar->tx_nr_pending));
870 error = usb_submit_urb(urb, GFP_KERNEL);
871 if (error) {
872 ar5523_err(ar, "error %d when submitting tx urb\n",
873 error);
874 spin_lock_irqsave(&ar->tx_data_list_lock, flags);
875 list_del(&data->list);
876 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
877 atomic_dec(&ar->tx_nr_pending);
878 ar5523_data_tx_pkt_put(ar);
879 usb_free_urb(urb);
880 ieee80211_free_txskb(ar->hw, skb);
881 }
882 } while (true);
883}
884
885static void ar5523_tx_work(struct work_struct *work)
886{
887 struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
888
889 ar5523_dbg(ar, "%s\n", __func__);
890 mutex_lock(&ar->mutex);
891 ar5523_tx_work_locked(ar);
892 mutex_unlock(&ar->mutex);
893}
894
895static void ar5523_tx_wd_timer(unsigned long arg)
896{
897 struct ar5523 *ar = (struct ar5523 *) arg;
898
899 ar5523_dbg(ar, "TX watchdog timer triggered\n");
900 ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
901}
902
903static void ar5523_tx_wd_work(struct work_struct *work)
904{
905 struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
906
907 /* Occasionally the TX queues stop responding. The only way to
908 * recover seems to be to reset the dongle.
909 */
910
911 mutex_lock(&ar->mutex);
912 ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
913 atomic_read(&ar->tx_nr_total),
914 atomic_read(&ar->tx_nr_pending));
915
916 ar5523_err(ar, "Will restart dongle.\n");
917 ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
918 mutex_unlock(&ar->mutex);
919}
920
921static void ar5523_flush_tx(struct ar5523 *ar)
922{
923 ar5523_tx_work_locked(ar);
924
925 /* Don't waste time trying to flush if USB is disconnected */
926 if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
927 return;
928 if (!wait_event_timeout(ar->tx_flush_waitq,
929 !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
930 ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
931 atomic_read(&ar->tx_nr_total),
932 atomic_read(&ar->tx_nr_pending));
933}
934
935static void ar5523_free_tx_cmd(struct ar5523 *ar)
936{
937 struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
938
939 usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ, cmd->buf_tx,
940 cmd->urb_tx->transfer_dma);
941 usb_free_urb(cmd->urb_tx);
942}
943
944static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
945{
946 struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
947
948 cmd->ar = ar;
949 init_completion(&cmd->done);
950
951 cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
952 if (!cmd->urb_tx) {
953 ar5523_err(ar, "could not allocate urb\n");
954 return -ENOMEM;
955 }
956 cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
957 GFP_KERNEL,
958 &cmd->urb_tx->transfer_dma);
959 if (!cmd->buf_tx) {
960 usb_free_urb(cmd->urb_tx);
961 return -ENOMEM;
962 }
963 return 0;
964}
965
/*
 * This function is called periodically (every second) when associated to
 * query device statistics.
 */
static void ar5523_stat_work(struct work_struct *work)
{
	struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
	int error;

	ar5523_dbg(ar, "%s\n", __func__);
	mutex_lock(&ar->mutex);

	/*
	 * Send request for statistics asynchronously once a second. This
	 * seems to be important. Throughput is a lot better if this is done.
	 */
	error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
	if (error)
		ar5523_err(ar, "could not query stats, error %d\n", error);
	mutex_unlock(&ar->mutex);

	/* Re-arm: keep polling once per second until cancelled. */
	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
}
988
/*
 * Interface routines to the mac80211 stack.
 */
/* mac80211 .start callback: bind to the target, push the static
 * configuration, start the target, tune the channel and enable RX.
 * Returns 0 on success or the error from WDCMSG_TARGET_START.
 */
static int ar5523_start(struct ieee80211_hw *hw)
{
	struct ar5523 *ar = hw->priv;
	int error;
	__be32 val;

	ar5523_dbg(ar, "start called\n");

	mutex_lock(&ar->mutex);
	/* Bind with id 0; return value deliberately ignored. */
	val = cpu_to_be32(0);
	ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);

	/* set MAC address */
	ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
			    ETH_ALEN);

	/* XXX honor net80211 state */
	ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
	ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
	ar5523_config(ar, CFG_ABOLT, 0x0000003f);
	ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);

	ar5523_config(ar, CFG_SERVICE_TYPE, 1);
	ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
	ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
	ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
	ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
	ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
	ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
	ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
	ar5523_config(ar, CFG_MODE_CTS, 0x00000002);

	/* Start the target; it answers with a handle we only log. */
	error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
				&val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
	if (error) {
		ar5523_dbg(ar, "could not start target, error %d\n", error);
		goto err;
	}
	ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
		   be32_to_cpu(val));

	ar5523_switch_chan(ar);

	val = cpu_to_be32(TARGET_DEVICE_AWAKE);
	ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
	/* XXX? check */
	ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);

	set_bit(AR5523_HW_UP, &ar->flags);
	queue_work(ar->wq, &ar->rx_refill_work);

	/* enable Rx */
	ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
	ar5523_set_rxfilter(ar,
			    UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
			    UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
			    UATH_FILTER_OP_SET);

	ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
	ar5523_dbg(ar, "start OK\n");

	/* Success falls through: error is 0 here from the last cmd_read. */
err:
	mutex_unlock(&ar->mutex);
	return error;
}
1057
/* mac80211 .stop callback: stop the target and tear down all deferred
 * work (stat poll, watchdog, RX refill) and outstanding RX buffers.
 * Note stat_work is cancelled before taking ar->mutex since the worker
 * takes the mutex itself.
 */
static void ar5523_stop(struct ieee80211_hw *hw)
{
	struct ar5523 *ar = hw->priv;

	ar5523_dbg(ar, "stop called\n");

	cancel_delayed_work_sync(&ar->stat_work);
	mutex_lock(&ar->mutex);
	clear_bit(AR5523_HW_UP, &ar->flags);

	ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
	ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);

	ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);

	del_timer_sync(&ar->tx_wd_timer);
	cancel_work_sync(&ar->tx_wd_work);
	cancel_work_sync(&ar->rx_refill_work);
	ar5523_cancel_rx_bufs(ar);
	mutex_unlock(&ar->mutex);
}
1079
1080static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1081{
1082 struct ar5523 *ar = hw->priv;
1083 int ret;
1084
1085 ar5523_dbg(ar, "set_rts_threshold called\n");
1086 mutex_lock(&ar->mutex);
1087
1088 ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
1089
1090 mutex_unlock(&ar->mutex);
1091 return ret;
1092}
1093
/* mac80211 .flush callback; @drop is ignored — frames are always flushed.
 * NOTE(review): ar5523_flush_tx() is called here without ar->mutex, while
 * ar5523_hwconfig() calls it with the mutex held — confirm the locking
 * requirements of ar5523_tx_work_locked().
 */
static void ar5523_flush(struct ieee80211_hw *hw, bool drop)
{
	struct ar5523 *ar = hw->priv;

	ar5523_dbg(ar, "flush called\n");
	ar5523_flush_tx(ar);
}
1101
1102static int ar5523_add_interface(struct ieee80211_hw *hw,
1103 struct ieee80211_vif *vif)
1104{
1105 struct ar5523 *ar = hw->priv;
1106
1107 ar5523_dbg(ar, "add interface called\n");
1108
1109 if (ar->vif) {
1110 ar5523_dbg(ar, "invalid add_interface\n");
1111 return -EOPNOTSUPP;
1112 }
1113
1114 switch (vif->type) {
1115 case NL80211_IFTYPE_STATION:
1116 ar->vif = vif;
1117 break;
1118 default:
1119 return -EOPNOTSUPP;
1120 }
1121 return 0;
1122}
1123
1124static void ar5523_remove_interface(struct ieee80211_hw *hw,
1125 struct ieee80211_vif *vif)
1126{
1127 struct ar5523 *ar = hw->priv;
1128
1129 ar5523_dbg(ar, "remove interface called\n");
1130 ar->vif = NULL;
1131}
1132
/* mac80211 .config callback. Only channel changes are acted upon: pending
 * TX is flushed before retuning so no frames go out on the wrong channel.
 */
static int ar5523_hwconfig(struct ieee80211_hw *hw, u32 changed)
{
	struct ar5523 *ar = hw->priv;

	ar5523_dbg(ar, "config called\n");
	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		ar5523_dbg(ar, "Do channel switch\n");
		ar5523_flush_tx(ar);
		ar5523_switch_chan(ar);
	}
	mutex_unlock(&ar->mutex);
	return 0;
}
1147
1148static int ar5523_get_wlan_mode(struct ar5523 *ar,
1149 struct ieee80211_bss_conf *bss_conf)
1150{
1151 struct ieee80211_supported_band *band;
1152 int bit;
1153 struct ieee80211_sta *sta;
1154 u32 sta_rate_set;
1155
1156 band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
1157 sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
1158 if (!sta) {
1159 ar5523_info(ar, "STA not found!\n");
1160 return WLAN_MODE_11b;
1161 }
1162 sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];
1163
1164 for (bit = 0; bit < band->n_bitrates; bit++) {
1165 if (sta_rate_set & 1) {
1166 int rate = band->bitrates[bit].bitrate;
1167 switch (rate) {
1168 case 60:
1169 case 90:
1170 case 120:
1171 case 180:
1172 case 240:
1173 case 360:
1174 case 480:
1175 case 540:
1176 return WLAN_MODE_11g;
1177 }
1178 }
1179 sta_rate_set >>= 1;
1180 }
1181 return WLAN_MODE_11b;
1182}
1183
/* Build a firmware rateset from the peer STA's supported rates (falling
 * back to the BSS basic rates when the STA cannot be found). When @basic
 * is true, rates that are also basic rates get bit 0x80 set. Both bitmaps
 * are walked in lockstep, one bit per band bitrate.
 */
static void ar5523_create_rateset(struct ar5523 *ar,
				  struct ieee80211_bss_conf *bss_conf,
				  struct ar5523_cmd_rateset *rs,
				  bool basic)
{
	struct ieee80211_supported_band *band;
	struct ieee80211_sta *sta;
	int bit, i = 0;
	u32 sta_rate_set, basic_rate_set;

	sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
	basic_rate_set = bss_conf->basic_rates;
	if (!sta) {
		ar5523_info(ar, "STA not found. Cannot set rates\n");
		sta_rate_set = bss_conf->basic_rates;
	} else
		sta_rate_set = sta->supp_rates[ar->hw->conf.channel->band];

	ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);

	band = ar->hw->wiphy->bands[ar->hw->conf.channel->band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		/* The output array is fixed-size; overflow is a driver bug. */
		BUG_ON(i >= AR5523_MAX_NRATES);
		ar5523_dbg(ar, "Considering rate %d : %d\n",
			   band->bitrates[bit].hw_value, sta_rate_set & 1);
		if (sta_rate_set & 1) {
			rs->set[i] = band->bitrates[bit].hw_value;
			if (basic_rate_set & 1 && basic)
				rs->set[i] |= 0x80;
			i++;
		}
		/* Advance both bitmaps together. */
		sta_rate_set >>= 1;
		basic_rate_set >>= 1;
	}

	rs->length = i;
}
1221
1222static int ar5523_set_basic_rates(struct ar5523 *ar,
1223 struct ieee80211_bss_conf *bss)
1224{
1225 struct ar5523_cmd_rates rates;
1226
1227 memset(&rates, 0, sizeof(rates));
1228 rates.connid = cpu_to_be32(2); /* XXX */
1229 rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
1230 ar5523_create_rateset(ar, bss, &rates.rateset, true);
1231
1232 return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
1233 sizeof(rates), 0);
1234}
1235
1236static int ar5523_create_connection(struct ar5523 *ar,
1237 struct ieee80211_vif *vif,
1238 struct ieee80211_bss_conf *bss)
1239{
1240 struct ar5523_cmd_create_connection create;
1241 int wlan_mode;
1242
1243 memset(&create, 0, sizeof(create));
1244 create.connid = cpu_to_be32(2);
1245 create.bssid = cpu_to_be32(0);
1246 /* XXX packed or not? */
1247 create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
1248
1249 ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
1250
1251 wlan_mode = ar5523_get_wlan_mode(ar, bss);
1252 create.connattr.wlanmode = cpu_to_be32(wlan_mode);
1253
1254 return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
1255 sizeof(create), 0);
1256}
1257
1258static int ar5523_write_associd(struct ar5523 *ar,
1259 struct ieee80211_bss_conf *bss)
1260{
1261 struct ar5523_cmd_set_associd associd;
1262
1263 memset(&associd, 0, sizeof(associd));
1264 associd.defaultrateix = cpu_to_be32(0); /* XXX */
1265 associd.associd = cpu_to_be32(bss->aid);
1266 associd.timoffset = cpu_to_be32(0x3b); /* XXX */
1267 memcpy(associd.bssid, bss->bssid, ETH_ALEN);
1268 return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
1269 sizeof(associd), 0);
1270}
1271
/* mac80211 .bss_info_changed callback. Only BSS_CHANGED_ASSOC is handled:
 * on association the firmware connection, rates and assoc id are
 * programmed and the 1 s stats poll is started; on disassociation the
 * poll is cancelled and the link LED turned off.
 */
static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *bss,
				    u32 changed)
{
	struct ar5523 *ar = hw->priv;
	int error;

	ar5523_dbg(ar, "bss_info_changed called\n");
	mutex_lock(&ar->mutex);

	if (!(changed & BSS_CHANGED_ASSOC))
		goto out_unlock;

	if (bss->assoc) {
		error = ar5523_create_connection(ar, vif, bss);
		if (error) {
			ar5523_err(ar, "could not create connection\n");
			goto out_unlock;
		}

		error = ar5523_set_basic_rates(ar, bss);
		if (error) {
			ar5523_err(ar, "could not set negotiated rate set\n");
			goto out_unlock;
		}

		error = ar5523_write_associd(ar, bss);
		if (error) {
			ar5523_err(ar, "could not set association\n");
			goto out_unlock;
		}

		/* turn link LED on */
		ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
		set_bit(AR5523_CONNECTED, &ar->flags);
		ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);

	} else {
		cancel_delayed_work(&ar->stat_work);
		clear_bit(AR5523_CONNECTED, &ar->flags);
		ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
	}

out_unlock:
	mutex_unlock(&ar->mutex);

}
1320
/* RX filter flags this driver reports to mac80211 as supported. */
#define AR5523_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_OTHER_BSS)

/* mac80211 .configure_filter callback. The requested flags are clamped to
 * AR5523_SUPPORTED_FILTERS, but the hardware filter that is actually
 * programmed is fixed (see comment below) regardless of the request.
 */
static void ar5523_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct ar5523 *ar = hw->priv;
	u32 filter = 0;

	ar5523_dbg(ar, "configure_filter called\n");
	mutex_lock(&ar->mutex);
	ar5523_flush_tx(ar);

	*total_flags &= AR5523_SUPPORTED_FILTERS;

	/* The filters seems strange. UATH_FILTER_RX_BCAST and
	 * UATH_FILTER_RX_MCAST does not result in those frames being RXed.
	 * The only way I have found to get [mb]cast frames seems to be
	 * to set UATH_FILTER_RX_PROM. */
	filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
		  UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
		  UATH_FILTER_RX_PROM;

	ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
	ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);

	mutex_unlock(&ar->mutex);
}
1353
/* mac80211 callback table for this driver. */
static const struct ieee80211_ops ar5523_ops = {
	.start			= ar5523_start,
	.stop			= ar5523_stop,
	.tx			= ar5523_tx,
	.set_rts_threshold	= ar5523_set_rts_threshold,
	.add_interface		= ar5523_add_interface,
	.remove_interface	= ar5523_remove_interface,
	.config			= ar5523_hwconfig,
	.bss_info_changed	= ar5523_bss_info_changed,
	.configure_filter	= ar5523_configure_filter,
	.flush			= ar5523_flush,
};
1366
/* First command after probe: announce the host driver version to the
 * target (WDCMSG_HOST_AVAILABLE) and wait for its reply.
 * NOTE(review): only the four version fields are assigned; if the command
 * struct contains any other members or padding they are sent
 * uninitialized — confirm against the struct layout.
 */
static int ar5523_host_available(struct ar5523 *ar)
{
	struct ar5523_cmd_host_available setup;

	/* inform target the host is available */
	setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
	setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
	setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
	setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
	return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
			       &setup, sizeof(setup), NULL, 0, 0);
}
1379
1380static int ar5523_get_devstatus(struct ar5523 *ar)
1381{
1382 u8 macaddr[ETH_ALEN];
1383 int error;
1384
1385 /* retrieve MAC address */
1386 error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
1387 if (error) {
1388 ar5523_err(ar, "could not read MAC address\n");
1389 return error;
1390 }
1391
1392 SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
1393
1394 error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
1395 &ar->serial[0], sizeof(ar->serial));
1396 if (error) {
1397 ar5523_err(ar, "could not read device serial number\n");
1398 return error;
1399 }
1400 return 0;
1401}
1402
/* Upper bound used to sanity-check the RX chunk size reported by the
 * device. */
#define AR5523_SANE_RXBUFSZ 2000

/* Query the device's maximum RX transfer chunk size and store it in
 * ar->rxbufsz, clamping zero or implausibly large answers to
 * AR5523_SANE_RXBUFSZ.
 */
static int ar5523_get_max_rxsz(struct ar5523 *ar)
{
	int error;
	__be32 rxsize;

	/* Get max rx size */
	error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
				  sizeof(rxsize));
	if (error != 0) {
		ar5523_err(ar, "could not read max RX size\n");
		return error;
	}

	ar->rxbufsz = be32_to_cpu(rxsize);

	if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
		ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
			   AR5523_SANE_RXBUFSZ);
		ar->rxbufsz = AR5523_SANE_RXBUFSZ;
	}

	ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
	return 0;
}
1429
/*
 * This is copied from rtl818x, but we should probably move this
 * to common code as in OpenBSD.
 */
/* 11b + 11g rate table; .bitrate is in units of 100 kbps per the
 * mac80211 convention, .hw_value is the firmware rate code. */
static const struct ieee80211_rate ar5523_rates[] = {
	{ .bitrate = 10, .hw_value = 2, },
	{ .bitrate = 20, .hw_value = 4 },
	{ .bitrate = 55, .hw_value = 11, },
	{ .bitrate = 110, .hw_value = 22, },
	{ .bitrate = 60, .hw_value = 12, },
	{ .bitrate = 90, .hw_value = 18, },
	{ .bitrate = 120, .hw_value = 24, },
	{ .bitrate = 180, .hw_value = 36, },
	{ .bitrate = 240, .hw_value = 48, },
	{ .bitrate = 360, .hw_value = 72, },
	{ .bitrate = 480, .hw_value = 96, },
	{ .bitrate = 540, .hw_value = 108, },
};
1448
/* 2.4 GHz channels 1-14 (center frequencies in MHz). */
static const struct ieee80211_channel ar5523_channels[] = {
	{ .center_freq = 2412 },
	{ .center_freq = 2417 },
	{ .center_freq = 2422 },
	{ .center_freq = 2427 },
	{ .center_freq = 2432 },
	{ .center_freq = 2437 },
	{ .center_freq = 2442 },
	{ .center_freq = 2447 },
	{ .center_freq = 2452 },
	{ .center_freq = 2457 },
	{ .center_freq = 2462 },
	{ .center_freq = 2467 },
	{ .center_freq = 2472 },
	{ .center_freq = 2484 },
};
1465
1466static int ar5523_init_modes(struct ar5523 *ar)
1467{
1468 BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
1469 BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
1470
1471 memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
1472 memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
1473
1474 ar->band.band = IEEE80211_BAND_2GHZ;
1475 ar->band.channels = ar->channels;
1476 ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
1477 ar->band.bitrates = ar->rates;
1478 ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
1479 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
1480 return 0;
1481}
1482
/*
 * Load the MIPS R4000 microcode into the device. Once the image is loaded,
 * the device will detach itself from the bus and reattach later with a new
 * product Id (a la ezusb).
 */
/* Each AR5523_MAX_FWBLOCK_SIZE chunk is sent as a three-step bulk
 * handshake: metadata block, data block, then an ack read back from the
 * device. Returns -ENXIO on success (see comment near the end) or a
 * negative error.
 */
static int ar5523_load_firmware(struct usb_device *dev)
{
	struct ar5523_fwblock *txblock, *rxblock;
	const struct firmware *fw;
	void *fwbuf;
	int len, offset;
	int foolen; /* XXX(hch): handle short transfers */
	int error = -ENXIO;

	if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
		dev_err(&dev->dev, "no firmware found: %s\n",
			AR5523_FIRMWARE_FILE);
		return -ENOENT;
	}

	txblock = kmalloc(sizeof(*txblock), GFP_KERNEL);
	if (!txblock)
		goto out;

	rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
	if (!rxblock)
		goto out_free_txblock;

	fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
	if (!fwbuf)
		goto out_free_rxblock;

	memset(txblock, 0, sizeof(struct ar5523_fwblock));
	txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
	txblock->total = cpu_to_be32(fw->size);

	offset = 0;
	len = fw->size;
	while (len > 0) {
		int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);

		txblock->remain = cpu_to_be32(len - mlen);
		txblock->len = cpu_to_be32(mlen);

		/* send firmware block meta-data */
		error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
				     txblock, sizeof(*txblock), &foolen,
				     AR5523_CMD_TIMEOUT);
		if (error) {
			dev_err(&dev->dev,
				"could not send firmware block info\n");
			goto out_free_fwbuf;
		}

		/* send firmware block data */
		memcpy(fwbuf, fw->data + offset, mlen);
		error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
				     fwbuf, mlen, &foolen,
				     AR5523_DATA_TIMEOUT);
		if (error) {
			dev_err(&dev->dev,
				"could not send firmware block data\n");
			goto out_free_fwbuf;
		}

		/* wait for ack from firmware */
		error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
				     rxblock, sizeof(*rxblock), &foolen,
				     AR5523_CMD_TIMEOUT);
		if (error) {
			dev_err(&dev->dev,
				"could not read firmware answer\n");
			goto out_free_fwbuf;
		}

		len -= mlen;
		offset += mlen;
	}

	/*
	 * Set the error to -ENXIO to make sure we continue probing for
	 * a driver.
	 */
	error = -ENXIO;

 out_free_fwbuf:
	kfree(fwbuf);
 out_free_rxblock:
	kfree(rxblock);
 out_free_txblock:
	kfree(txblock);
 out:
	release_firmware(fw);
	return error;
}
1578
1579static int ar5523_probe(struct usb_interface *intf,
1580 const struct usb_device_id *id)
1581{
1582 struct usb_device *dev = interface_to_usbdev(intf);
1583 struct ieee80211_hw *hw;
1584 struct ar5523 *ar;
1585 int error = -ENOMEM;
1586
1587 /*
1588 * Load firmware if the device requires it. This will return
1589 * -ENXIO on success and we'll get called back afer the usb
1590 * id changes to indicate that the firmware is present.
1591 */
1592 if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
1593 return ar5523_load_firmware(dev);
1594
1595
1596 hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
1597 if (!hw)
1598 goto out;
1599 SET_IEEE80211_DEV(hw, &intf->dev);
1600
1601 ar = hw->priv;
1602 ar->hw = hw;
1603 ar->dev = dev;
1604 mutex_init(&ar->mutex);
1605
1606 INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
1607 init_timer(&ar->tx_wd_timer);
1608 setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
1609 INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
1610 INIT_WORK(&ar->tx_work, ar5523_tx_work);
1611 INIT_LIST_HEAD(&ar->tx_queue_pending);
1612 INIT_LIST_HEAD(&ar->tx_queue_submitted);
1613 spin_lock_init(&ar->tx_data_list_lock);
1614 atomic_set(&ar->tx_nr_total, 0);
1615 atomic_set(&ar->tx_nr_pending, 0);
1616 init_waitqueue_head(&ar->tx_flush_waitq);
1617
1618 atomic_set(&ar->rx_data_free_cnt, 0);
1619 INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
1620 INIT_LIST_HEAD(&ar->rx_data_free);
1621 INIT_LIST_HEAD(&ar->rx_data_used);
1622 spin_lock_init(&ar->rx_data_list_lock);
1623
1624 ar->wq = create_singlethread_workqueue("ar5523");
1625 if (!ar->wq) {
1626 ar5523_err(ar, "Could not create wq\n");
1627 goto out_free_ar;
1628 }
1629
1630 error = ar5523_alloc_rx_bufs(ar);
1631 if (error) {
1632 ar5523_err(ar, "Could not allocate rx buffers\n");
1633 goto out_free_wq;
1634 }
1635
1636 error = ar5523_alloc_rx_cmd(ar);
1637 if (error) {
1638 ar5523_err(ar, "Could not allocate rx command buffers\n");
1639 goto out_free_rx_bufs;
1640 }
1641
1642 error = ar5523_alloc_tx_cmd(ar);
1643 if (error) {
1644 ar5523_err(ar, "Could not allocate tx command buffers\n");
1645 goto out_free_rx_cmd;
1646 }
1647
1648 error = ar5523_submit_rx_cmd(ar);
1649 if (error) {
1650 ar5523_err(ar, "Failed to submit rx cmd\n");
1651 goto out_free_tx_cmd;
1652 }
1653
1654 /*
1655 * We're now ready to send/receive firmware commands.
1656 */
1657 error = ar5523_host_available(ar);
1658 if (error) {
1659 ar5523_err(ar, "could not initialize adapter\n");
1660 goto out_cancel_rx_cmd;
1661 }
1662
1663 error = ar5523_get_max_rxsz(ar);
1664 if (error) {
1665 ar5523_err(ar, "could not get caps from adapter\n");
1666 goto out_cancel_rx_cmd;
1667 }
1668
1669 error = ar5523_get_devcap(ar);
1670 if (error) {
1671 ar5523_err(ar, "could not get caps from adapter\n");
1672 goto out_cancel_rx_cmd;
1673 }
1674
1675 error = ar5523_get_devstatus(ar);
1676 if (error != 0) {
1677 ar5523_err(ar, "could not get device status\n");
1678 goto out_cancel_rx_cmd;
1679 }
1680
1681 ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
1682 (id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
1683
1684 ar->vif = NULL;
1685 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1686 IEEE80211_HW_SIGNAL_DBM |
1687 IEEE80211_HW_HAS_RATE_CONTROL;
1688 hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
1689 sizeof(struct ar5523_chunk);
1690 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1691 hw->queues = 1;
1692
1693 error = ar5523_init_modes(ar);
1694 if (error)
1695 goto out_cancel_rx_cmd;
1696
1697 usb_set_intfdata(intf, hw);
1698
1699 error = ieee80211_register_hw(hw);
1700 if (error) {
1701 ar5523_err(ar, "could not register device\n");
1702 goto out_cancel_rx_cmd;
1703 }
1704
1705 ar5523_info(ar, "Found and initialized AR5523 device\n");
1706 return 0;
1707
1708out_cancel_rx_cmd:
1709 ar5523_cancel_rx_cmd(ar);
1710out_free_tx_cmd:
1711 ar5523_free_tx_cmd(ar);
1712out_free_rx_cmd:
1713 ar5523_free_rx_cmd(ar);
1714out_free_rx_bufs:
1715 ar5523_free_rx_bufs(ar);
1716out_free_wq:
1717 destroy_workqueue(ar->wq);
1718out_free_ar:
1719 ieee80211_free_hw(hw);
1720out:
1721 return error;
1722}
1723
/* USB disconnect. AR5523_USB_DISCONNECTED is set first so in-flight URB
 * failures are not reported as errors (see ar5523_err()) and
 * ar5523_flush_tx() bails out early; then everything allocated in probe
 * is torn down in reverse order.
 */
static void ar5523_disconnect(struct usb_interface *intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(intf);
	struct ar5523 *ar = hw->priv;

	ar5523_dbg(ar, "detaching\n");
	set_bit(AR5523_USB_DISCONNECTED, &ar->flags);

	ieee80211_unregister_hw(hw);

	ar5523_cancel_rx_cmd(ar);
	ar5523_free_tx_cmd(ar);
	ar5523_free_rx_cmd(ar);
	ar5523_free_rx_bufs(ar);

	destroy_workqueue(ar->wq);

	ieee80211_free_hw(hw);
	usb_set_intfdata(intf, NULL);
}
1744
/* Each device ships under two consecutive product ids: the bare (pre-
 * firmware) id at +1 and the post-firmware id. UG = 11b/g only,
 * UX = a/b/g (AR5523_FLAG_ABG). */
#define AR5523_DEVICE_UG(vendor, device) \
	{ USB_DEVICE((vendor), (device)) }, \
	{ USB_DEVICE((vendor), (device) + 1), \
		.driver_info = AR5523_FLAG_PRE_FIRMWARE }
#define AR5523_DEVICE_UX(vendor, device) \
	{ USB_DEVICE((vendor), (device)), \
		.driver_info = AR5523_FLAG_ABG }, \
	{ USB_DEVICE((vendor), (device) + 1), \
		.driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }

static struct usb_device_id ar5523_id_table[] = {
	AR5523_DEVICE_UG(0x168c, 0x0001),	/* Atheros / AR5523 */
	AR5523_DEVICE_UG(0x0cf3, 0x0001),	/* Atheros2 / AR5523_1 */
	AR5523_DEVICE_UG(0x0cf3, 0x0003),	/* Atheros2 / AR5523_2 */
	AR5523_DEVICE_UX(0x0cf3, 0x0005),	/* Atheros2 / AR5523_3 */
	AR5523_DEVICE_UG(0x0d8e, 0x7801),	/* Conceptronic / AR5523_1 */
	AR5523_DEVICE_UX(0x0d8e, 0x7811),	/* Conceptronic / AR5523_2 */
	AR5523_DEVICE_UX(0x2001, 0x3a00),	/* Dlink / DWLAG132 */
	AR5523_DEVICE_UG(0x2001, 0x3a02),	/* Dlink / DWLG132 */
	AR5523_DEVICE_UX(0x2001, 0x3a04),	/* Dlink / DWLAG122 */
	AR5523_DEVICE_UG(0x1690, 0x0712),	/* Gigaset / AR5523 */
	AR5523_DEVICE_UG(0x1690, 0x0710),	/* Gigaset / SMCWUSBTG */
	AR5523_DEVICE_UG(0x129b, 0x160c),	/* Gigaset / USB stick 108
						   (CyberTAN Technology) */
	AR5523_DEVICE_UG(0x16ab, 0x7801),	/* Globalsun / AR5523_1 */
	AR5523_DEVICE_UX(0x16ab, 0x7811),	/* Globalsun / AR5523_2 */
	AR5523_DEVICE_UG(0x0d8e, 0x7802),	/* Globalsun / AR5523_3 */
	AR5523_DEVICE_UX(0x0846, 0x4300),	/* Netgear / WG111U */
	AR5523_DEVICE_UG(0x0846, 0x4250),	/* Netgear / WG111T */
	AR5523_DEVICE_UG(0x0846, 0x5f00),	/* Netgear / WPN111 */
	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / AR5523_1 */
	AR5523_DEVICE_UX(0x157e, 0x3205),	/* Umedia / AR5523_2 */
	/* NOTE(review): 0x157e/0x3006 already appears above as Umedia
	 * AR5523_1; this duplicate entry never matches. */
	AR5523_DEVICE_UG(0x157e, 0x3006),	/* Umedia / TEW444UBEU */
	AR5523_DEVICE_UG(0x1435, 0x0826),	/* Wistronneweb / AR5523_1 */
	AR5523_DEVICE_UX(0x1435, 0x0828),	/* Wistronneweb / AR5523_2 */
	AR5523_DEVICE_UG(0x0cde, 0x0012),	/* Zcom / AR5523 */
	AR5523_DEVICE_UG(0x1385, 0x4250),	/* Netgear3 / WG111T (2) */
	AR5523_DEVICE_UG(0x1385, 0x5f00),	/* Netgear / WPN111 */
	AR5523_DEVICE_UG(0x1385, 0x5f02),	/* Netgear / WPN111 */
	{ }
};
MODULE_DEVICE_TABLE(usb, ar5523_id_table);
1787
/* USB driver glue and module boilerplate. */
static struct usb_driver ar5523_driver = {
	.name		= "ar5523",
	.id_table	= ar5523_id_table,
	.probe		= ar5523_probe,
	.disconnect	= ar5523_disconnect,
};

module_usb_driver(ar5523_driver);

MODULE_LICENSE("Dual BSD/GPL");
/* Firmware image loaded by ar5523_load_firmware(). */
MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 000000000000..00c6fd346d48
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,152 @@
1/*
2 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2006 Sam Leffler, Errno Consulting
4 * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
5 * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
6 * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
7 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
/* usb_device_id.driver_info bits (see AR5523_DEVICE_UG/UX in ar5523.c). */
#define AR5523_FLAG_PRE_FIRMWARE	(1 << 0)
#define AR5523_FLAG_ABG			(1 << 1)

#define AR5523_FIRMWARE_FILE	"ar5523.bin"

/* Fixed bulk endpoint addresses used by the device. */
#define AR5523_CMD_TX_PIPE	0x01
#define AR5523_DATA_TX_PIPE	0x02
#define AR5523_CMD_RX_PIPE	0x81
#define AR5523_DATA_RX_PIPE	0x82

#define ar5523_cmd_tx_pipe(dev) \
	usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
#define ar5523_data_tx_pipe(dev) \
	usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
#define ar5523_cmd_rx_pipe(dev) \
	usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
#define ar5523_data_rx_pipe(dev) \
	usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)

/* USB transfer timeouts in milliseconds. */
#define AR5523_DATA_TIMEOUT	10000
#define AR5523_CMD_TIMEOUT	1000

/* TX/RX data buffer pool sizes and refill thresholds. */
#define AR5523_TX_DATA_COUNT		8
#define AR5523_TX_DATA_RESTART_COUNT	2
#define AR5523_RX_DATA_COUNT		16
#define AR5523_RX_DATA_REFILL_COUNT	8

#define AR5523_CMD_ID	1
#define AR5523_DATA_ID	2

#define AR5523_TX_WD_TIMEOUT	(HZ * 2)
#define AR5523_FLUSH_TIMEOUT	(HZ * 3)

/* Bit numbers for struct ar5523::flags. */
enum AR5523_flags {
	AR5523_HW_UP,
	AR5523_USB_DISCONNECTED,
	AR5523_CONNECTED
};
59
/* State for the single outstanding firmware command: the TX URB with its
 * DMA-coherent buffer, the caller-supplied reply buffer (odata/olen),
 * command flags, result code, and a completion signalled when the reply
 * arrives. */
struct ar5523_tx_cmd {
	struct ar5523		*ar;
	struct urb		*urb_tx;
	void			*buf_tx;
	void			*odata;		/* reply destination */
	int			olen;		/* reply buffer length */
	int			flags;		/* AR5523_CMD_FLAG_* */
	int			res;		/* command result */
	struct completion	done;
};
70
/* This struct is placed in tx_info->driver_data. It must not be larger
 * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
 */
struct ar5523_tx_data {
	struct list_head	list;	/* on tx_queue_pending/submitted */
	struct ar5523		*ar;
	struct sk_buff		*skb;
	struct urb		*urb;
};

/* One preallocated RX buffer: URB plus the skb it fills, linked on
 * either rx_data_free or rx_data_used. */
struct ar5523_rx_data {
	struct	list_head	list;
	struct ar5523		*ar;
	struct urb		*urb;
	struct sk_buff		*skb;
};
87
/* Per-device driver state, stored in ieee80211_hw::priv. */
struct ar5523 {
	struct usb_device	*dev;
	struct ieee80211_hw	*hw;

	unsigned long		flags;		/* enum AR5523_flags bits */
	struct mutex		mutex;		/* serializes device access */
	struct workqueue_struct *wq;

	struct ar5523_tx_cmd	tx_cmd;		/* single in-flight command */

	struct delayed_work	stat_work;	/* 1 s stats poll */

	/* TX path state. */
	struct timer_list	tx_wd_timer;	/* stall watchdog */
	struct work_struct	tx_wd_work;
	struct work_struct	tx_work;
	struct list_head	tx_queue_pending;
	struct list_head	tx_queue_submitted;
	spinlock_t		tx_data_list_lock;
	wait_queue_head_t	tx_flush_waitq;

	/* Queued + Submitted TX frames */
	atomic_t		tx_nr_total;

	/* Submitted TX frames */
	atomic_t		tx_nr_pending;

	/* RX command endpoint buffer/URB. */
	void			*rx_cmd_buf;
	struct urb		*rx_cmd_urb;

	/* RX data buffer pool. */
	struct ar5523_rx_data	rx_data[AR5523_RX_DATA_COUNT];
	spinlock_t		rx_data_list_lock;
	struct list_head	rx_data_free;
	struct list_head	rx_data_used;
	atomic_t		rx_data_free_cnt;

	struct work_struct	rx_refill_work;

	unsigned int		rxbufsz;	/* device RX chunk size */
	u8			serial[16];	/* device serial number */

	/* mac80211 band/rate/channel data and the single vif. */
	struct ieee80211_channel channels[14];
	struct ieee80211_rate	rates[12];
	struct ieee80211_supported_band band;
	struct ieee80211_vif	*vif;
};
133
/* flags for sending firmware commands */
#define AR5523_CMD_FLAG_READ	(1 << 1)
#define AR5523_CMD_FLAG_MAGIC	(1 << 2)

#define ar5523_dbg(ar, format, arg...) \
	dev_dbg(&(ar)->dev->dev, format, ## arg)

/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
 * fail. Instead of dealing with them in every possible place just suppress
 * any messages on USB disconnect.
 */
#define ar5523_err(ar, format, arg...) \
do { \
	if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
		dev_err(&(ar)->dev->dev, format, ## arg); \
	} \
} while (0)
#define ar5523_info(ar, format, arg...) \
	dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 000000000000..0fe2c803f48f
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
1/*
2 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
3 * Copyright (c) 2006 Sam Leffler, Errno Consulting
4 * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
5 * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
6 * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
7 *
8 * Permission to use, copy, modify, and/or distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21/* all fields are big endian */
22struct ar5523_fwblock {
23 __be32 flags;
24#define AR5523_WRITE_BLOCK (1 << 4)
25
26 __be32 len;
27#define AR5523_MAX_FWBLOCK_SIZE 2048
28
29 __be32 total;
30 __be32 remain;
31 __be32 rxtotal;
32 __be32 pad[123];
33} __packed;
34
35#define AR5523_MAX_RXCMDSZ 1024
36#define AR5523_MAX_TXCMDSZ 1024
37
38struct ar5523_cmd_hdr {
39 __be32 len;
40 __be32 code;
41/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
42/* messages from Host -> Target */
43#define WDCMSG_HOST_AVAILABLE 0x01
44#define WDCMSG_BIND 0x02
45#define WDCMSG_TARGET_RESET 0x03
46#define WDCMSG_TARGET_GET_CAPABILITY 0x04
47#define WDCMSG_TARGET_SET_CONFIG 0x05
48#define WDCMSG_TARGET_GET_STATUS 0x06
49#define WDCMSG_TARGET_GET_STATS 0x07
50#define WDCMSG_TARGET_START 0x08
51#define WDCMSG_TARGET_STOP 0x09
52#define WDCMSG_TARGET_ENABLE 0x0a
53#define WDCMSG_TARGET_DISABLE 0x0b
54#define WDCMSG_CREATE_CONNECTION 0x0c
55#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
56#define WDCMSG_DELETE_CONNECT 0x0e
57#define WDCMSG_SEND 0x0f
58#define WDCMSG_FLUSH 0x10
59/* messages from Target -> Host */
60#define WDCMSG_STATS_UPDATE 0x11
61#define WDCMSG_BMISS 0x12
62#define WDCMSG_DEVICE_AVAIL 0x13
63#define WDCMSG_SEND_COMPLETE 0x14
64#define WDCMSG_DATA_AVAIL 0x15
65#define WDCMSG_SET_PWR_MODE 0x16
66#define WDCMSG_BMISS_ACK 0x17
67#define WDCMSG_SET_LED_STEADY 0x18
68#define WDCMSG_SET_LED_BLINK 0x19
69/* more messages */
70#define WDCMSG_SETUP_BEACON_DESC 0x1a
71#define WDCMSG_BEACON_INIT 0x1b
72#define WDCMSG_RESET_KEY_CACHE 0x1c
73#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
74#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
75#define WDCMSG_SET_DECOMP_MASK 0x1f
76#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
77#define WDCMSG_SET_LED_STATE 0x21
78#define WDCMSG_WRITE_ASSOCID 0x22
79#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
80#define WDCMSG_GET_TSF 0x24
81#define WDCMSG_RESET_TSF 0x25
82#define WDCMSG_SET_ADHOC_MODE 0x26
83#define WDCMSG_SET_BASIC_RATE 0x27
84#define WDCMSG_MIB_CONTROL 0x28
85#define WDCMSG_GET_CHANNEL_DATA 0x29
86#define WDCMSG_GET_CUR_RSSI 0x2a
87#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
88#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
89#define WDCMSG_SET_POWER_MODE 0x30
90#define WDCMSG_SETUP_PSPOLL_DESC 0x31
91#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
92#define WDCMSG_RX_FILTER 0x33
93#define WDCMSG_PER_CALIBRATION 0x34
94#define WDCMSG_RESET 0x35
95#define WDCMSG_DISABLE 0x36
96#define WDCMSG_PHY_DISABLE 0x37
97#define WDCMSG_SET_TX_POWER_LIMIT 0x38
98#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
99#define WDCMSG_SETUP_TX_QUEUE 0x3a
100#define WDCMSG_RELEASE_TX_QUEUE 0x3b
101#define WDCMSG_SET_DEFAULT_KEY 0x43
102
103 __u32 priv; /* driver private data,
104 don't care about endianess */
105 __be32 magic;
106 __be32 reserved2[4];
107};
108
109struct ar5523_cmd_host_available {
110 __be32 sw_ver_major;
111 __be32 sw_ver_minor;
112 __be32 sw_ver_patch;
113 __be32 sw_ver_build;
114} __packed;
115
116#define ATH_SW_VER_MAJOR 1
117#define ATH_SW_VER_MINOR 5
118#define ATH_SW_VER_PATCH 0
119#define ATH_SW_VER_BUILD 9999
120
121struct ar5523_chunk {
122 u8 seqnum; /* sequence number for ordering */
123 u8 flags;
124#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
125#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
126#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
127 __be16 length; /* chunk size in bytes */
128 /* chunk data follows */
129} __packed;
130
131/*
132 * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
133 */
134struct ar5523_rx_desc {
135 __be32 len; /* msg length including header */
136 __be32 code; /* WDCMSG_DATA_AVAIL */
137 __be32 gennum; /* generation number */
138 __be32 status; /* start of RECEIVE_INFO */
139#define UATH_STATUS_OK 0
140#define UATH_STATUS_STOP_IN_PROGRESS 1
141#define UATH_STATUS_CRC_ERR 2
142#define UATH_STATUS_PHY_ERR 3
143#define UATH_STATUS_DECRYPT_CRC_ERR 4
144#define UATH_STATUS_DECRYPT_MIC_ERR 5
145#define UATH_STATUS_DECOMP_ERR 6
146#define UATH_STATUS_KEY_ERR 7
147#define UATH_STATUS_ERR 8
148 __be32 tstamp_low; /* low-order 32-bits of rx timestamp */
149 __be32 tstamp_high; /* high-order 32-bits of rx timestamp */
150 __be32 framelen; /* frame length */
151 __be32 rate; /* rx rate code */
152 __be32 antenna;
153 __be32 rssi;
154 __be32 channel;
155 __be32 phyerror;
156 __be32 connix; /* key table ix for bss traffic */
157 __be32 decrypterror;
158 __be32 keycachemiss;
159 __be32 pad; /* XXX? */
160} __packed;
161
162struct ar5523_tx_desc {
163 __be32 msglen;
164 u32 msgid; /* msg id (supplied by host) */
165 __be32 type; /* opcode: WDMSG_SEND or WDCMSG_FLUSH */
166 __be32 txqid; /* tx queue id and flags */
167#define UATH_TXQID_MASK 0x0f
168#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
169#define UATH_TXQID_FF 0x20 /* content is fast frame */
170 __be32 connid; /* tx connection id */
171#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
172 __be32 flags; /* non-zero if response desired */
173#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
174 __be32 buflen; /* payload length */
175} __packed;
176
177
178#define AR5523_ID_BSS 2
179#define AR5523_ID_BROADCAST 0xffffffff
180
181/* structure for command UATH_CMD_WRITE_MAC */
182struct ar5523_write_mac {
183 __be32 reg;
184 __be32 len;
185 u8 data[32];
186} __packed;
187
188struct ar5523_cmd_rateset {
189 __u8 length;
190#define AR5523_MAX_NRATES 32
191 __u8 set[AR5523_MAX_NRATES];
192};
193
194struct ar5523_cmd_set_associd { /* AR5523_WRITE_ASSOCID */
195 __be32 defaultrateix;
196 __be32 associd;
197 __be32 timoffset;
198 __be32 turboprime;
199 __u8 bssid[6];
200} __packed;
201
202/* structure for command WDCMSG_RESET */
203struct ar5523_cmd_reset {
204 __be32 flags; /* channel flags */
205#define UATH_CHAN_TURBO 0x0100
206#define UATH_CHAN_CCK 0x0200
207#define UATH_CHAN_OFDM 0x0400
208#define UATH_CHAN_2GHZ 0x1000
209#define UATH_CHAN_5GHZ 0x2000
210 __be32 freq; /* channel frequency */
211 __be32 maxrdpower;
212 __be32 cfgctl;
213 __be32 twiceantennareduction;
214 __be32 channelchange;
215 __be32 keeprccontent;
216} __packed;
217
218/* structure for command WDCMSG_SET_BASIC_RATE */
219struct ar5523_cmd_rates {
220 __be32 connid;
221 __be32 keeprccontent;
222 __be32 size;
223 struct ar5523_cmd_rateset rateset;
224} __packed;
225
226enum {
227 WLAN_MODE_NONE = 0,
228 WLAN_MODE_11b,
229 WLAN_MODE_11a,
230 WLAN_MODE_11g,
231 WLAN_MODE_11a_TURBO,
232 WLAN_MODE_11g_TURBO,
233 WLAN_MODE_11a_TURBO_PRIME,
234 WLAN_MODE_11g_TURBO_PRIME,
235 WLAN_MODE_11a_XR,
236 WLAN_MODE_11g_XR,
237};
238
239struct ar5523_cmd_connection_attr {
240 __be32 longpreambleonly;
241 struct ar5523_cmd_rateset rateset;
242 __be32 wlanmode;
243} __packed;
244
245/* structure for command AR5523_CREATE_CONNECTION */
246struct ar5523_cmd_create_connection {
247 __be32 connid;
248 __be32 bssid;
249 __be32 size;
250 struct ar5523_cmd_connection_attr connattr;
251} __packed;
252
253struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
254 __be32 lednum;
255#define UATH_LED_LINK 0
256#define UATH_LED_ACTIVITY 1
257 __be32 ledmode;
258#define UATH_LED_OFF 0
259#define UATH_LED_ON 1
260} __packed;
261
262struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
263 __be32 lednum;
264 __be32 ledmode;
265 __be32 blinkrate;
266 __be32 slowmode;
267} __packed;
268
269struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
270 __be32 connected;
271} __packed;
272
273struct ar5523_cmd_txq_attr {
274 __be32 priority;
275 __be32 aifs;
276 __be32 logcwmin;
277 __be32 logcwmax;
278 __be32 bursttime;
279 __be32 mode;
280 __be32 qflags;
281} __packed;
282
283struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
284 __be32 qid;
285 __be32 len;
286 struct ar5523_cmd_txq_attr attr;
287} __packed;
288
289struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
290 __be32 bits;
291#define UATH_FILTER_RX_UCAST 0x00000001
292#define UATH_FILTER_RX_MCAST 0x00000002
293#define UATH_FILTER_RX_BCAST 0x00000004
294#define UATH_FILTER_RX_CONTROL 0x00000008
295#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
296#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
297#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
298#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
299#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
300#define UATH_FILTER_RX_PROBE_REQ 0x00000800
301 __be32 op;
302#define UATH_FILTER_OP_INIT 0x0
303#define UATH_FILTER_OP_SET 0x1
304#define UATH_FILTER_OP_CLEAR 0x2
305#define UATH_FILTER_OP_TEMP 0x3
306#define UATH_FILTER_OP_RESTORE 0x4
307} __packed;
308
309enum {
310 CFG_NONE, /* Sentinal to indicate "no config" */
311 CFG_REG_DOMAIN, /* Regulatory Domain */
312 CFG_RATE_CONTROL_ENABLE,
313 CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
314 CFG_HW_TX_RETRIES,
315 CFG_SW_TX_RETRIES,
316 CFG_SLOW_CLOCK_ENABLE,
317 CFG_COMP_PROC,
318 CFG_USER_RTS_THRESHOLD,
319 CFG_XR2NORM_RATE_THRESHOLD,
320 CFG_XRMODE_SWITCH_COUNT,
321 CFG_PROTECTION_TYPE,
322 CFG_BURST_SEQ_THRESHOLD,
323 CFG_ABOLT,
324 CFG_IQ_LOG_COUNT_MAX,
325 CFG_MODE_CTS,
326 CFG_WME_ENABLED,
327 CFG_GPRS_CBR_PERIOD,
328 CFG_SERVICE_TYPE,
329 /* MAC Address to use. Overrides EEPROM */
330 CFG_MAC_ADDR,
331 CFG_DEBUG_EAR,
332 CFG_INIT_REGS,
333 /* An ID for use in error & debug messages */
334 CFG_DEBUG_ID,
335 CFG_COMP_WIN_SZ,
336 CFG_DIVERSITY_CTL,
337 CFG_TP_SCALE,
338 CFG_TPC_HALF_DBM5,
339 CFG_TPC_HALF_DBM2,
340 CFG_OVERRD_TX_POWER,
341 CFG_USE_32KHZ_CLOCK,
342 CFG_GMODE_PROTECTION,
343 CFG_GMODE_PROTECT_RATE_INDEX,
344 CFG_GMODE_NON_ERP_PREAMBLE,
345 CFG_WDC_TRANSPORT_CHUNK_SIZE,
346};
347
348enum {
349 /* Sentinal to indicate "no capability" */
350 CAP_NONE,
351 CAP_ALL, /* ALL capabilities */
352 CAP_TARGET_VERSION,
353 CAP_TARGET_REVISION,
354 CAP_MAC_VERSION,
355 CAP_MAC_REVISION,
356 CAP_PHY_REVISION,
357 CAP_ANALOG_5GHz_REVISION,
358 CAP_ANALOG_2GHz_REVISION,
359 /* Target supports WDC message debug features */
360 CAP_DEBUG_WDCMSG_SUPPORT,
361
362 CAP_REG_DOMAIN,
363 CAP_COUNTRY_CODE,
364 CAP_REG_CAP_BITS,
365
366 CAP_WIRELESS_MODES,
367 CAP_CHAN_SPREAD_SUPPORT,
368 CAP_SLEEP_AFTER_BEACON_BROKEN,
369 CAP_COMPRESS_SUPPORT,
370 CAP_BURST_SUPPORT,
371 CAP_FAST_FRAMES_SUPPORT,
372 CAP_CHAP_TUNING_SUPPORT,
373 CAP_TURBOG_SUPPORT,
374 CAP_TURBO_PRIME_SUPPORT,
375 CAP_DEVICE_TYPE,
376 CAP_XR_SUPPORT,
377 CAP_WME_SUPPORT,
378 CAP_TOTAL_QUEUES,
379 CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
380
381 CAP_LOW_5GHZ_CHAN,
382 CAP_HIGH_5GHZ_CHAN,
383 CAP_LOW_2GHZ_CHAN,
384 CAP_HIGH_2GHZ_CHAN,
385
386 CAP_MIC_AES_CCM,
387 CAP_MIC_CKIP,
388 CAP_MIC_TKIP,
389 CAP_MIC_TKIP_WME,
390 CAP_CIPHER_AES_CCM,
391 CAP_CIPHER_CKIP,
392 CAP_CIPHER_TKIP,
393
394 CAP_TWICE_ANTENNAGAIN_5G,
395 CAP_TWICE_ANTENNAGAIN_2G,
396};
397
398enum {
399 ST_NONE, /* Sentinal to indicate "no status" */
400 ST_ALL,
401 ST_SERVICE_TYPE,
402 ST_WLAN_MODE,
403 ST_FREQ,
404 ST_BAND,
405 ST_LAST_RSSI,
406 ST_PS_FRAMES_DROPPED,
407 ST_CACHED_DEF_ANT,
408 ST_COUNT_OTHER_RX_ANT,
409 ST_USE_FAST_DIVERSITY,
410 ST_MAC_ADDR,
411 ST_RX_GENERATION_NUM,
412 ST_TX_QUEUE_DEPTH,
413 ST_SERIAL_NUMBER,
414 ST_WDC_TRANSPORT_CHUNK_SIZE,
415};
416
417enum {
418 TARGET_DEVICE_AWAKE,
419 TARGET_DEVICE_SLEEP,
420 TARGET_DEVICE_PWRDN,
421 TARGET_DEVICE_PWRSAVE,
422 TARGET_DEVICE_SUSPEND,
423 TARGET_DEVICE_RESUME,
424};
425
426/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
427#define IEEE80211_2ADDR_LEN 16
428
429#define AR5523_MIN_RXBUFSZ \
430 (((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
431 sizeof(struct ar5523_rx_desc)) + 3) & ~3)
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 338c5c42357d..c9f81a388f15 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,7 @@
1config ATH5K 1config ATH5K
2 tristate "Atheros 5xxx wireless cards support" 2 tristate "Atheros 5xxx wireless cards support"
3 depends on (PCI || ATHEROS_AR231X) && MAC80211 3 depends on (PCI || ATHEROS_AR231X) && MAC80211
4 select ATH_COMMON
4 select MAC80211_LEDS 5 select MAC80211_LEDS
5 select LEDS_CLASS 6 select LEDS_CLASS
6 select NEW_LEDS 7 select NEW_LEDS
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index aec33cc207fd..8e8bcc7a4805 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -236,17 +236,4 @@ static struct platform_driver ath_ahb_driver = {
236 }, 236 },
237}; 237};
238 238
239static int __init 239module_platform_driver(ath_ahb_driver);
240ath5k_ahb_init(void)
241{
242 return platform_driver_register(&ath_ahb_driver);
243}
244
245static void __exit
246ath5k_ahb_exit(void)
247{
248 platform_driver_unregister(&ath_ahb_driver);
249}
250
251module_init(ath5k_ahb_init);
252module_exit(ath5k_ahb_exit);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f31cfa56cc0..30ca0a60a64c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -511,8 +511,9 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
511 ath5k_vif_iter(&iter_data, vif->addr, vif); 511 ath5k_vif_iter(&iter_data, vif->addr, vif);
512 512
513 /* Get list of all active MAC addresses */ 513 /* Get list of all active MAC addresses */
514 ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter, 514 ieee80211_iterate_active_interfaces_atomic(
515 &iter_data); 515 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
516 ath5k_vif_iter, &iter_data);
516 memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN); 517 memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
517 518
518 ah->opmode = iter_data.opmode; 519 ah->opmode = iter_data.opmode;
@@ -848,7 +849,7 @@ ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
848 return; 849 return;
849 dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len, 850 dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
850 DMA_TO_DEVICE); 851 DMA_TO_DEVICE);
851 dev_kfree_skb_any(bf->skb); 852 ieee80211_free_txskb(ah->hw, bf->skb);
852 bf->skb = NULL; 853 bf->skb = NULL;
853 bf->skbaddr = 0; 854 bf->skbaddr = 0;
854 bf->desc->ds_data = 0; 855 bf->desc->ds_data = 0;
@@ -1335,20 +1336,9 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
1335 * 15bit only. that means TSF extension has to be done within 1336 * 15bit only. that means TSF extension has to be done within
1336 * 32768usec (about 32ms). it might be necessary to move this to 1337 * 32768usec (about 32ms). it might be necessary to move this to
1337 * the interrupt handler, like it is done in madwifi. 1338 * the interrupt handler, like it is done in madwifi.
1338 *
1339 * Unfortunately we don't know when the hardware takes the rx
1340 * timestamp (beginning of phy frame, data frame, end of rx?).
1341 * The only thing we know is that it is hardware specific...
1342 * On AR5213 it seems the rx timestamp is at the end of the
1343 * frame, but I'm not sure.
1344 *
1345 * NOTE: mac80211 defines mactime at the beginning of the first
1346 * data symbol. Since we don't have any time references it's
1347 * impossible to comply to that. This affects IBSS merge only
1348 * right now, so it's not too bad...
1349 */ 1339 */
1350 rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp); 1340 rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
1351 rxs->flag |= RX_FLAG_MACTIME_MPDU; 1341 rxs->flag |= RX_FLAG_MACTIME_END;
1352 1342
1353 rxs->freq = ah->curchan->center_freq; 1343 rxs->freq = ah->curchan->center_freq;
1354 rxs->band = ah->curchan->band; 1344 rxs->band = ah->curchan->band;
@@ -1575,7 +1565,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1575 return; 1565 return;
1576 1566
1577drop_packet: 1567drop_packet:
1578 dev_kfree_skb_any(skb); 1568 ieee80211_free_txskb(hw, skb);
1579} 1569}
1580 1570
1581static void 1571static void
@@ -2434,7 +2424,7 @@ static const struct ieee80211_iface_combination if_comb = {
2434 .num_different_channels = 1, 2424 .num_different_channels = 1,
2435}; 2425};
2436 2426
2437int __devinit 2427int
2438ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) 2428ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2439{ 2429{
2440 struct ieee80211_hw *hw = ah->hw; 2430 struct ieee80211_hw *hw = ah->hw;
@@ -2860,7 +2850,7 @@ static void ath5k_reset_work(struct work_struct *work)
2860 mutex_unlock(&ah->lock); 2850 mutex_unlock(&ah->lock);
2861} 2851}
2862 2852
2863static int __devinit 2853static int
2864ath5k_init(struct ieee80211_hw *hw) 2854ath5k_init(struct ieee80211_hw *hw)
2865{ 2855{
2866 2856
@@ -3045,8 +3035,9 @@ ath5k_any_vif_assoc(struct ath5k_hw *ah)
3045 iter_data.need_set_hw_addr = false; 3035 iter_data.need_set_hw_addr = false;
3046 iter_data.found_active = true; 3036 iter_data.found_active = true;
3047 3037
3048 ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter, 3038 ieee80211_iterate_active_interfaces_atomic(
3049 &iter_data); 3039 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
3040 ath5k_vif_iter, &iter_data);
3050 return iter_data.any_assoc; 3041 return iter_data.any_assoc;
3051} 3042}
3052 3043
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b9f708a45f4e..f77ef36acf87 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -158,7 +158,7 @@ void ath5k_unregister_leds(struct ath5k_hw *ah)
158 ath5k_unregister_led(&ah->tx_led); 158 ath5k_unregister_led(&ah->tx_led);
159} 159}
160 160
161int __devinit ath5k_init_leds(struct ath5k_hw *ah) 161int ath5k_init_leds(struct ath5k_hw *ah)
162{ 162{
163 int ret = 0; 163 int ret = 0;
164 struct ieee80211_hw *hw = ah->hw; 164 struct ieee80211_hw *hw = ah->hw;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 7a28538e6e05..4264341533ea 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -62,7 +62,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
62 u16 qnum = skb_get_queue_mapping(skb); 62 u16 qnum = skb_get_queue_mapping(skb);
63 63
64 if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) { 64 if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
65 dev_kfree_skb_any(skb); 65 ieee80211_free_txskb(hw, skb);
66 return; 66 return;
67 } 67 }
68 68
@@ -452,8 +452,9 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
452 iter_data.hw_macaddr = NULL; 452 iter_data.hw_macaddr = NULL;
453 iter_data.n_stas = 0; 453 iter_data.n_stas = 0;
454 iter_data.need_set_hw_addr = false; 454 iter_data.need_set_hw_addr = false;
455 ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter, 455 ieee80211_iterate_active_interfaces_atomic(
456 &iter_data); 456 ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
457 ath5k_vif_iter, &iter_data);
457 458
458 /* Set up RX Filter */ 459 /* Set up RX Filter */
459 if (iter_data.n_stas > 1) { 460 if (iter_data.n_stas > 1) {
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dff48fbc63bf..859db7c34f87 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -155,7 +155,7 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
155* PCI Initialization * 155* PCI Initialization *
156\********************/ 156\********************/
157 157
158static int __devinit 158static int
159ath5k_pci_probe(struct pci_dev *pdev, 159ath5k_pci_probe(struct pci_dev *pdev,
160 const struct pci_device_id *id) 160 const struct pci_device_id *id)
161{ 161{
@@ -285,7 +285,7 @@ err:
285 return ret; 285 return ret;
286} 286}
287 287
288static void __devexit 288static void
289ath5k_pci_remove(struct pci_dev *pdev) 289ath5k_pci_remove(struct pci_dev *pdev)
290{ 290{
291 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 291 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
@@ -336,7 +336,7 @@ static struct pci_driver ath5k_pci_driver = {
336 .name = KBUILD_MODNAME, 336 .name = KBUILD_MODNAME,
337 .id_table = ath5k_pci_id_table, 337 .id_table = ath5k_pci_id_table,
338 .probe = ath5k_pci_probe, 338 .probe = ath5k_pci_probe,
339 .remove = __devexit_p(ath5k_pci_remove), 339 .remove = ath5k_pci_remove,
340 .driver.pm = ATH5K_PM_OPS, 340 .driver.pm = ATH5K_PM_OPS,
341}; 341};
342 342
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 0c2dd4771c36..4084b1076286 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -789,9 +789,9 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
789 * (I don't think it supports 44MHz) */ 789 * (I don't think it supports 44MHz) */
790 /* On 2425 initvals TURBO_SHORT is not present */ 790 /* On 2425 initvals TURBO_SHORT is not present */
791 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) { 791 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
792 turbo = AR5K_PHY_TURBO_MODE | 792 turbo = AR5K_PHY_TURBO_MODE;
793 (ah->ah_radio == AR5K_RF2425) ? 0 : 793 if (ah->ah_radio != AR5K_RF2425)
794 AR5K_PHY_TURBO_SHORT; 794 turbo |= AR5K_PHY_TURBO_SHORT;
795 } else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) { 795 } else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
796 if (ah->ah_radio == AR5K_RF5413) { 796 if (ah->ah_radio == AR5K_RF5413) {
797 mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ? 797 mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index d755a5e7ed20..26c4b7220859 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,3 +30,12 @@ config ATH6KL_DEBUG
30 depends on ATH6KL 30 depends on ATH6KL
31 ---help--- 31 ---help---
32 Enables debug support 32 Enables debug support
33
34config ATH6KL_REGDOMAIN
35 bool "Atheros ath6kl regdomain support"
36 depends on ATH6KL
37 depends on CFG80211_CERTIFICATION_ONUS
38 ---help---
39 Enabling this makes it possible to change the regdomain in
40 the firmware. This can be only enabled if regulatory requirements
41 are taken into account.
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8cae8886f17d..cab0ec0d5380 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -34,6 +34,7 @@ ath6kl_core-y += main.o
34ath6kl_core-y += txrx.o 34ath6kl_core-y += txrx.o
35ath6kl_core-y += wmi.o 35ath6kl_core-y += wmi.o
36ath6kl_core-y += core.o 36ath6kl_core-y += core.o
37ath6kl_core-y += recovery.o
37ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o 38ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
38 39
39obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o 40obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7089f8160ad5..5516a8ccc3c6 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -147,15 +147,15 @@ static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
147{ 147{
148 struct ath6kl *ar = vif->ar; 148 struct ath6kl *ar = vif->ar;
149 149
150 if (ar->state != ATH6KL_STATE_SCHED_SCAN) 150 if (!test_and_clear_bit(SCHED_SCANNING, &vif->flags))
151 return false; 151 return false;
152 152
153 del_timer_sync(&vif->sched_scan_timer); 153 del_timer_sync(&vif->sched_scan_timer);
154 154
155 ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, 155 if (ar->state == ATH6KL_STATE_RECOVERY)
156 ATH6KL_HOST_MODE_AWAKE); 156 return true;
157 157
158 ar->state = ATH6KL_STATE_ON; 158 ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, false);
159 159
160 return true; 160 return true;
161} 161}
@@ -301,7 +301,7 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
301 301
302static bool ath6kl_is_wpa_ie(const u8 *pos) 302static bool ath6kl_is_wpa_ie(const u8 *pos)
303{ 303{
304 return pos[0] == WLAN_EID_WPA && pos[1] >= 4 && 304 return pos[0] == WLAN_EID_VENDOR_SPECIFIC && pos[1] >= 4 &&
305 pos[2] == 0x00 && pos[3] == 0x50 && 305 pos[2] == 0x00 && pos[3] == 0x50 &&
306 pos[4] == 0xf2 && pos[5] == 0x01; 306 pos[4] == 0xf2 && pos[5] == 0x01;
307} 307}
@@ -369,17 +369,13 @@ static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
369{ 369{
370 switch (type) { 370 switch (type) {
371 case NL80211_IFTYPE_STATION: 371 case NL80211_IFTYPE_STATION:
372 case NL80211_IFTYPE_P2P_CLIENT:
372 *nw_type = INFRA_NETWORK; 373 *nw_type = INFRA_NETWORK;
373 break; 374 break;
374 case NL80211_IFTYPE_ADHOC: 375 case NL80211_IFTYPE_ADHOC:
375 *nw_type = ADHOC_NETWORK; 376 *nw_type = ADHOC_NETWORK;
376 break; 377 break;
377 case NL80211_IFTYPE_AP: 378 case NL80211_IFTYPE_AP:
378 *nw_type = AP_NETWORK;
379 break;
380 case NL80211_IFTYPE_P2P_CLIENT:
381 *nw_type = INFRA_NETWORK;
382 break;
383 case NL80211_IFTYPE_P2P_GO: 379 case NL80211_IFTYPE_P2P_GO:
384 *nw_type = AP_NETWORK; 380 *nw_type = AP_NETWORK;
385 break; 381 break;
@@ -1031,30 +1027,15 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
1031 1027
1032 vif->scan_req = request; 1028 vif->scan_req = request;
1033 1029
1034 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, 1030 ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
1035 ar->fw_capabilities)) { 1031 WMI_LONG_SCAN, force_fg_scan,
1036 /* 1032 false, 0,
1037 * If capable of doing P2P mgmt operations using 1033 ATH6KL_FG_SCAN_INTERVAL,
1038 * station interface, send additional information like 1034 n_channels, channels,
1039 * supported rates to advertise and xmit rates for 1035 request->no_cck,
1040 * probe requests 1036 request->rates);
1041 */
1042 ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
1043 WMI_LONG_SCAN, force_fg_scan,
1044 false, 0,
1045 ATH6KL_FG_SCAN_INTERVAL,
1046 n_channels, channels,
1047 request->no_cck,
1048 request->rates);
1049 } else {
1050 ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
1051 WMI_LONG_SCAN, force_fg_scan,
1052 false, 0,
1053 ATH6KL_FG_SCAN_INTERVAL,
1054 n_channels, channels);
1055 }
1056 if (ret) { 1037 if (ret) {
1057 ath6kl_err("wmi_startscan_cmd failed\n"); 1038 ath6kl_err("failed to start scan: %d\n", ret);
1058 vif->scan_req = NULL; 1039 vif->scan_req = NULL;
1059 } 1040 }
1060 1041
@@ -1093,15 +1074,18 @@ out:
1093void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, 1074void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
1094 enum wmi_phy_mode mode) 1075 enum wmi_phy_mode mode)
1095{ 1076{
1096 enum nl80211_channel_type type; 1077 struct cfg80211_chan_def chandef;
1097 1078
1098 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, 1079 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
1099 "channel switch notify nw_type %d freq %d mode %d\n", 1080 "channel switch notify nw_type %d freq %d mode %d\n",
1100 vif->nw_type, freq, mode); 1081 vif->nw_type, freq, mode);
1101 1082
1102 type = (mode == WMI_11G_HT20) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT; 1083 cfg80211_chandef_create(&chandef,
1084 ieee80211_get_channel(vif->ar->wiphy, freq),
1085 (mode == WMI_11G_HT20) ?
1086 NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
1103 1087
1104 cfg80211_ch_switch_notify(vif->ndev, freq, type); 1088 cfg80211_ch_switch_notify(vif->ndev, &chandef);
1105} 1089}
1106 1090
1107static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, 1091static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -1384,11 +1368,8 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1384 return 0; 1368 return 0;
1385} 1369}
1386 1370
1387/*
1388 * The type nl80211_tx_power_setting replaces the following
1389 * data type from 2.6.36 onwards
1390*/
1391static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy, 1371static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1372 struct wireless_dev *wdev,
1392 enum nl80211_tx_power_setting type, 1373 enum nl80211_tx_power_setting type,
1393 int mbm) 1374 int mbm)
1394{ 1375{
@@ -1423,7 +1404,9 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
1423 return 0; 1404 return 0;
1424} 1405}
1425 1406
1426static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm) 1407static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy,
1408 struct wireless_dev *wdev,
1409 int *dbm)
1427{ 1410{
1428 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy); 1411 struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
1429 struct ath6kl_vif *vif; 1412 struct ath6kl_vif *vif;
@@ -1614,8 +1597,8 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
1614 vif->ssid_len = ibss_param->ssid_len; 1597 vif->ssid_len = ibss_param->ssid_len;
1615 memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len); 1598 memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
1616 1599
1617 if (ibss_param->channel) 1600 if (ibss_param->chandef.chan)
1618 vif->ch_hint = ibss_param->channel->center_freq; 1601 vif->ch_hint = ibss_param->chandef.chan->center_freq;
1619 1602
1620 if (ibss_param->channel_fixed) { 1603 if (ibss_param->channel_fixed) {
1621 /* 1604 /*
@@ -1889,7 +1872,7 @@ static int ath6kl_wow_usr(struct ath6kl *ar, struct ath6kl_vif *vif,
1889 struct cfg80211_wowlan *wow, u32 *filter) 1872 struct cfg80211_wowlan *wow, u32 *filter)
1890{ 1873{
1891 int ret, pos; 1874 int ret, pos;
1892 u8 mask[WOW_MASK_SIZE]; 1875 u8 mask[WOW_PATTERN_SIZE];
1893 u16 i; 1876 u16 i;
1894 1877
1895 /* Configure the patterns that we received from the user. */ 1878 /* Configure the patterns that we received from the user. */
@@ -2107,33 +2090,16 @@ static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
2107 return ret; 2090 return ret;
2108} 2091}
2109 2092
2110static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 2093static int ath6kl_wow_suspend_vif(struct ath6kl_vif *vif,
2094 struct cfg80211_wowlan *wow, u32 *filter)
2111{ 2095{
2096 struct ath6kl *ar = vif->ar;
2112 struct in_device *in_dev; 2097 struct in_device *in_dev;
2113 struct in_ifaddr *ifa; 2098 struct in_ifaddr *ifa;
2114 struct ath6kl_vif *vif;
2115 int ret; 2099 int ret;
2116 u32 filter = 0;
2117 u16 i, bmiss_time; 2100 u16 i, bmiss_time;
2118 u8 index = 0;
2119 __be32 ips[MAX_IP_ADDRS]; 2101 __be32 ips[MAX_IP_ADDRS];
2120 2102 u8 index = 0;
2121 /* The FW currently can't support multi-vif WoW properly. */
2122 if (ar->num_vif > 1)
2123 return -EIO;
2124
2125 vif = ath6kl_vif_first(ar);
2126 if (!vif)
2127 return -EIO;
2128
2129 if (!ath6kl_cfg80211_ready(vif))
2130 return -EIO;
2131
2132 if (!test_bit(CONNECTED, &vif->flags))
2133 return -ENOTCONN;
2134
2135 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
2136 return -EINVAL;
2137 2103
2138 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) && 2104 if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
2139 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, 2105 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
@@ -2155,7 +2121,7 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2155 * the user. 2121 * the user.
2156 */ 2122 */
2157 if (wow) 2123 if (wow)
2158 ret = ath6kl_wow_usr(ar, vif, wow, &filter); 2124 ret = ath6kl_wow_usr(ar, vif, wow, filter);
2159 else if (vif->nw_type == AP_NETWORK) 2125 else if (vif->nw_type == AP_NETWORK)
2160 ret = ath6kl_wow_ap(ar, vif); 2126 ret = ath6kl_wow_ap(ar, vif);
2161 else 2127 else
@@ -2190,12 +2156,10 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2190 return ret; 2156 return ret;
2191 } 2157 }
2192 2158
2193 ar->state = ATH6KL_STATE_SUSPENDING;
2194
2195 /* Setup own IP addr for ARP agent. */ 2159 /* Setup own IP addr for ARP agent. */
2196 in_dev = __in_dev_get_rtnl(vif->ndev); 2160 in_dev = __in_dev_get_rtnl(vif->ndev);
2197 if (!in_dev) 2161 if (!in_dev)
2198 goto skip_arp; 2162 return 0;
2199 2163
2200 ifa = in_dev->ifa_list; 2164 ifa = in_dev->ifa_list;
2201 memset(&ips, 0, sizeof(ips)); 2165 memset(&ips, 0, sizeof(ips));
@@ -2218,41 +2182,61 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2218 return ret; 2182 return ret;
2219 } 2183 }
2220 2184
2221skip_arp: 2185 return ret;
2222 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx, 2186}
2187
2188static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
2189{
2190 struct ath6kl_vif *first_vif, *vif;
2191 int ret = 0;
2192 u32 filter = 0;
2193 bool connected = false;
2194
2195 /* enter / leave wow suspend on first vif always */
2196 first_vif = ath6kl_vif_first(ar);
2197 if (WARN_ON(unlikely(!first_vif)) ||
2198 !ath6kl_cfg80211_ready(first_vif))
2199 return -EIO;
2200
2201 if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
2202 return -EINVAL;
2203
2204 /* install filters for each connected vif */
2205 spin_lock_bh(&ar->list_lock);
2206 list_for_each_entry(vif, &ar->vif_list, list) {
2207 if (!test_bit(CONNECTED, &vif->flags) ||
2208 !ath6kl_cfg80211_ready(vif))
2209 continue;
2210 connected = true;
2211
2212 ret = ath6kl_wow_suspend_vif(vif, wow, &filter);
2213 if (ret)
2214 break;
2215 }
2216 spin_unlock_bh(&ar->list_lock);
2217
2218 if (!connected)
2219 return -ENOTCONN;
2220 else if (ret)
2221 return ret;
2222
2223 ar->state = ATH6KL_STATE_SUSPENDING;
2224
2225 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, first_vif->fw_vif_idx,
2223 ATH6KL_WOW_MODE_ENABLE, 2226 ATH6KL_WOW_MODE_ENABLE,
2224 filter, 2227 filter,
2225 WOW_HOST_REQ_DELAY); 2228 WOW_HOST_REQ_DELAY);
2226 if (ret) 2229 if (ret)
2227 return ret; 2230 return ret;
2228 2231
2229 ret = ath6kl_cfg80211_host_sleep(ar, vif); 2232 return ath6kl_cfg80211_host_sleep(ar, first_vif);
2230 if (ret)
2231 return ret;
2232
2233 return 0;
2234} 2233}
2235 2234
2236static int ath6kl_wow_resume(struct ath6kl *ar) 2235static int ath6kl_wow_resume_vif(struct ath6kl_vif *vif)
2237{ 2236{
2238 struct ath6kl_vif *vif; 2237 struct ath6kl *ar = vif->ar;
2239 int ret; 2238 int ret;
2240 2239
2241 vif = ath6kl_vif_first(ar);
2242 if (!vif)
2243 return -EIO;
2244
2245 ar->state = ATH6KL_STATE_RESUMING;
2246
2247 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2248 ATH6KL_HOST_MODE_AWAKE);
2249 if (ret) {
2250 ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
2251 ret);
2252 ar->state = ATH6KL_STATE_WOW;
2253 return ret;
2254 }
2255
2256 if (vif->nw_type != AP_NETWORK) { 2240 if (vif->nw_type != AP_NETWORK) {
2257 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 2241 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
2258 0, 0, 0, 0, 0, 0, 3, 0, 0, 0); 2242 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
@@ -2270,13 +2254,11 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2270 return ret; 2254 return ret;
2271 } 2255 }
2272 2256
2273 ar->state = ATH6KL_STATE_ON;
2274
2275 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) && 2257 if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
2276 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, 2258 test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
2277 ar->fw_capabilities)) { 2259 ar->fw_capabilities)) {
2278 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi, 2260 ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
2279 vif->fw_vif_idx, true); 2261 vif->fw_vif_idx, true);
2280 if (ret) 2262 if (ret)
2281 return ret; 2263 return ret;
2282 } 2264 }
@@ -2286,6 +2268,48 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2286 return 0; 2268 return 0;
2287} 2269}
2288 2270
2271static int ath6kl_wow_resume(struct ath6kl *ar)
2272{
2273 struct ath6kl_vif *vif;
2274 int ret;
2275
2276 vif = ath6kl_vif_first(ar);
2277 if (WARN_ON(unlikely(!vif)) ||
2278 !ath6kl_cfg80211_ready(vif))
2279 return -EIO;
2280
2281 ar->state = ATH6KL_STATE_RESUMING;
2282
2283 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2284 ATH6KL_HOST_MODE_AWAKE);
2285 if (ret) {
2286 ath6kl_warn("Failed to configure host sleep mode for wow resume: %d\n",
2287 ret);
2288 goto cleanup;
2289 }
2290
2291 spin_lock_bh(&ar->list_lock);
2292 list_for_each_entry(vif, &ar->vif_list, list) {
2293 if (!test_bit(CONNECTED, &vif->flags) ||
2294 !ath6kl_cfg80211_ready(vif))
2295 continue;
2296 ret = ath6kl_wow_resume_vif(vif);
2297 if (ret)
2298 break;
2299 }
2300 spin_unlock_bh(&ar->list_lock);
2301
2302 if (ret)
2303 goto cleanup;
2304
2305 ar->state = ATH6KL_STATE_ON;
2306 return 0;
2307
2308cleanup:
2309 ar->state = ATH6KL_STATE_WOW;
2310 return ret;
2311}
2312
2289static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar) 2313static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
2290{ 2314{
2291 struct ath6kl_vif *vif; 2315 struct ath6kl_vif *vif;
@@ -2422,13 +2446,6 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2422 2446
2423 break; 2447 break;
2424 2448
2425 case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
2426 /*
2427 * Nothing needed for schedule scan, firmware is already in
2428 * wow mode and sleeping most of the time.
2429 */
2430 break;
2431
2432 default: 2449 default:
2433 break; 2450 break;
2434 } 2451 }
@@ -2476,9 +2493,6 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
2476 } 2493 }
2477 break; 2494 break;
2478 2495
2479 case ATH6KL_STATE_SCHED_SCAN:
2480 break;
2481
2482 default: 2496 default:
2483 break; 2497 break;
2484 } 2498 }
@@ -2495,14 +2509,23 @@ static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
2495{ 2509{
2496 struct ath6kl *ar = wiphy_priv(wiphy); 2510 struct ath6kl *ar = wiphy_priv(wiphy);
2497 2511
2512 ath6kl_recovery_suspend(ar);
2513
2498 return ath6kl_hif_suspend(ar, wow); 2514 return ath6kl_hif_suspend(ar, wow);
2499} 2515}
2500 2516
2501static int __ath6kl_cfg80211_resume(struct wiphy *wiphy) 2517static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
2502{ 2518{
2503 struct ath6kl *ar = wiphy_priv(wiphy); 2519 struct ath6kl *ar = wiphy_priv(wiphy);
2520 int err;
2504 2521
2505 return ath6kl_hif_resume(ar); 2522 err = ath6kl_hif_resume(ar);
2523 if (err)
2524 return err;
2525
2526 ath6kl_recovery_resume(ar);
2527
2528 return 0;
2506} 2529}
2507 2530
2508/* 2531/*
@@ -2739,6 +2762,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2739 int res; 2762 int res;
2740 int i, ret; 2763 int i, ret;
2741 u16 rsn_capab = 0; 2764 u16 rsn_capab = 0;
2765 int inactivity_timeout = 0;
2742 2766
2743 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__); 2767 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
2744 2768
@@ -2857,7 +2881,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2857 p.ssid_len = vif->ssid_len; 2881 p.ssid_len = vif->ssid_len;
2858 memcpy(p.ssid, vif->ssid, vif->ssid_len); 2882 memcpy(p.ssid, vif->ssid, vif->ssid_len);
2859 p.dot11_auth_mode = vif->dot11_auth_mode; 2883 p.dot11_auth_mode = vif->dot11_auth_mode;
2860 p.ch = cpu_to_le16(info->channel->center_freq); 2884 p.ch = cpu_to_le16(info->chandef.chan->center_freq);
2861 2885
2862 /* Enable uAPSD support by default */ 2886 /* Enable uAPSD support by default */
2863 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true); 2887 res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2875,14 +2899,22 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2875 } 2899 }
2876 2900
2877 if (info->inactivity_timeout) { 2901 if (info->inactivity_timeout) {
2902
2903 inactivity_timeout = info->inactivity_timeout;
2904
2905 if (ar->hw.flags & ATH6KL_HW_AP_INACTIVITY_MINS)
2906 inactivity_timeout = DIV_ROUND_UP(inactivity_timeout,
2907 60);
2908
2878 res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx, 2909 res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
2879 info->inactivity_timeout); 2910 inactivity_timeout);
2880 if (res < 0) 2911 if (res < 0)
2881 return res; 2912 return res;
2882 } 2913 }
2883 2914
2884 if (ath6kl_set_htcap(vif, info->channel->band, 2915 if (ath6kl_set_htcap(vif, info->chandef.chan->band,
2885 info->channel_type != NL80211_CHAN_NO_HT)) 2916 cfg80211_get_chandef_type(&info->chandef)
2917 != NL80211_CHAN_NO_HT))
2886 return -EIO; 2918 return -EIO;
2887 2919
2888 /* 2920 /*
@@ -2898,6 +2930,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2898 WLAN_EID_RSN, WMI_RSN_IE_CAPB, 2930 WLAN_EID_RSN, WMI_RSN_IE_CAPB,
2899 (const u8 *) &rsn_capab, 2931 (const u8 *) &rsn_capab,
2900 sizeof(rsn_capab)); 2932 sizeof(rsn_capab));
2933 vif->rsn_capab = rsn_capab;
2901 if (res < 0) 2934 if (res < 0)
2902 return res; 2935 return res;
2903 } 2936 }
@@ -2977,7 +3010,6 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
2977static int ath6kl_remain_on_channel(struct wiphy *wiphy, 3010static int ath6kl_remain_on_channel(struct wiphy *wiphy,
2978 struct wireless_dev *wdev, 3011 struct wireless_dev *wdev,
2979 struct ieee80211_channel *chan, 3012 struct ieee80211_channel *chan,
2980 enum nl80211_channel_type channel_type,
2981 unsigned int duration, 3013 unsigned int duration,
2982 u64 *cookie) 3014 u64 *cookie)
2983{ 3015{
@@ -3136,10 +3168,8 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
3136 3168
3137static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 3169static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
3138 struct ieee80211_channel *chan, bool offchan, 3170 struct ieee80211_channel *chan, bool offchan,
3139 enum nl80211_channel_type channel_type, 3171 unsigned int wait, const u8 *buf, size_t len,
3140 bool channel_type_valid, unsigned int wait, 3172 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
3141 const u8 *buf, size_t len, bool no_cck,
3142 bool dont_wait_for_ack, u64 *cookie)
3143{ 3173{
3144 struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev); 3174 struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
3145 struct ath6kl *ar = ath6kl_priv(vif->ndev); 3175 struct ath6kl *ar = ath6kl_priv(vif->ndev);
@@ -3211,7 +3241,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3211 struct ath6kl *ar = ath6kl_priv(dev); 3241 struct ath6kl *ar = ath6kl_priv(dev);
3212 struct ath6kl_vif *vif = netdev_priv(dev); 3242 struct ath6kl_vif *vif = netdev_priv(dev);
3213 u16 interval; 3243 u16 interval;
3214 int ret; 3244 int ret, rssi_thold;
3215 3245
3216 if (ar->state != ATH6KL_STATE_ON) 3246 if (ar->state != ATH6KL_STATE_ON)
3217 return -EIO; 3247 return -EIO;
@@ -3219,10 +3249,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3219 if (vif->sme_state != SME_DISCONNECTED) 3249 if (vif->sme_state != SME_DISCONNECTED)
3220 return -EBUSY; 3250 return -EBUSY;
3221 3251
3222 /* The FW currently can't support multi-vif WoW properly. */
3223 if (ar->num_vif > 1)
3224 return -EIO;
3225
3226 ath6kl_cfg80211_scan_complete_event(vif, true); 3252 ath6kl_cfg80211_scan_complete_event(vif, true);
3227 3253
3228 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids, 3254 ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
@@ -3244,6 +3270,23 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3244 return ret; 3270 return ret;
3245 } 3271 }
3246 3272
3273 if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
3274 ar->fw_capabilities)) {
3275 if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
3276 rssi_thold = 0;
3277 else if (request->rssi_thold < -127)
3278 rssi_thold = -127;
3279 else
3280 rssi_thold = request->rssi_thold;
3281
3282 ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
3283 rssi_thold);
3284 if (ret) {
3285 ath6kl_err("failed to set RSSI threshold for scan\n");
3286 return ret;
3287 }
3288 }
3289
3247 /* fw uses seconds, also make sure that it's >0 */ 3290 /* fw uses seconds, also make sure that it's >0 */
3248 interval = max_t(u16, 1, request->interval / 1000); 3291 interval = max_t(u16, 1, request->interval / 1000);
3249 3292
@@ -3251,15 +3294,6 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3251 interval, interval, 3294 interval, interval,
3252 vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0); 3295 vif->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
3253 3296
3254 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
3255 ATH6KL_WOW_MODE_ENABLE,
3256 WOW_FILTER_SSID,
3257 WOW_HOST_REQ_DELAY);
3258 if (ret) {
3259 ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
3260 return ret;
3261 }
3262
3263 /* this also clears IE in fw if it's not set */ 3297 /* this also clears IE in fw if it's not set */
3264 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx, 3298 ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
3265 WMI_FRAME_PROBE_REQ, 3299 WMI_FRAME_PROBE_REQ,
@@ -3270,17 +3304,13 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
3270 return ret; 3304 return ret;
3271 } 3305 }
3272 3306
3273 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx, 3307 ret = ath6kl_wmi_enable_sched_scan_cmd(ar->wmi, vif->fw_vif_idx, true);
3274 ATH6KL_HOST_MODE_ASLEEP); 3308 if (ret)
3275 if (ret) {
3276 ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
3277 ret);
3278 return ret; 3309 return ret;
3279 }
3280 3310
3281 ar->state = ATH6KL_STATE_SCHED_SCAN; 3311 set_bit(SCHED_SCANNING, &vif->flags);
3282 3312
3283 return ret; 3313 return 0;
3284} 3314}
3285 3315
3286static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy, 3316static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
@@ -3309,6 +3339,27 @@ static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
3309 mask); 3339 mask);
3310} 3340}
3311 3341
3342static int ath6kl_cfg80211_set_txe_config(struct wiphy *wiphy,
3343 struct net_device *dev,
3344 u32 rate, u32 pkts, u32 intvl)
3345{
3346 struct ath6kl *ar = ath6kl_priv(dev);
3347 struct ath6kl_vif *vif = netdev_priv(dev);
3348
3349 if (vif->nw_type != INFRA_NETWORK ||
3350 !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, ar->fw_capabilities))
3351 return -EOPNOTSUPP;
3352
3353 if (vif->sme_state != SME_CONNECTED)
3354 return -ENOTCONN;
3355
3356 /* save this since the firmware won't report the interval */
3357 vif->txe_intvl = intvl;
3358
3359 return ath6kl_wmi_set_txe_notify(ar->wmi, vif->fw_vif_idx,
3360 rate, pkts, intvl);
3361}
3362
3312static const struct ieee80211_txrx_stypes 3363static const struct ieee80211_txrx_stypes
3313ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = { 3364ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
3314 [NL80211_IFTYPE_STATION] = { 3365 [NL80211_IFTYPE_STATION] = {
@@ -3375,6 +3426,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
3375 .sched_scan_start = ath6kl_cfg80211_sscan_start, 3426 .sched_scan_start = ath6kl_cfg80211_sscan_start,
3376 .sched_scan_stop = ath6kl_cfg80211_sscan_stop, 3427 .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
3377 .set_bitrate_mask = ath6kl_cfg80211_set_bitrate, 3428 .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
3429 .set_cqm_txe_config = ath6kl_cfg80211_set_txe_config,
3378}; 3430};
3379 3431
3380void ath6kl_cfg80211_stop(struct ath6kl_vif *vif) 3432void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3395,16 +3447,22 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
3395 break; 3447 break;
3396 } 3448 }
3397 3449
3398 if (test_bit(CONNECTED, &vif->flags) || 3450 if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
3399 test_bit(CONNECT_PEND, &vif->flags)) 3451 (test_bit(CONNECTED, &vif->flags) ||
3452 test_bit(CONNECT_PEND, &vif->flags)))
3400 ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx); 3453 ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
3401 3454
3402 vif->sme_state = SME_DISCONNECTED; 3455 vif->sme_state = SME_DISCONNECTED;
3403 clear_bit(CONNECTED, &vif->flags); 3456 clear_bit(CONNECTED, &vif->flags);
3404 clear_bit(CONNECT_PEND, &vif->flags); 3457 clear_bit(CONNECT_PEND, &vif->flags);
3405 3458
3459 /* Stop netdev queues, needed during recovery */
3460 netif_stop_queue(vif->ndev);
3461 netif_carrier_off(vif->ndev);
3462
3406 /* disable scanning */ 3463 /* disable scanning */
3407 if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF, 3464 if (vif->ar->state != ATH6KL_STATE_RECOVERY &&
3465 ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
3408 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0) 3466 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
3409 ath6kl_warn("failed to disable scan during stop\n"); 3467 ath6kl_warn("failed to disable scan during stop\n");
3410 3468
@@ -3416,7 +3474,7 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
3416 struct ath6kl_vif *vif; 3474 struct ath6kl_vif *vif;
3417 3475
3418 vif = ath6kl_vif_first(ar); 3476 vif = ath6kl_vif_first(ar);
3419 if (!vif) { 3477 if (!vif && ar->state != ATH6KL_STATE_RECOVERY) {
3420 /* save the current power mode before enabling power save */ 3478 /* save the current power mode before enabling power save */
3421 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode; 3479 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
3422 3480
@@ -3434,6 +3492,56 @@ void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
3434 ath6kl_cfg80211_stop(vif); 3492 ath6kl_cfg80211_stop(vif);
3435} 3493}
3436 3494
3495static int ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
3496 struct regulatory_request *request)
3497{
3498 struct ath6kl *ar = wiphy_priv(wiphy);
3499 u32 rates[IEEE80211_NUM_BANDS];
3500 int ret, i;
3501
3502 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
3503 "cfg reg_notify %c%c%s%s initiator %d hint_type %d\n",
3504 request->alpha2[0], request->alpha2[1],
3505 request->intersect ? " intersect" : "",
3506 request->processed ? " processed" : "",
3507 request->initiator, request->user_reg_hint_type);
3508
3509 /*
3510 * As firmware is not able intersect regdoms, we can only listen to
3511 * cellular hints.
3512 */
3513 if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
3514 return -EOPNOTSUPP;
3515
3516 ret = ath6kl_wmi_set_regdomain_cmd(ar->wmi, request->alpha2);
3517 if (ret) {
3518 ath6kl_err("failed to set regdomain: %d\n", ret);
3519 return ret;
3520 }
3521
3522 /*
3523 * Firmware will apply the regdomain change only after a scan is
3524 * issued and it will send a WMI_REGDOMAIN_EVENTID when it has been
3525 * changed.
3526 */
3527
3528 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
3529 if (wiphy->bands[i])
3530 rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
3531
3532
3533 ret = ath6kl_wmi_beginscan_cmd(ar->wmi, 0, WMI_LONG_SCAN, false,
3534 false, 0, ATH6KL_FG_SCAN_INTERVAL,
3535 0, NULL, false, rates);
3536 if (ret) {
3537 ath6kl_err("failed to start scan for a regdomain change: %d\n",
3538 ret);
3539 return ret;
3540 }
3541
3542 return 0;
3543}
3544
3437static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif) 3545static int ath6kl_cfg80211_vif_init(struct ath6kl_vif *vif)
3438{ 3546{
3439 vif->aggr_cntxt = aggr_init(vif); 3547 vif->aggr_cntxt = aggr_init(vif);
@@ -3506,9 +3614,13 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3506 vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true; 3614 vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
3507 3615
3508 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3616 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3509 if (fw_vif_idx != 0) 3617 if (fw_vif_idx != 0) {
3510 ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) | 3618 ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
3511 0x2; 3619 0x2;
3620 if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
3621 ar->fw_capabilities))
3622 ndev->dev_addr[4] ^= 0x80;
3623 }
3512 3624
3513 init_netdev(ndev); 3625 init_netdev(ndev);
3514 3626
@@ -3562,6 +3674,12 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3562 BIT(NL80211_IFTYPE_P2P_CLIENT); 3674 BIT(NL80211_IFTYPE_P2P_CLIENT);
3563 } 3675 }
3564 3676
3677 if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
3678 test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
3679 wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
3680 ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
3681 }
3682
3565 /* max num of ssids that can be probed during scanning */ 3683 /* max num of ssids that can be probed during scanning */
3566 wiphy->max_scan_ssids = MAX_PROBED_SSIDS; 3684 wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
3567 3685
@@ -3607,7 +3725,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3607 ath6kl_band_5ghz.ht_cap.ht_supported = false; 3725 ath6kl_band_5ghz.ht_cap.ht_supported = false;
3608 } 3726 }
3609 3727
3610 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) { 3728 if (ar->hw.flags & ATH6KL_HW_64BIT_RATES) {
3611 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff; 3729 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3612 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff; 3730 ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
3613 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff; 3731 ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
@@ -3646,12 +3764,12 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3646 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 3764 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
3647 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 3765 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
3648 3766
3649 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) 3767 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, ar->fw_capabilities))
3650 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 3768 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
3651 3769
3652 if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, 3770 if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
3653 ar->fw_capabilities)) 3771 ar->fw_capabilities))
3654 ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER; 3772 ar->wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER;
3655 3773
3656 ar->wiphy->probe_resp_offload = 3774 ar->wiphy->probe_resp_offload =
3657 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 3775 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 780f77775a91..e5e70f3a8ca8 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -22,7 +22,6 @@ enum ath6kl_cfg_suspend_mode {
22 ATH6KL_CFG_SUSPEND_DEEPSLEEP, 22 ATH6KL_CFG_SUSPEND_DEEPSLEEP,
23 ATH6KL_CFG_SUSPEND_CUTPOWER, 23 ATH6KL_CFG_SUSPEND_CUTPOWER,
24 ATH6KL_CFG_SUSPEND_WOW, 24 ATH6KL_CFG_SUSPEND_WOW,
25 ATH6KL_CFG_SUSPEND_SCHED_SCAN,
26}; 25};
27 26
28struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name, 27struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 82c4dd2a960e..4b46adbe8c92 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -33,6 +33,8 @@ static unsigned int wow_mode;
33static unsigned int uart_debug; 33static unsigned int uart_debug;
34static unsigned int ath6kl_p2p; 34static unsigned int ath6kl_p2p;
35static unsigned int testmode; 35static unsigned int testmode;
36static unsigned int recovery_enable;
37static unsigned int heart_beat_poll;
36 38
37module_param(debug_mask, uint, 0644); 39module_param(debug_mask, uint, 0644);
38module_param(suspend_mode, uint, 0644); 40module_param(suspend_mode, uint, 0644);
@@ -40,6 +42,12 @@ module_param(wow_mode, uint, 0644);
40module_param(uart_debug, uint, 0644); 42module_param(uart_debug, uint, 0644);
41module_param(ath6kl_p2p, uint, 0644); 43module_param(ath6kl_p2p, uint, 0644);
42module_param(testmode, uint, 0644); 44module_param(testmode, uint, 0644);
45module_param(recovery_enable, uint, 0644);
46module_param(heart_beat_poll, uint, 0644);
47MODULE_PARM_DESC(recovery_enable, "Enable recovery from firmware error");
48MODULE_PARM_DESC(heart_beat_poll, "Enable fw error detection periodic" \
49 "polling. This also specifies the polling interval in" \
50 "msecs. Set reocvery_enable for this to be effective");
43 51
44void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb) 52void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
45{ 53{
@@ -202,6 +210,17 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
202 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n", 210 ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
203 __func__, wdev->netdev->name, wdev->netdev, ar); 211 __func__, wdev->netdev->name, wdev->netdev, ar);
204 212
213 ar->fw_recovery.enable = !!recovery_enable;
214 if (!ar->fw_recovery.enable)
215 return ret;
216
217 if (heart_beat_poll &&
218 test_bit(ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
219 ar->fw_capabilities))
220 ar->fw_recovery.hb_poll = heart_beat_poll;
221
222 ath6kl_recovery_init(ar);
223
205 return ret; 224 return ret;
206 225
207err_rxbuf_cleanup: 226err_rxbuf_cleanup:
@@ -291,6 +310,8 @@ void ath6kl_core_cleanup(struct ath6kl *ar)
291{ 310{
292 ath6kl_hif_power_off(ar); 311 ath6kl_hif_power_off(ar);
293 312
313 ath6kl_recovery_cleanup(ar);
314
294 destroy_workqueue(ar->ath6kl_wq); 315 destroy_workqueue(ar->ath6kl_wq);
295 316
296 if (ar->htc_target) 317 if (ar->htc_target)
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index cec49a31029a..189d8faf8c87 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -115,6 +115,27 @@ enum ath6kl_fw_capability {
115 */ 115 */
116 ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, 116 ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
117 117
118 /* Firmware supports filtering BSS results by RSSI */
119 ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
120
121 /* FW sets mac_addr[4] ^= 0x80 for newly created interfaces */
122 ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
123
124 /* Firmware supports TX error rate notification */
125 ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
126
127 /* supports WMI_SET_REGDOMAIN_CMDID command */
128 ATH6KL_FW_CAPABILITY_REGDOMAIN,
129
130 /* Firmware supports sched scan decoupled from host sleep */
131 ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2,
132
133 /*
134 * Firmware capability for hang detection through heart beat
135 * challenge messages.
136 */
137 ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL,
138
118 /* this needs to be last */ 139 /* this needs to be last */
119 ATH6KL_FW_CAPABILITY_MAX, 140 ATH6KL_FW_CAPABILITY_MAX,
120}; 141};
@@ -128,11 +149,15 @@ struct ath6kl_fw_ie {
128}; 149};
129 150
130enum ath6kl_hw_flags { 151enum ath6kl_hw_flags {
131 ATH6KL_HW_FLAG_64BIT_RATES = BIT(0), 152 ATH6KL_HW_64BIT_RATES = BIT(0),
153 ATH6KL_HW_AP_INACTIVITY_MINS = BIT(1),
154 ATH6KL_HW_MAP_LP_ENDPOINT = BIT(2),
155 ATH6KL_HW_SDIO_CRC_ERROR_WAR = BIT(3),
132}; 156};
133 157
134#define ATH6KL_FW_API2_FILE "fw-2.bin" 158#define ATH6KL_FW_API2_FILE "fw-2.bin"
135#define ATH6KL_FW_API3_FILE "fw-3.bin" 159#define ATH6KL_FW_API3_FILE "fw-3.bin"
160#define ATH6KL_FW_API4_FILE "fw-4.bin"
136 161
137/* AR6003 1.0 definitions */ 162/* AR6003 1.0 definitions */
138#define AR6003_HW_1_0_VERSION 0x300002ba 163#define AR6003_HW_1_0_VERSION 0x300002ba
@@ -186,6 +211,13 @@ enum ath6kl_hw_flags {
186#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \ 211#define AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE \
187 AR6004_HW_1_2_FW_DIR "/bdata.bin" 212 AR6004_HW_1_2_FW_DIR "/bdata.bin"
188 213
214/* AR6004 1.3 definitions */
215#define AR6004_HW_1_3_VERSION 0x31c8088a
216#define AR6004_HW_1_3_FW_DIR "ath6k/AR6004/hw1.3"
217#define AR6004_HW_1_3_FIRMWARE_FILE "fw.ram.bin"
218#define AR6004_HW_1_3_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin"
219#define AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw1.3/bdata.bin"
220
189/* Per STA data, used in AP mode */ 221/* Per STA data, used in AP mode */
190#define STA_PS_AWAKE BIT(0) 222#define STA_PS_AWAKE BIT(0)
191#define STA_PS_SLEEP BIT(1) 223#define STA_PS_SLEEP BIT(1)
@@ -536,6 +568,7 @@ enum ath6kl_vif_state {
536 HOST_SLEEP_MODE_CMD_PROCESSED, 568 HOST_SLEEP_MODE_CMD_PROCESSED,
537 NETDEV_MCAST_ALL_ON, 569 NETDEV_MCAST_ALL_ON,
538 NETDEV_MCAST_ALL_OFF, 570 NETDEV_MCAST_ALL_OFF,
571 SCHED_SCANNING,
539}; 572};
540 573
541struct ath6kl_vif { 574struct ath6kl_vif {
@@ -580,11 +613,13 @@ struct ath6kl_vif {
580 u16 assoc_bss_beacon_int; 613 u16 assoc_bss_beacon_int;
581 u16 listen_intvl_t; 614 u16 listen_intvl_t;
582 u16 bmiss_time_t; 615 u16 bmiss_time_t;
616 u32 txe_intvl;
583 u16 bg_scan_period; 617 u16 bg_scan_period;
584 u8 assoc_bss_dtim_period; 618 u8 assoc_bss_dtim_period;
585 struct net_device_stats net_stats; 619 struct net_device_stats net_stats;
586 struct target_stats target_stats; 620 struct target_stats target_stats;
587 struct wmi_connect_cmd profile; 621 struct wmi_connect_cmd profile;
622 u16 rsn_capab;
588 623
589 struct list_head mc_filter; 624 struct list_head mc_filter;
590}; 625};
@@ -609,6 +644,7 @@ enum ath6kl_dev_state {
609 SKIP_SCAN, 644 SKIP_SCAN,
610 ROAM_TBL_PEND, 645 ROAM_TBL_PEND,
611 FIRST_BOOT, 646 FIRST_BOOT,
647 RECOVERY_CLEANUP,
612}; 648};
613 649
614enum ath6kl_state { 650enum ath6kl_state {
@@ -619,7 +655,16 @@ enum ath6kl_state {
619 ATH6KL_STATE_DEEPSLEEP, 655 ATH6KL_STATE_DEEPSLEEP,
620 ATH6KL_STATE_CUTPOWER, 656 ATH6KL_STATE_CUTPOWER,
621 ATH6KL_STATE_WOW, 657 ATH6KL_STATE_WOW,
622 ATH6KL_STATE_SCHED_SCAN, 658 ATH6KL_STATE_RECOVERY,
659};
660
661/* Fw error recovery */
662#define ATH6KL_HB_RESP_MISS_THRES 5
663
664enum ath6kl_fw_err {
665 ATH6KL_FW_ASSERT,
666 ATH6KL_FW_HB_RESP_FAILURE,
667 ATH6KL_FW_EP_FULL,
623}; 668};
624 669
625struct ath6kl { 670struct ath6kl {
@@ -679,6 +724,7 @@ struct ath6kl {
679 struct ath6kl_req_key ap_mode_bkey; 724 struct ath6kl_req_key ap_mode_bkey;
680 struct sk_buff_head mcastpsq; 725 struct sk_buff_head mcastpsq;
681 u32 want_ch_switch; 726 u32 want_ch_switch;
727 u16 last_ch;
682 728
683 /* 729 /*
684 * FIXME: protects access to mcastpsq but is actually useless as 730 * FIXME: protects access to mcastpsq but is actually useless as
@@ -764,6 +810,17 @@ struct ath6kl {
764 810
765 bool wiphy_registered; 811 bool wiphy_registered;
766 812
813 struct ath6kl_fw_recovery {
814 struct work_struct recovery_work;
815 unsigned long err_reason;
816 unsigned long hb_poll;
817 struct timer_list hb_timer;
818 u32 seq_num;
819 bool hb_pending;
820 u8 hb_misscnt;
821 bool enable;
822 } fw_recovery;
823
767#ifdef CONFIG_ATH6KL_DEBUG 824#ifdef CONFIG_ATH6KL_DEBUG
768 struct { 825 struct {
769 struct sk_buff_head fwlog_queue; 826 struct sk_buff_head fwlog_queue;
@@ -899,4 +956,12 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
899void ath6kl_core_cleanup(struct ath6kl *ar); 956void ath6kl_core_cleanup(struct ath6kl *ar);
900void ath6kl_core_destroy(struct ath6kl *ar); 957void ath6kl_core_destroy(struct ath6kl *ar);
901 958
959/* Fw error recovery */
960void ath6kl_init_hw_restart(struct ath6kl *ar);
961void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason);
962void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie);
963void ath6kl_recovery_init(struct ath6kl *ar);
964void ath6kl_recovery_cleanup(struct ath6kl *ar);
965void ath6kl_recovery_suspend(struct ath6kl *ar);
966void ath6kl_recovery_resume(struct ath6kl *ar);
902#endif /* CORE_H */ 967#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 49639d8266c2..f97cd4ead543 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -44,6 +44,7 @@ enum ATH6K_DEBUG_MASK {
44 ATH6KL_DBG_SUSPEND = BIT(20), 44 ATH6KL_DBG_SUSPEND = BIT(20),
45 ATH6KL_DBG_USB = BIT(21), 45 ATH6KL_DBG_USB = BIT(21),
46 ATH6KL_DBG_USB_BULK = BIT(22), 46 ATH6KL_DBG_USB_BULK = BIT(22),
47 ATH6KL_DBG_RECOVERY = BIT(23),
47 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ 48 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
48}; 49};
49 50
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 68ed6c2665b7..a6b614421fa4 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -136,6 +136,7 @@ static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
136 136
137 ath6kl_hif_dump_fw_crash(dev->ar); 137 ath6kl_hif_dump_fw_crash(dev->ar);
138 ath6kl_read_fwlogs(dev->ar); 138 ath6kl_read_fwlogs(dev->ar);
139 ath6kl_recovery_err_notify(dev->ar, ATH6KL_FW_ASSERT);
139 140
140 return ret; 141 return ret;
141} 142}
@@ -338,8 +339,7 @@ static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
338 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS, 339 status = hif_read_write_sync(dev->ar, ERROR_INT_STATUS_ADDRESS,
339 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); 340 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
340 341
341 if (status) 342 WARN_ON(status);
342 WARN_ON(1);
343 343
344 return status; 344 return status;
345} 345}
@@ -383,8 +383,7 @@ static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
383 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS, 383 status = hif_read_write_sync(dev->ar, CPU_INT_STATUS_ADDRESS,
384 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX); 384 reg_buf, 4, HIF_WR_SYNC_BYTE_FIX);
385 385
386 if (status) 386 WARN_ON(status);
387 WARN_ON(1);
388 387
389 return status; 388 return status;
390} 389}
@@ -695,11 +694,6 @@ int ath6kl_hif_setup(struct ath6kl_device *dev)
695 ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n", 694 ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
696 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr); 695 dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
697 696
698 /* usb doesn't support enabling interrupts */
699 /* FIXME: remove check once USB support is implemented */
700 if (dev->ar->hif_type == ATH6KL_HIF_TYPE_USB)
701 return 0;
702
703 status = ath6kl_hif_disable_intrs(dev); 697 status = ath6kl_hif_disable_intrs(dev);
704 698
705fail_setup: 699fail_setup:
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index cd0e1ba410d6..fbb78dfe078f 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -2492,7 +2492,8 @@ static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
2492 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); 2492 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz);
2493 } 2493 }
2494 2494
2495 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) { 2495 if (WARN_ON_ONCE(assigned_ep == ENDPOINT_UNUSED ||
2496 assigned_ep >= ENDPOINT_MAX || !max_msg_sz)) {
2496 status = -ENOMEM; 2497 status = -ENOMEM;
2497 goto fail_tx; 2498 goto fail_tx;
2498 } 2499 }
@@ -2655,12 +2656,6 @@ static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
2655 struct htc_service_connect_resp resp; 2656 struct htc_service_connect_resp resp;
2656 int status; 2657 int status;
2657 2658
2658 /* FIXME: remove once USB support is implemented */
2659 if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
2660 ath6kl_err("HTC doesn't support USB yet. Patience!\n");
2661 return -EOPNOTSUPP;
2662 }
2663
2664 /* we should be getting 1 control message that the target is ready */ 2659 /* we should be getting 1 control message that the target is ready */
2665 packet = htc_wait_for_ctrl_msg(target); 2660 packet = htc_wait_for_ctrl_msg(target);
2666 2661
@@ -2890,9 +2885,7 @@ static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2890{ 2885{
2891 struct htc_packet *packet, *tmp_packet; 2886 struct htc_packet *packet, *tmp_packet;
2892 2887
2893 /* FIXME: remove check once USB support is implemented */ 2888 ath6kl_hif_cleanup_scatter(target->dev->ar);
2894 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB)
2895 ath6kl_hif_cleanup_scatter(target->dev->ar);
2896 2889
2897 list_for_each_entry_safe(packet, tmp_packet, 2890 list_for_each_entry_safe(packet, tmp_packet,
2898 &target->free_ctrl_txbuf, list) { 2891 &target->free_ctrl_txbuf, list) {
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index f9626c723693..ba6bd497b787 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -374,9 +374,8 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
374 packet = list_first_entry(txq, 374 packet = list_first_entry(txq,
375 struct htc_packet, 375 struct htc_packet,
376 list); 376 list);
377 list_del(&packet->list); 377 /* move to local queue */
378 /* insert into local queue */ 378 list_move_tail(&packet->list, &send_queue);
379 list_add_tail(&packet->list, &send_queue);
380 } 379 }
381 380
382 /* 381 /*
@@ -399,11 +398,10 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
399 * for cleanup */ 398 * for cleanup */
400 } else { 399 } else {
401 /* callback wants to keep this packet, 400 /* callback wants to keep this packet,
402 * remove from caller's queue */ 401 * move from caller's queue to the send
403 list_del(&packet->list); 402 * queue */
404 /* put it in the send queue */ 403 list_move_tail(&packet->list,
405 list_add_tail(&packet->list, 404 &send_queue);
406 &send_queue);
407 } 405 }
408 406
409 } 407 }
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index f90b5db741cf..f21fa322e5ca 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,7 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
42 .reserved_ram_size = 6912, 42 .reserved_ram_size = 6912,
43 .refclk_hz = 26000000, 43 .refclk_hz = 26000000,
44 .uarttx_pin = 8, 44 .uarttx_pin = 8,
45 .flags = 0, 45 .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
46 46
47 /* hw2.0 needs override address hardcoded */ 47 /* hw2.0 needs override address hardcoded */
48 .app_start_override_addr = 0x944C00, 48 .app_start_override_addr = 0x944C00,
@@ -68,7 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
68 .refclk_hz = 26000000, 68 .refclk_hz = 26000000,
69 .uarttx_pin = 8, 69 .uarttx_pin = 8,
70 .testscript_addr = 0x57ef74, 70 .testscript_addr = 0x57ef74,
71 .flags = 0, 71 .flags = ATH6KL_HW_SDIO_CRC_ERROR_WAR,
72 72
73 .fw = { 73 .fw = {
74 .dir = AR6003_HW_2_1_1_FW_DIR, 74 .dir = AR6003_HW_2_1_1_FW_DIR,
@@ -93,7 +93,8 @@ static const struct ath6kl_hw hw_list[] = {
93 .board_addr = 0x433900, 93 .board_addr = 0x433900,
94 .refclk_hz = 26000000, 94 .refclk_hz = 26000000,
95 .uarttx_pin = 11, 95 .uarttx_pin = 11,
96 .flags = ATH6KL_HW_FLAG_64BIT_RATES, 96 .flags = ATH6KL_HW_64BIT_RATES |
97 ATH6KL_HW_AP_INACTIVITY_MINS,
97 98
98 .fw = { 99 .fw = {
99 .dir = AR6004_HW_1_0_FW_DIR, 100 .dir = AR6004_HW_1_0_FW_DIR,
@@ -113,8 +114,8 @@ static const struct ath6kl_hw hw_list[] = {
113 .board_addr = 0x43d400, 114 .board_addr = 0x43d400,
114 .refclk_hz = 40000000, 115 .refclk_hz = 40000000,
115 .uarttx_pin = 11, 116 .uarttx_pin = 11,
116 .flags = ATH6KL_HW_FLAG_64BIT_RATES, 117 .flags = ATH6KL_HW_64BIT_RATES |
117 118 ATH6KL_HW_AP_INACTIVITY_MINS,
118 .fw = { 119 .fw = {
119 .dir = AR6004_HW_1_1_FW_DIR, 120 .dir = AR6004_HW_1_1_FW_DIR,
120 .fw = AR6004_HW_1_1_FIRMWARE_FILE, 121 .fw = AR6004_HW_1_1_FIRMWARE_FILE,
@@ -133,7 +134,8 @@ static const struct ath6kl_hw hw_list[] = {
133 .board_addr = 0x435c00, 134 .board_addr = 0x435c00,
134 .refclk_hz = 40000000, 135 .refclk_hz = 40000000,
135 .uarttx_pin = 11, 136 .uarttx_pin = 11,
136 .flags = ATH6KL_HW_FLAG_64BIT_RATES, 137 .flags = ATH6KL_HW_64BIT_RATES |
138 ATH6KL_HW_AP_INACTIVITY_MINS,
137 139
138 .fw = { 140 .fw = {
139 .dir = AR6004_HW_1_2_FW_DIR, 141 .dir = AR6004_HW_1_2_FW_DIR,
@@ -142,6 +144,28 @@ static const struct ath6kl_hw hw_list[] = {
142 .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE, 144 .fw_board = AR6004_HW_1_2_BOARD_DATA_FILE,
143 .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE, 145 .fw_default_board = AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE,
144 }, 146 },
147 {
148 .id = AR6004_HW_1_3_VERSION,
149 .name = "ar6004 hw 1.3",
150 .dataset_patch_addr = 0x437860,
151 .app_load_addr = 0x1234,
152 .board_ext_data_addr = 0x437000,
153 .reserved_ram_size = 7168,
154 .board_addr = 0x436400,
155 .refclk_hz = 40000000,
156 .uarttx_pin = 11,
157 .flags = ATH6KL_HW_64BIT_RATES |
158 ATH6KL_HW_AP_INACTIVITY_MINS |
159 ATH6KL_HW_MAP_LP_ENDPOINT,
160
161 .fw = {
162 .dir = AR6004_HW_1_3_FW_DIR,
163 .fw = AR6004_HW_1_3_FIRMWARE_FILE,
164 },
165
166 .fw_board = AR6004_HW_1_3_BOARD_DATA_FILE,
167 .fw_default_board = AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE,
168 },
145}; 169};
146 170
147/* 171/*
@@ -337,7 +361,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
337 if (ath6kl_connectservice(ar, &connect, "WMI DATA BK")) 361 if (ath6kl_connectservice(ar, &connect, "WMI DATA BK"))
338 return -EIO; 362 return -EIO;
339 363
340 /* connect to Video service, map this to to HI PRI */ 364 /* connect to Video service, map this to HI PRI */
341 connect.svc_id = WMI_DATA_VI_SVC; 365 connect.svc_id = WMI_DATA_VI_SVC;
342 if (ath6kl_connectservice(ar, &connect, "WMI DATA VI")) 366 if (ath6kl_connectservice(ar, &connect, "WMI DATA VI"))
343 return -EIO; 367 return -EIO;
@@ -1088,6 +1112,12 @@ int ath6kl_init_fetch_firmwares(struct ath6kl *ar)
1088 if (ret) 1112 if (ret)
1089 return ret; 1113 return ret;
1090 1114
1115 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API4_FILE);
1116 if (ret == 0) {
1117 ar->fw_api = 4;
1118 goto out;
1119 }
1120
1091 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE); 1121 ret = ath6kl_fetch_fw_apin(ar, ATH6KL_FW_API3_FILE);
1092 if (ret == 0) { 1122 if (ret == 0) {
1093 ar->fw_api = 3; 1123 ar->fw_api = 3;
@@ -1401,8 +1431,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
1401 return status; 1431 return status;
1402 1432
1403 /* WAR to avoid SDIO CRC err */ 1433 /* WAR to avoid SDIO CRC err */
1404 if (ar->version.target_ver == AR6003_HW_2_0_VERSION || 1434 if (ar->hw.flags & ATH6KL_HW_SDIO_CRC_ERROR_WAR) {
1405 ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
1406 ath6kl_err("temporary war to avoid sdio crc error\n"); 1435 ath6kl_err("temporary war to avoid sdio crc error\n");
1407 1436
1408 param = 0x28; 1437 param = 0x28;
@@ -1520,7 +1549,7 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
1520 return NULL; 1549 return NULL;
1521} 1550}
1522 1551
1523int ath6kl_init_hw_start(struct ath6kl *ar) 1552static int __ath6kl_init_hw_start(struct ath6kl *ar)
1524{ 1553{
1525 long timeleft; 1554 long timeleft;
1526 int ret, i; 1555 int ret, i;
@@ -1616,8 +1645,6 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
1616 goto err_htc_stop; 1645 goto err_htc_stop;
1617 } 1646 }
1618 1647
1619 ar->state = ATH6KL_STATE_ON;
1620
1621 return 0; 1648 return 0;
1622 1649
1623err_htc_stop: 1650err_htc_stop:
@@ -1630,7 +1657,18 @@ err_power_off:
1630 return ret; 1657 return ret;
1631} 1658}
1632 1659
1633int ath6kl_init_hw_stop(struct ath6kl *ar) 1660int ath6kl_init_hw_start(struct ath6kl *ar)
1661{
1662 int err;
1663
1664 err = __ath6kl_init_hw_start(ar);
1665 if (err)
1666 return err;
1667 ar->state = ATH6KL_STATE_ON;
1668 return 0;
1669}
1670
1671static int __ath6kl_init_hw_stop(struct ath6kl *ar)
1634{ 1672{
1635 int ret; 1673 int ret;
1636 1674
@@ -1646,11 +1684,37 @@ int ath6kl_init_hw_stop(struct ath6kl *ar)
1646 if (ret) 1684 if (ret)
1647 ath6kl_warn("failed to power off hif: %d\n", ret); 1685 ath6kl_warn("failed to power off hif: %d\n", ret);
1648 1686
1649 ar->state = ATH6KL_STATE_OFF; 1687 return 0;
1688}
1650 1689
1690int ath6kl_init_hw_stop(struct ath6kl *ar)
1691{
1692 int err;
1693
1694 err = __ath6kl_init_hw_stop(ar);
1695 if (err)
1696 return err;
1697 ar->state = ATH6KL_STATE_OFF;
1651 return 0; 1698 return 0;
1652} 1699}
1653 1700
1701void ath6kl_init_hw_restart(struct ath6kl *ar)
1702{
1703 clear_bit(WMI_READY, &ar->flag);
1704
1705 ath6kl_cfg80211_stop_all(ar);
1706
1707 if (__ath6kl_init_hw_stop(ar)) {
1708 ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to stop during fw error recovery\n");
1709 return;
1710 }
1711
1712 if (__ath6kl_init_hw_start(ar)) {
1713 ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Failed to restart during fw error recovery\n");
1714 return;
1715 }
1716}
1717
1654/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */ 1718/* FIXME: move this to cfg80211.c and rename to ath6kl_cfg80211_vif_stop() */
1655void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready) 1719void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
1656{ 1720{
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index c189e28e86a9..bd50b6b7b492 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -293,13 +293,17 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
293 } 293 }
294 294
295 address = TARG_VTOP(ar->target_type, debug_hdr_addr); 295 address = TARG_VTOP(ar->target_type, debug_hdr_addr);
296 ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr)); 296 ret = ath6kl_diag_read(ar, address, &debug_hdr, sizeof(debug_hdr));
297 if (ret)
298 goto out;
297 299
298 address = TARG_VTOP(ar->target_type, 300 address = TARG_VTOP(ar->target_type,
299 le32_to_cpu(debug_hdr.dbuf_addr)); 301 le32_to_cpu(debug_hdr.dbuf_addr));
300 firstbuf = address; 302 firstbuf = address;
301 dropped = le32_to_cpu(debug_hdr.dropped); 303 dropped = le32_to_cpu(debug_hdr.dropped);
302 ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); 304 ret = ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf));
305 if (ret)
306 goto out;
303 307
304 loop = 100; 308 loop = 100;
305 309
@@ -322,7 +326,8 @@ int ath6kl_read_fwlogs(struct ath6kl *ar)
322 326
323 address = TARG_VTOP(ar->target_type, 327 address = TARG_VTOP(ar->target_type,
324 le32_to_cpu(debug_buf.next)); 328 le32_to_cpu(debug_buf.next));
325 ath6kl_diag_read(ar, address, &debug_buf, sizeof(debug_buf)); 329 ret = ath6kl_diag_read(ar, address, &debug_buf,
330 sizeof(debug_buf));
326 if (ret) 331 if (ret)
327 goto out; 332 goto out;
328 333
@@ -436,12 +441,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
436 break; 441 break;
437 } 442 }
438 443
439 if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) { 444 if (ar->last_ch != channel)
440 ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
441 /* we actually don't know the phymode, default to HT20 */ 445 /* we actually don't know the phymode, default to HT20 */
442 ath6kl_cfg80211_ch_switch_notify(vif, channel, 446 ath6kl_cfg80211_ch_switch_notify(vif, channel, WMI_11G_HT20);
443 WMI_11G_HT20);
444 }
445 447
446 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0); 448 ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
447 set_bit(CONNECTED, &vif->flags); 449 set_bit(CONNECTED, &vif->flags);
@@ -606,6 +608,18 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
606 608
607 switch (vif->nw_type) { 609 switch (vif->nw_type) {
608 case AP_NETWORK: 610 case AP_NETWORK:
611 /*
612 * reconfigure any saved RSN IE capabilites in the beacon /
613 * probe response to stay in sync with the supplicant.
614 */
615 if (vif->rsn_capab &&
616 test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
617 ar->fw_capabilities))
618 ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
619 WLAN_EID_RSN, WMI_RSN_IE_CAPB,
620 (const u8 *) &vif->rsn_capab,
621 sizeof(vif->rsn_capab));
622
609 return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, 623 return ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx,
610 &vif->profile); 624 &vif->profile);
611 default: 625 default:
@@ -628,6 +642,9 @@ static void ath6kl_check_ch_switch(struct ath6kl *ar, u16 channel)
628 if (ar->want_ch_switch & (1 << vif->fw_vif_idx)) 642 if (ar->want_ch_switch & (1 << vif->fw_vif_idx))
629 res = ath6kl_commit_ch_switch(vif, channel); 643 res = ath6kl_commit_ch_switch(vif, channel);
630 644
645 /* if channel switch failed, oh well we tried */
646 ar->want_ch_switch &= ~(1 << vif->fw_vif_idx);
647
631 if (res) 648 if (res)
632 ath6kl_err("channel switch failed nw_type %d res %d\n", 649 ath6kl_err("channel switch failed nw_type %d res %d\n",
633 vif->nw_type, res); 650 vif->nw_type, res);
@@ -981,8 +998,25 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
981 if (vif->nw_type == AP_NETWORK) { 998 if (vif->nw_type == AP_NETWORK) {
982 /* disconnect due to other STA vif switching channels */ 999 /* disconnect due to other STA vif switching channels */
983 if (reason == BSS_DISCONNECTED && 1000 if (reason == BSS_DISCONNECTED &&
984 prot_reason_status == WMI_AP_REASON_STA_ROAM) 1001 prot_reason_status == WMI_AP_REASON_STA_ROAM) {
985 ar->want_ch_switch |= 1 << vif->fw_vif_idx; 1002 ar->want_ch_switch |= 1 << vif->fw_vif_idx;
1003 /* bail back to this channel if STA vif fails connect */
1004 ar->last_ch = le16_to_cpu(vif->profile.ch);
1005 }
1006
1007 if (prot_reason_status == WMI_AP_REASON_MAX_STA) {
1008 /* send max client reached notification to user space */
1009 cfg80211_conn_failed(vif->ndev, bssid,
1010 NL80211_CONN_FAIL_MAX_CLIENTS,
1011 GFP_KERNEL);
1012 }
1013
1014 if (prot_reason_status == WMI_AP_REASON_ACL) {
1015 /* send blocked client notification to user space */
1016 cfg80211_conn_failed(vif->ndev, bssid,
1017 NL80211_CONN_FAIL_BLOCKED_CLIENT,
1018 GFP_KERNEL);
1019 }
986 1020
987 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status)) 1021 if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
988 return; 1022 return;
@@ -1041,6 +1075,9 @@ void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
1041 } 1075 }
1042 } 1076 }
1043 1077
1078 /* restart disconnected concurrent vifs waiting for new channel */
1079 ath6kl_check_ch_switch(ar, ar->last_ch);
1080
1044 /* update connect & link status atomically */ 1081 /* update connect & link status atomically */
1045 spin_lock_bh(&vif->if_lock); 1082 spin_lock_bh(&vif->if_lock);
1046 clear_bit(CONNECTED, &vif->flags); 1083 clear_bit(CONNECTED, &vif->flags);
diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c
new file mode 100644
index 000000000000..3a8d5e97dc8e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/recovery.c
@@ -0,0 +1,160 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "cfg80211.h"
19#include "debug.h"
20
21static void ath6kl_recovery_work(struct work_struct *work)
22{
23 struct ath6kl *ar = container_of(work, struct ath6kl,
24 fw_recovery.recovery_work);
25
26 ar->state = ATH6KL_STATE_RECOVERY;
27
28 del_timer_sync(&ar->fw_recovery.hb_timer);
29
30 ath6kl_init_hw_restart(ar);
31
32 ar->state = ATH6KL_STATE_ON;
33 clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
34
35 ar->fw_recovery.err_reason = 0;
36
37 if (ar->fw_recovery.hb_poll)
38 mod_timer(&ar->fw_recovery.hb_timer, jiffies +
39 msecs_to_jiffies(ar->fw_recovery.hb_poll));
40}
41
42void ath6kl_recovery_err_notify(struct ath6kl *ar, enum ath6kl_fw_err reason)
43{
44 if (!ar->fw_recovery.enable)
45 return;
46
47 ath6kl_dbg(ATH6KL_DBG_RECOVERY, "Fw error detected, reason:%d\n",
48 reason);
49
50 set_bit(reason, &ar->fw_recovery.err_reason);
51
52 if (!test_bit(RECOVERY_CLEANUP, &ar->flag) &&
53 ar->state != ATH6KL_STATE_RECOVERY)
54 queue_work(ar->ath6kl_wq, &ar->fw_recovery.recovery_work);
55}
56
57void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie)
58{
59 if (cookie == ar->fw_recovery.seq_num)
60 ar->fw_recovery.hb_pending = false;
61}
62
63static void ath6kl_recovery_hb_timer(unsigned long data)
64{
65 struct ath6kl *ar = (struct ath6kl *) data;
66 int err;
67
68 if (test_bit(RECOVERY_CLEANUP, &ar->flag) ||
69 (ar->state == ATH6KL_STATE_RECOVERY))
70 return;
71
72 if (ar->fw_recovery.hb_pending)
73 ar->fw_recovery.hb_misscnt++;
74 else
75 ar->fw_recovery.hb_misscnt = 0;
76
77 if (ar->fw_recovery.hb_misscnt > ATH6KL_HB_RESP_MISS_THRES) {
78 ar->fw_recovery.hb_misscnt = 0;
79 ar->fw_recovery.seq_num = 0;
80 ar->fw_recovery.hb_pending = false;
81 ath6kl_recovery_err_notify(ar, ATH6KL_FW_HB_RESP_FAILURE);
82 return;
83 }
84
85 ar->fw_recovery.seq_num++;
86 ar->fw_recovery.hb_pending = true;
87
88 err = ath6kl_wmi_get_challenge_resp_cmd(ar->wmi,
89 ar->fw_recovery.seq_num, 0);
90 if (err)
91 ath6kl_warn("Failed to send hb challenge request, err:%d\n",
92 err);
93
94 mod_timer(&ar->fw_recovery.hb_timer, jiffies +
95 msecs_to_jiffies(ar->fw_recovery.hb_poll));
96}
97
98void ath6kl_recovery_init(struct ath6kl *ar)
99{
100 struct ath6kl_fw_recovery *recovery = &ar->fw_recovery;
101
102 clear_bit(RECOVERY_CLEANUP, &ar->flag);
103 INIT_WORK(&recovery->recovery_work, ath6kl_recovery_work);
104 recovery->seq_num = 0;
105 recovery->hb_misscnt = 0;
106 ar->fw_recovery.hb_pending = false;
107 ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer;
108 ar->fw_recovery.hb_timer.data = (unsigned long) ar;
109 init_timer_deferrable(&ar->fw_recovery.hb_timer);
110
111 if (ar->fw_recovery.hb_poll)
112 mod_timer(&ar->fw_recovery.hb_timer, jiffies +
113 msecs_to_jiffies(ar->fw_recovery.hb_poll));
114}
115
116void ath6kl_recovery_cleanup(struct ath6kl *ar)
117{
118 if (!ar->fw_recovery.enable)
119 return;
120
121 set_bit(RECOVERY_CLEANUP, &ar->flag);
122
123 del_timer_sync(&ar->fw_recovery.hb_timer);
124 cancel_work_sync(&ar->fw_recovery.recovery_work);
125}
126
127void ath6kl_recovery_suspend(struct ath6kl *ar)
128{
129 if (!ar->fw_recovery.enable)
130 return;
131
132 ath6kl_recovery_cleanup(ar);
133
134 if (!ar->fw_recovery.err_reason)
135 return;
136
137 /* Process pending fw error detection */
138 ar->fw_recovery.err_reason = 0;
139 WARN_ON(ar->state != ATH6KL_STATE_ON);
140 ar->state = ATH6KL_STATE_RECOVERY;
141 ath6kl_init_hw_restart(ar);
142 ar->state = ATH6KL_STATE_ON;
143}
144
145void ath6kl_recovery_resume(struct ath6kl *ar)
146{
147 if (!ar->fw_recovery.enable)
148 return;
149
150 clear_bit(RECOVERY_CLEANUP, &ar->flag);
151
152 if (!ar->fw_recovery.hb_poll)
153 return;
154
155 ar->fw_recovery.hb_pending = false;
156 ar->fw_recovery.seq_num = 0;
157 ar->fw_recovery.hb_misscnt = 0;
158 mod_timer(&ar->fw_recovery.hb_timer,
159 jiffies + msecs_to_jiffies(ar->fw_recovery.hb_poll));
160}
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 05b95405f7b5..d111980d44c0 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -709,7 +709,7 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
709{ 709{
710 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar); 710 struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
711 struct htc_target *target = ar->htc_target; 711 struct htc_target *target = ar->htc_target;
712 int ret; 712 int ret = 0;
713 bool virt_scat = false; 713 bool virt_scat = false;
714 714
715 if (ar_sdio->scatter_enabled) 715 if (ar_sdio->scatter_enabled)
@@ -844,22 +844,6 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
844 bool try_deepsleep = false; 844 bool try_deepsleep = false;
845 int ret; 845 int ret;
846 846
847 if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
848 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sched scan is in progress\n");
849
850 ret = ath6kl_set_sdio_pm_caps(ar);
851 if (ret)
852 goto cut_pwr;
853
854 ret = ath6kl_cfg80211_suspend(ar,
855 ATH6KL_CFG_SUSPEND_SCHED_SCAN,
856 NULL);
857 if (ret)
858 goto cut_pwr;
859
860 return 0;
861 }
862
863 if (ar->suspend_mode == WLAN_POWER_STATE_WOW || 847 if (ar->suspend_mode == WLAN_POWER_STATE_WOW ||
864 (!ar->suspend_mode && wow)) { 848 (!ar->suspend_mode && wow)) {
865 849
@@ -942,14 +926,14 @@ static int ath6kl_sdio_resume(struct ath6kl *ar)
942 case ATH6KL_STATE_WOW: 926 case ATH6KL_STATE_WOW:
943 break; 927 break;
944 928
945 case ATH6KL_STATE_SCHED_SCAN:
946 break;
947
948 case ATH6KL_STATE_SUSPENDING: 929 case ATH6KL_STATE_SUSPENDING:
949 break; 930 break;
950 931
951 case ATH6KL_STATE_RESUMING: 932 case ATH6KL_STATE_RESUMING:
952 break; 933 break;
934
935 case ATH6KL_STATE_RECOVERY:
936 break;
953 } 937 }
954 938
955 ath6kl_cfg80211_resume(ar); 939 ath6kl_cfg80211_resume(ar);
@@ -1462,3 +1446,6 @@ MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
1462MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE); 1446MODULE_FIRMWARE(AR6004_HW_1_2_FW_DIR "/" AR6004_HW_1_2_FIRMWARE_FILE);
1463MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); 1447MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
1464MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); 1448MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
1449MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
1450MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
1451MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 7dfa0fd86d7b..78b369286579 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -288,8 +288,16 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
288 int status = 0; 288 int status = 0;
289 struct ath6kl_cookie *cookie = NULL; 289 struct ath6kl_cookie *cookie = NULL;
290 290
291 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) 291 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
292 dev_kfree_skb(skb);
292 return -EACCES; 293 return -EACCES;
294 }
295
296 if (WARN_ON_ONCE(eid == ENDPOINT_UNUSED ||
297 eid >= ENDPOINT_MAX)) {
298 status = -EINVAL;
299 goto fail_ctrl_tx;
300 }
293 301
294 spin_lock_bh(&ar->lock); 302 spin_lock_bh(&ar->lock);
295 303
@@ -591,6 +599,7 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
591 */ 599 */
592 set_bit(WMI_CTRL_EP_FULL, &ar->flag); 600 set_bit(WMI_CTRL_EP_FULL, &ar->flag);
593 ath6kl_err("wmi ctrl ep is full\n"); 601 ath6kl_err("wmi ctrl ep is full\n");
602 ath6kl_recovery_err_notify(ar, ATH6KL_FW_EP_FULL);
594 return action; 603 return action;
595 } 604 }
596 605
@@ -695,22 +704,31 @@ void ath6kl_tx_complete(struct htc_target *target,
695 list); 704 list);
696 list_del(&packet->list); 705 list_del(&packet->list);
697 706
707 if (WARN_ON_ONCE(packet->endpoint == ENDPOINT_UNUSED ||
708 packet->endpoint >= ENDPOINT_MAX))
709 continue;
710
698 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt; 711 ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
699 if (!ath6kl_cookie) 712 if (WARN_ON_ONCE(!ath6kl_cookie))
700 goto fatal; 713 continue;
701 714
702 status = packet->status; 715 status = packet->status;
703 skb = ath6kl_cookie->skb; 716 skb = ath6kl_cookie->skb;
704 eid = packet->endpoint; 717 eid = packet->endpoint;
705 map_no = ath6kl_cookie->map_no; 718 map_no = ath6kl_cookie->map_no;
706 719
707 if (!skb || !skb->data) 720 if (WARN_ON_ONCE(!skb || !skb->data)) {
708 goto fatal; 721 dev_kfree_skb(skb);
722 ath6kl_free_cookie(ar, ath6kl_cookie);
723 continue;
724 }
709 725
710 __skb_queue_tail(&skb_queue, skb); 726 __skb_queue_tail(&skb_queue, skb);
711 727
712 if (!status && (packet->act_len != skb->len)) 728 if (WARN_ON_ONCE(!status && (packet->act_len != skb->len))) {
713 goto fatal; 729 ath6kl_free_cookie(ar, ath6kl_cookie);
730 continue;
731 }
714 732
715 ar->tx_pending[eid]--; 733 ar->tx_pending[eid]--;
716 734
@@ -792,11 +810,6 @@ void ath6kl_tx_complete(struct htc_target *target,
792 wake_up(&ar->event_wq); 810 wake_up(&ar->event_wq);
793 811
794 return; 812 return;
795
796fatal:
797 WARN_ON(1);
798 spin_unlock_bh(&ar->lock);
799 return;
800} 813}
801 814
802void ath6kl_tx_data_cleanup(struct ath6kl *ar) 815void ath6kl_tx_data_cleanup(struct ath6kl *ar)
@@ -885,8 +898,11 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
885 break; 898 break;
886 899
887 packet = (struct htc_packet *) skb->head; 900 packet = (struct htc_packet *) skb->head;
888 if (!IS_ALIGNED((unsigned long) skb->data, 4)) 901 if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
902 size_t len = skb_headlen(skb);
889 skb->data = PTR_ALIGN(skb->data - 4, 4); 903 skb->data = PTR_ALIGN(skb->data - 4, 4);
904 skb_set_tail_pointer(skb, len);
905 }
890 set_htc_rxpkt_info(packet, skb, skb->data, 906 set_htc_rxpkt_info(packet, skb, skb->data,
891 ATH6KL_BUFFER_SIZE, endpoint); 907 ATH6KL_BUFFER_SIZE, endpoint);
892 packet->skb = skb; 908 packet->skb = skb;
@@ -908,8 +924,11 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
908 return; 924 return;
909 925
910 packet = (struct htc_packet *) skb->head; 926 packet = (struct htc_packet *) skb->head;
911 if (!IS_ALIGNED((unsigned long) skb->data, 4)) 927 if (!IS_ALIGNED((unsigned long) skb->data, 4)) {
928 size_t len = skb_headlen(skb);
912 skb->data = PTR_ALIGN(skb->data - 4, 4); 929 skb->data = PTR_ALIGN(skb->data - 4, 4);
930 skb_set_tail_pointer(skb, len);
931 }
913 set_htc_rxpkt_info(packet, skb, skb->data, 932 set_htc_rxpkt_info(packet, skb, skb->data,
914 ATH6KL_AMSDU_BUFFER_SIZE, 0); 933 ATH6KL_AMSDU_BUFFER_SIZE, 0);
915 packet->skb = skb; 934 packet->skb = skb;
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 3740c3d6ab88..62bcc0d5bc23 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -185,9 +185,10 @@ static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
185 for (i = 0; i < urb_cnt; i++) { 185 for (i = 0; i < urb_cnt; i++) {
186 urb_context = kzalloc(sizeof(struct ath6kl_urb_context), 186 urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
187 GFP_KERNEL); 187 GFP_KERNEL);
188 if (urb_context == NULL) 188 if (urb_context == NULL) {
189 /* FIXME: set status to -ENOMEM */ 189 status = -ENOMEM;
190 break; 190 goto fail_alloc_pipe_resources;
191 }
191 192
192 urb_context->pipe = pipe; 193 urb_context->pipe = pipe;
193 194
@@ -204,6 +205,7 @@ static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
204 pipe->logical_pipe_num, pipe->usb_pipe_handle, 205 pipe->logical_pipe_num, pipe->usb_pipe_handle,
205 pipe->urb_alloc); 206 pipe->urb_alloc);
206 207
208fail_alloc_pipe_resources:
207 return status; 209 return status;
208} 210}
209 211
@@ -803,7 +805,11 @@ static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
803 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; 805 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
804 break; 806 break;
805 case WMI_DATA_VI_SVC: 807 case WMI_DATA_VI_SVC:
806 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP; 808
809 if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
810 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
811 else
812 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
807 /* 813 /*
808 * Disable rxdata2 directly, it will be enabled 814 * Disable rxdata2 directly, it will be enabled
809 * if FW enable rxdata2 815 * if FW enable rxdata2
@@ -811,7 +817,11 @@ static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
811 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA; 817 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
812 break; 818 break;
813 case WMI_DATA_VO_SVC: 819 case WMI_DATA_VO_SVC:
814 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP; 820
821 if (ar->hw.flags & ATH6KL_HW_MAP_LP_ENDPOINT)
822 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
823 else
824 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
815 /* 825 /*
816 * Disable rxdata2 directly, it will be enabled 826 * Disable rxdata2 directly, it will be enabled
817 * if FW enable rxdata2 827 * if FW enable rxdata2
@@ -1196,7 +1206,14 @@ static struct usb_driver ath6kl_usb_driver = {
1196 1206
1197static int ath6kl_usb_init(void) 1207static int ath6kl_usb_init(void)
1198{ 1208{
1199 usb_register(&ath6kl_usb_driver); 1209 int ret;
1210
1211 ret = usb_register(&ath6kl_usb_driver);
1212 if (ret) {
1213 ath6kl_err("usb registration failed: %d\n", ret);
1214 return ret;
1215 }
1216
1200 return 0; 1217 return 0;
1201} 1218}
1202 1219
@@ -1220,3 +1237,6 @@ MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
1220MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE); 1237MODULE_FIRMWARE(AR6004_HW_1_2_FIRMWARE_FILE);
1221MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE); 1238MODULE_FIRMWARE(AR6004_HW_1_2_BOARD_DATA_FILE);
1222MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE); 1239MODULE_FIRMWARE(AR6004_HW_1_2_DEFAULT_BOARD_DATA_FILE);
1240MODULE_FIRMWARE(AR6004_HW_1_3_FW_DIR "/" AR6004_HW_1_3_FIRMWARE_FILE);
1241MODULE_FIRMWARE(AR6004_HW_1_3_BOARD_DATA_FILE);
1242MODULE_FIRMWARE(AR6004_HW_1_3_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index c30ab4b11d61..998f8b0f62fd 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -474,7 +474,7 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
474 return -EINVAL; 474 return -EINVAL;
475 } 475 }
476 id = vif->last_roc_id; 476 id = vif->last_roc_id;
477 cfg80211_ready_on_channel(&vif->wdev, id, chan, NL80211_CHAN_NO_HT, 477 cfg80211_ready_on_channel(&vif->wdev, id, chan,
478 dur, GFP_ATOMIC); 478 dur, GFP_ATOMIC);
479 479
480 return 0; 480 return 0;
@@ -513,8 +513,7 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
513 else 513 else
514 id = vif->last_roc_id; /* timeout on uncanceled r-o-c */ 514 id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
515 vif->last_cancel_roc_id = 0; 515 vif->last_cancel_roc_id = 0;
516 cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, 516 cfg80211_remain_on_channel_expired(&vif->wdev, id, chan, GFP_ATOMIC);
517 NL80211_CHAN_NO_HT, GFP_ATOMIC);
518 517
519 return 0; 518 return 0;
520} 519}
@@ -936,8 +935,12 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
936 935
937 regpair = ath6kl_get_regpair((u16) reg_code); 936 regpair = ath6kl_get_regpair((u16) reg_code);
938 country = ath6kl_regd_find_country_by_rd((u16) reg_code); 937 country = ath6kl_regd_find_country_by_rd((u16) reg_code);
939 ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n", 938 if (regpair)
940 regpair->regDmnEnum); 939 ath6kl_dbg(ATH6KL_DBG_WMI, "Regpair used: 0x%0x\n",
940 regpair->regDmnEnum);
941 else
942 ath6kl_warn("Regpair not found reg_code 0x%0x\n",
943 reg_code);
941 } 944 }
942 945
943 if (country && wmi->parent_dev->wiphy_registered) { 946 if (country && wmi->parent_dev->wiphy_registered) {
@@ -1116,7 +1119,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
1116 * the timer would not ever fire if the scan interval is short 1119 * the timer would not ever fire if the scan interval is short
1117 * enough. 1120 * enough.
1118 */ 1121 */
1119 if (ar->state == ATH6KL_STATE_SCHED_SCAN && 1122 if (test_bit(SCHED_SCANNING, &vif->flags) &&
1120 !timer_pending(&vif->sched_scan_timer)) { 1123 !timer_pending(&vif->sched_scan_timer)) {
1121 mod_timer(&vif->sched_scan_timer, jiffies + 1124 mod_timer(&vif->sched_scan_timer, jiffies +
1122 msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY)); 1125 msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
@@ -1170,6 +1173,9 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
1170 rate = RATE_AUTO; 1173 rate = RATE_AUTO;
1171 } else { 1174 } else {
1172 index = reply->rate_index & 0x7f; 1175 index = reply->rate_index & 0x7f;
1176 if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
1177 return -EINVAL;
1178
1173 sgi = (reply->rate_index & 0x80) ? 1 : 0; 1179 sgi = (reply->rate_index & 0x80) ? 1 : 0;
1174 rate = wmi_rate_tbl[index][sgi]; 1180 rate = wmi_rate_tbl[index][sgi];
1175 } 1181 }
@@ -1531,6 +1537,68 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
1531 return 0; 1537 return 0;
1532} 1538}
1533 1539
1540static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
1541 struct ath6kl_vif *vif)
1542{
1543 struct wmi_txe_notify_event *ev;
1544 u32 rate, pkts;
1545
1546 if (len < sizeof(*ev))
1547 return -EINVAL;
1548
1549 if (vif->sme_state != SME_CONNECTED)
1550 return -ENOTCONN;
1551
1552 ev = (struct wmi_txe_notify_event *) datap;
1553 rate = le32_to_cpu(ev->rate);
1554 pkts = le32_to_cpu(ev->pkts);
1555
1556 ath6kl_dbg(ATH6KL_DBG_WMI, "TXE notify event: peer %pM rate %d% pkts %d intvl %ds\n",
1557 vif->bssid, rate, pkts, vif->txe_intvl);
1558
1559 cfg80211_cqm_txe_notify(vif->ndev, vif->bssid, pkts,
1560 rate, vif->txe_intvl, GFP_KERNEL);
1561
1562 return 0;
1563}
1564
1565int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
1566 u32 rate, u32 pkts, u32 intvl)
1567{
1568 struct sk_buff *skb;
1569 struct wmi_txe_notify_cmd *cmd;
1570
1571 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1572 if (!skb)
1573 return -ENOMEM;
1574
1575 cmd = (struct wmi_txe_notify_cmd *) skb->data;
1576 cmd->rate = cpu_to_le32(rate);
1577 cmd->pkts = cpu_to_le32(pkts);
1578 cmd->intvl = cpu_to_le32(intvl);
1579
1580 return ath6kl_wmi_cmd_send(wmi, idx, skb, WMI_SET_TXE_NOTIFY_CMDID,
1581 NO_SYNC_WMIFLAG);
1582}
1583
1584int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi)
1585{
1586 struct sk_buff *skb;
1587 struct wmi_set_rssi_filter_cmd *cmd;
1588 int ret;
1589
1590 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
1591 if (!skb)
1592 return -ENOMEM;
1593
1594 cmd = (struct wmi_set_rssi_filter_cmd *) skb->data;
1595 cmd->rssi = rssi;
1596
1597 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_RSSI_FILTER_CMDID,
1598 NO_SYNC_WMIFLAG);
1599 return ret;
1600}
1601
1534static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi, 1602static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
1535 struct wmi_snr_threshold_params_cmd *snr_cmd) 1603 struct wmi_snr_threshold_params_cmd *snr_cmd)
1536{ 1604{
@@ -1677,8 +1745,11 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
1677 int ret; 1745 int ret;
1678 u16 info1; 1746 u16 info1;
1679 1747
1680 if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1)))) 1748 if (WARN_ON(skb == NULL ||
1749 (if_idx > (wmi->parent_dev->vif_max - 1)))) {
1750 dev_kfree_skb(skb);
1681 return -EINVAL; 1751 return -EINVAL;
1752 }
1682 1753
1683 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n", 1754 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
1684 cmd_id, skb->len, sync_flag); 1755 cmd_id, skb->len, sync_flag);
@@ -1833,6 +1904,59 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
1833 return ret; 1904 return ret;
1834} 1905}
1835 1906
1907/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use
1908 * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
1909 * mgmt operations using station interface.
1910 */
1911static int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
1912 enum wmi_scan_type scan_type,
1913 u32 force_fgscan, u32 is_legacy,
1914 u32 home_dwell_time,
1915 u32 force_scan_interval,
1916 s8 num_chan, u16 *ch_list)
1917{
1918 struct sk_buff *skb;
1919 struct wmi_start_scan_cmd *sc;
1920 s8 size;
1921 int i, ret;
1922
1923 size = sizeof(struct wmi_start_scan_cmd);
1924
1925 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
1926 return -EINVAL;
1927
1928 if (num_chan > WMI_MAX_CHANNELS)
1929 return -EINVAL;
1930
1931 if (num_chan)
1932 size += sizeof(u16) * (num_chan - 1);
1933
1934 skb = ath6kl_wmi_get_new_buf(size);
1935 if (!skb)
1936 return -ENOMEM;
1937
1938 sc = (struct wmi_start_scan_cmd *) skb->data;
1939 sc->scan_type = scan_type;
1940 sc->force_fg_scan = cpu_to_le32(force_fgscan);
1941 sc->is_legacy = cpu_to_le32(is_legacy);
1942 sc->home_dwell_time = cpu_to_le32(home_dwell_time);
1943 sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
1944 sc->num_ch = num_chan;
1945
1946 for (i = 0; i < num_chan; i++)
1947 sc->ch_list[i] = cpu_to_le16(ch_list[i]);
1948
1949 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
1950 NO_SYNC_WMIFLAG);
1951
1952 return ret;
1953}
1954
1955/*
1956 * beginscan supports (compared to old startscan) P2P mgmt operations using
1957 * station interface, send additional information like supported rates to
1958 * advertise and xmit rates for probe requests
1959 */
1836int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, 1960int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
1837 enum wmi_scan_type scan_type, 1961 enum wmi_scan_type scan_type,
1838 u32 force_fgscan, u32 is_legacy, 1962 u32 force_fgscan, u32 is_legacy,
@@ -1848,6 +1972,15 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
1848 int num_rates; 1972 int num_rates;
1849 u32 ratemask; 1973 u32 ratemask;
1850 1974
1975 if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
1976 ar->fw_capabilities)) {
1977 return ath6kl_wmi_startscan_cmd(wmi, if_idx,
1978 scan_type, force_fgscan,
1979 is_legacy, home_dwell_time,
1980 force_scan_interval,
1981 num_chan, ch_list);
1982 }
1983
1851 size = sizeof(struct wmi_begin_scan_cmd); 1984 size = sizeof(struct wmi_begin_scan_cmd);
1852 1985
1853 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN)) 1986 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
@@ -1900,50 +2033,24 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
1900 return ret; 2033 return ret;
1901} 2034}
1902 2035
1903/* ath6kl_wmi_start_scan_cmd is to be deprecated. Use 2036int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable)
1904 * ath6kl_wmi_begin_scan_cmd instead. The new function supports P2P
1905 * mgmt operations using station interface.
1906 */
1907int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
1908 enum wmi_scan_type scan_type,
1909 u32 force_fgscan, u32 is_legacy,
1910 u32 home_dwell_time, u32 force_scan_interval,
1911 s8 num_chan, u16 *ch_list)
1912{ 2037{
1913 struct sk_buff *skb; 2038 struct sk_buff *skb;
1914 struct wmi_start_scan_cmd *sc; 2039 struct wmi_enable_sched_scan_cmd *sc;
1915 s8 size; 2040 int ret;
1916 int i, ret;
1917
1918 size = sizeof(struct wmi_start_scan_cmd);
1919
1920 if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
1921 return -EINVAL;
1922
1923 if (num_chan > WMI_MAX_CHANNELS)
1924 return -EINVAL;
1925
1926 if (num_chan)
1927 size += sizeof(u16) * (num_chan - 1);
1928 2041
1929 skb = ath6kl_wmi_get_new_buf(size); 2042 skb = ath6kl_wmi_get_new_buf(sizeof(*sc));
1930 if (!skb) 2043 if (!skb)
1931 return -ENOMEM; 2044 return -ENOMEM;
1932 2045
1933 sc = (struct wmi_start_scan_cmd *) skb->data; 2046 ath6kl_dbg(ATH6KL_DBG_WMI, "%s scheduled scan on vif %d\n",
1934 sc->scan_type = scan_type; 2047 enable ? "enabling" : "disabling", if_idx);
1935 sc->force_fg_scan = cpu_to_le32(force_fgscan); 2048 sc = (struct wmi_enable_sched_scan_cmd *) skb->data;
1936 sc->is_legacy = cpu_to_le32(is_legacy); 2049 sc->enable = enable ? 1 : 0;
1937 sc->home_dwell_time = cpu_to_le32(home_dwell_time);
1938 sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
1939 sc->num_ch = num_chan;
1940
1941 for (i = 0; i < num_chan; i++)
1942 sc->ch_list[i] = cpu_to_le16(ch_list[i]);
1943 2050
1944 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID, 2051 ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
2052 WMI_ENABLE_SCHED_SCAN_CMDID,
1945 NO_SYNC_WMIFLAG); 2053 NO_SYNC_WMIFLAG);
1946
1947 return ret; 2054 return ret;
1948} 2055}
1949 2056
@@ -2275,8 +2382,10 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
2275 struct wmi_data_hdr *data_hdr; 2382 struct wmi_data_hdr *data_hdr;
2276 int ret; 2383 int ret;
2277 2384
2278 if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) 2385 if (WARN_ON(skb == NULL || ep_id == wmi->ep_id)) {
2386 dev_kfree_skb(skb);
2279 return -EINVAL; 2387 return -EINVAL;
2388 }
2280 2389
2281 skb_push(skb, sizeof(struct wmi_data_hdr)); 2390 skb_push(skb, sizeof(struct wmi_data_hdr));
2282 2391
@@ -2313,10 +2422,8 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2313 spin_unlock_bh(&wmi->lock); 2422 spin_unlock_bh(&wmi->lock);
2314 2423
2315 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); 2424 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2316 if (!skb) { 2425 if (!skb)
2317 ret = -ENOMEM; 2426 return -ENOMEM;
2318 goto free_skb;
2319 }
2320 2427
2321 cmd = (struct wmi_sync_cmd *) skb->data; 2428 cmd = (struct wmi_sync_cmd *) skb->data;
2322 2429
@@ -2339,7 +2446,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2339 * then do not send the Synchronize cmd on the control ep 2446 * then do not send the Synchronize cmd on the control ep
2340 */ 2447 */
2341 if (ret) 2448 if (ret)
2342 goto free_skb; 2449 goto free_cmd_skb;
2343 2450
2344 /* 2451 /*
2345 * Send sync cmd followed by sync data messages on all 2452 * Send sync cmd followed by sync data messages on all
@@ -2349,15 +2456,12 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2349 NO_SYNC_WMIFLAG); 2456 NO_SYNC_WMIFLAG);
2350 2457
2351 if (ret) 2458 if (ret)
2352 goto free_skb; 2459 goto free_data_skb;
2353
2354 /* cmd buffer sent, we no longer own it */
2355 skb = NULL;
2356 2460
2357 for (index = 0; index < num_pri_streams; index++) { 2461 for (index = 0; index < num_pri_streams; index++) {
2358 2462
2359 if (WARN_ON(!data_sync_bufs[index].skb)) 2463 if (WARN_ON(!data_sync_bufs[index].skb))
2360 break; 2464 goto free_data_skb;
2361 2465
2362 ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev, 2466 ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
2363 data_sync_bufs[index]. 2467 data_sync_bufs[index].
@@ -2366,17 +2470,20 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
2366 ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb, 2470 ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
2367 ep_id, if_idx); 2471 ep_id, if_idx);
2368 2472
2369 if (ret)
2370 break;
2371
2372 data_sync_bufs[index].skb = NULL; 2473 data_sync_bufs[index].skb = NULL;
2474
2475 if (ret)
2476 goto free_data_skb;
2373 } 2477 }
2374 2478
2375free_skb: 2479 return 0;
2480
2481free_cmd_skb:
2376 /* free up any resources left over (possibly due to an error) */ 2482 /* free up any resources left over (possibly due to an error) */
2377 if (skb) 2483 if (skb)
2378 dev_kfree_skb(skb); 2484 dev_kfree_skb(skb);
2379 2485
2486free_data_skb:
2380 for (index = 0; index < num_pri_streams; index++) { 2487 for (index = 0; index < num_pri_streams; index++) {
2381 if (data_sync_bufs[index].skb != NULL) { 2488 if (data_sync_bufs[index].skb != NULL) {
2382 dev_kfree_skb((struct sk_buff *)data_sync_bufs[index]. 2489 dev_kfree_skb((struct sk_buff *)data_sync_bufs[index].
@@ -2618,11 +2725,13 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
2618{ 2725{
2619 struct sk_buff *skb; 2726 struct sk_buff *skb;
2620 int ret, mode, band; 2727 int ret, mode, band;
2621 u64 mcsrate, ratemask[IEEE80211_NUM_BANDS]; 2728 u64 mcsrate, ratemask[ATH6KL_NUM_BANDS];
2622 struct wmi_set_tx_select_rates64_cmd *cmd; 2729 struct wmi_set_tx_select_rates64_cmd *cmd;
2623 2730
2624 memset(&ratemask, 0, sizeof(ratemask)); 2731 memset(&ratemask, 0, sizeof(ratemask));
2625 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2732
2733 /* only check 2.4 and 5 GHz bands, skip the rest */
2734 for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
2626 /* copy legacy rate mask */ 2735 /* copy legacy rate mask */
2627 ratemask[band] = mask->control[band].legacy; 2736 ratemask[band] = mask->control[band].legacy;
2628 if (band == IEEE80211_BAND_5GHZ) 2737 if (band == IEEE80211_BAND_5GHZ)
@@ -2668,11 +2777,13 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
2668{ 2777{
2669 struct sk_buff *skb; 2778 struct sk_buff *skb;
2670 int ret, mode, band; 2779 int ret, mode, band;
2671 u32 mcsrate, ratemask[IEEE80211_NUM_BANDS]; 2780 u32 mcsrate, ratemask[ATH6KL_NUM_BANDS];
2672 struct wmi_set_tx_select_rates32_cmd *cmd; 2781 struct wmi_set_tx_select_rates32_cmd *cmd;
2673 2782
2674 memset(&ratemask, 0, sizeof(ratemask)); 2783 memset(&ratemask, 0, sizeof(ratemask));
2675 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2784
2785 /* only check 2.4 and 5 GHz bands, skip the rest */
2786 for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
2676 /* copy legacy rate mask */ 2787 /* copy legacy rate mask */
2677 ratemask[band] = mask->control[band].legacy; 2788 ratemask[band] = mask->control[band].legacy;
2678 if (band == IEEE80211_BAND_5GHZ) 2789 if (band == IEEE80211_BAND_5GHZ)
@@ -2716,7 +2827,7 @@ int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
2716{ 2827{
2717 struct ath6kl *ar = wmi->parent_dev; 2828 struct ath6kl *ar = wmi->parent_dev;
2718 2829
2719 if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) 2830 if (ar->hw.flags & ATH6KL_HW_64BIT_RATES)
2720 return ath6kl_set_bitrate_mask64(wmi, if_idx, mask); 2831 return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
2721 else 2832 else
2722 return ath6kl_set_bitrate_mask32(wmi, if_idx, mask); 2833 return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
@@ -3139,12 +3250,40 @@ int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
3139 return ret; 3250 return ret;
3140} 3251}
3141 3252
3253int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2)
3254{
3255 struct sk_buff *skb;
3256 struct wmi_set_regdomain_cmd *cmd;
3257
3258 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
3259 if (!skb)
3260 return -ENOMEM;
3261
3262 cmd = (struct wmi_set_regdomain_cmd *) skb->data;
3263 memcpy(cmd->iso_name, alpha2, 2);
3264
3265 return ath6kl_wmi_cmd_send(wmi, 0, skb,
3266 WMI_SET_REGDOMAIN_CMDID,
3267 NO_SYNC_WMIFLAG);
3268}
3269
3142s32 ath6kl_wmi_get_rate(s8 rate_index) 3270s32 ath6kl_wmi_get_rate(s8 rate_index)
3143{ 3271{
3272 u8 sgi = 0;
3273
3144 if (rate_index == RATE_AUTO) 3274 if (rate_index == RATE_AUTO)
3145 return 0; 3275 return 0;
3146 3276
3147 return wmi_rate_tbl[(u32) rate_index][0]; 3277 /* SGI is stored as the MSB of the rate_index */
3278 if (rate_index & RATE_INDEX_MSB) {
3279 rate_index &= RATE_INDEX_WITHOUT_SGI_MASK;
3280 sgi = 1;
3281 }
3282
3283 if (WARN_ON(rate_index > RATE_MCS_7_40))
3284 rate_index = RATE_MCS_7_40;
3285
3286 return wmi_rate_tbl[(u32) rate_index][sgi];
3148} 3287}
3149 3288
3150static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap, 3289static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
@@ -3634,6 +3773,19 @@ int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
3634 NO_SYNC_WMIFLAG); 3773 NO_SYNC_WMIFLAG);
3635} 3774}
3636 3775
3776static void ath6kl_wmi_hb_challenge_resp_event(struct wmi *wmi, u8 *datap,
3777 int len)
3778{
3779 struct wmix_hb_challenge_resp_cmd *cmd;
3780
3781 if (len < sizeof(struct wmix_hb_challenge_resp_cmd))
3782 return;
3783
3784 cmd = (struct wmix_hb_challenge_resp_cmd *) datap;
3785 ath6kl_recovery_hb_event(wmi->parent_dev,
3786 le32_to_cpu(cmd->cookie));
3787}
3788
3637static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) 3789static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
3638{ 3790{
3639 struct wmix_cmd_hdr *cmd; 3791 struct wmix_cmd_hdr *cmd;
@@ -3658,6 +3810,7 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
3658 switch (id) { 3810 switch (id) {
3659 case WMIX_HB_CHALLENGE_RESP_EVENTID: 3811 case WMIX_HB_CHALLENGE_RESP_EVENTID:
3660 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n"); 3812 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event hb challenge resp\n");
3813 ath6kl_wmi_hb_challenge_resp_event(wmi, datap, len);
3661 break; 3814 break;
3662 case WMIX_DBGLOG_EVENTID: 3815 case WMIX_DBGLOG_EVENTID:
3663 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len); 3816 ath6kl_dbg(ATH6KL_DBG_WMI, "wmi event dbglog len %d\n", len);
@@ -3750,6 +3903,9 @@ static int ath6kl_wmi_proc_events_vif(struct wmi *wmi, u16 if_idx, u16 cmd_id,
3750 case WMI_RX_ACTION_EVENTID: 3903 case WMI_RX_ACTION_EVENTID:
3751 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n"); 3904 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
3752 return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif); 3905 return ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
3906 case WMI_TXE_NOTIFY_EVENTID:
3907 ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TXE_NOTIFY_EVENTID\n");
3908 return ath6kl_wmi_txe_notify_event_rx(wmi, datap, len, vif);
3753 default: 3909 default:
3754 ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id); 3910 ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", cmd_id);
3755 return -EINVAL; 3911 return -EINVAL;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 43339aca585d..98b1755e67f4 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -48,7 +48,7 @@
48 48
49#define A_BAND_24GHZ 0 49#define A_BAND_24GHZ 0
50#define A_BAND_5GHZ 1 50#define A_BAND_5GHZ 1
51#define A_NUM_BANDS 2 51#define ATH6KL_NUM_BANDS 2
52 52
53/* in ms */ 53/* in ms */
54#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000 54#define WMI_IMPLICIT_PSTREAM_INACTIVITY_INT 5000
@@ -628,6 +628,20 @@ enum wmi_cmd_id {
628 WMI_SET_MCASTRATE, 628 WMI_SET_MCASTRATE,
629 629
630 WMI_STA_BMISS_ENHANCE_CMDID, 630 WMI_STA_BMISS_ENHANCE_CMDID,
631
632 WMI_SET_REGDOMAIN_CMDID,
633
634 WMI_SET_RSSI_FILTER_CMDID,
635
636 WMI_SET_KEEP_ALIVE_EXT,
637
638 WMI_VOICE_DETECTION_ENABLE_CMDID,
639
640 WMI_SET_TXE_NOTIFY_CMDID,
641
642 WMI_SET_RECOVERY_TEST_PARAMETER_CMDID, /*0xf094*/
643
644 WMI_ENABLE_SCHED_SCAN_CMDID,
631}; 645};
632 646
633enum wmi_mgmt_frame_type { 647enum wmi_mgmt_frame_type {
@@ -843,7 +857,7 @@ struct wmi_begin_scan_cmd {
843 u8 scan_type; 857 u8 scan_type;
844 858
845 /* Supported rates to advertise in the probe request frames */ 859 /* Supported rates to advertise in the probe request frames */
846 struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS]; 860 struct wmi_supp_rates supp_rates[ATH6KL_NUM_BANDS];
847 861
848 /* how many channels follow */ 862 /* how many channels follow */
849 u8 num_ch; 863 u8 num_ch;
@@ -941,6 +955,11 @@ struct wmi_scan_params_cmd {
941 __le32 max_dfsch_act_time; 955 __le32 max_dfsch_act_time;
942} __packed; 956} __packed;
943 957
958/* WMI_ENABLE_SCHED_SCAN_CMDID */
959struct wmi_enable_sched_scan_cmd {
960 u8 enable;
961} __packed;
962
944/* WMI_SET_BSS_FILTER_CMDID */ 963/* WMI_SET_BSS_FILTER_CMDID */
945enum wmi_bss_filter { 964enum wmi_bss_filter {
946 /* no beacons forwarded */ 965 /* no beacons forwarded */
@@ -1032,6 +1051,11 @@ struct wmi_sta_bmiss_enhance_cmd {
1032 u8 enable; 1051 u8 enable;
1033} __packed; 1052} __packed;
1034 1053
1054struct wmi_set_regdomain_cmd {
1055 u8 length;
1056 u8 iso_name[2];
1057} __packed;
1058
1035/* WMI_SET_POWER_MODE_CMDID */ 1059/* WMI_SET_POWER_MODE_CMDID */
1036enum wmi_power_mode { 1060enum wmi_power_mode {
1037 REC_POWER = 0x01, 1061 REC_POWER = 0x01,
@@ -1276,6 +1300,11 @@ struct wmi_snr_threshold_params_cmd {
1276 u8 reserved[3]; 1300 u8 reserved[3];
1277} __packed; 1301} __packed;
1278 1302
1303/* Don't report BSSs with signal (RSSI) below this threshold */
1304struct wmi_set_rssi_filter_cmd {
1305 s8 rssi;
1306} __packed;
1307
1279enum wmi_preamble_policy { 1308enum wmi_preamble_policy {
1280 WMI_IGNORE_BARKER_IN_ERP = 0, 1309 WMI_IGNORE_BARKER_IN_ERP = 0,
1281 WMI_FOLLOW_BARKER_IN_ERP, 1310 WMI_FOLLOW_BARKER_IN_ERP,
@@ -1455,6 +1484,20 @@ enum wmi_event_id {
1455 WMI_P2P_CAPABILITIES_EVENTID, 1484 WMI_P2P_CAPABILITIES_EVENTID,
1456 WMI_RX_ACTION_EVENTID, 1485 WMI_RX_ACTION_EVENTID,
1457 WMI_P2P_INFO_EVENTID, 1486 WMI_P2P_INFO_EVENTID,
1487
1488 /* WPS Events */
1489 WMI_WPS_GET_STATUS_EVENTID,
1490 WMI_WPS_PROFILE_EVENTID,
1491
1492 /* more P2P events */
1493 WMI_NOA_INFO_EVENTID,
1494 WMI_OPPPS_INFO_EVENTID,
1495 WMI_PORT_STATUS_EVENTID,
1496
1497 /* 802.11w */
1498 WMI_GET_RSN_CAP_EVENTID,
1499
1500 WMI_TXE_NOTIFY_EVENTID,
1458}; 1501};
1459 1502
1460struct wmi_ready_event_2 { 1503struct wmi_ready_event_2 {
@@ -1749,6 +1792,9 @@ struct rx_stats {
1749 a_sle32 ucast_rate; 1792 a_sle32 ucast_rate;
1750} __packed; 1793} __packed;
1751 1794
1795#define RATE_INDEX_WITHOUT_SGI_MASK 0x7f
1796#define RATE_INDEX_MSB 0x80
1797
1752struct tkip_ccmp_stats { 1798struct tkip_ccmp_stats {
1753 __le32 tkip_local_mic_fail; 1799 __le32 tkip_local_mic_fail;
1754 __le32 tkip_cnter_measures_invoked; 1800 __le32 tkip_cnter_measures_invoked;
@@ -2019,7 +2065,6 @@ struct wmi_set_ie_cmd {
2019 2065
2020#define WOW_MAX_FILTERS_PER_LIST 4 2066#define WOW_MAX_FILTERS_PER_LIST 4
2021#define WOW_PATTERN_SIZE 64 2067#define WOW_PATTERN_SIZE 64
2022#define WOW_MASK_SIZE 64
2023 2068
2024#define MAC_MAX_FILTERS_PER_LIST 4 2069#define MAC_MAX_FILTERS_PER_LIST 4
2025 2070
@@ -2028,7 +2073,7 @@ struct wow_filter {
2028 u8 wow_filter_id; 2073 u8 wow_filter_id;
2029 u8 wow_filter_size; 2074 u8 wow_filter_size;
2030 u8 wow_filter_offset; 2075 u8 wow_filter_offset;
2031 u8 wow_filter_mask[WOW_MASK_SIZE]; 2076 u8 wow_filter_mask[WOW_PATTERN_SIZE];
2032 u8 wow_filter_pattern[WOW_PATTERN_SIZE]; 2077 u8 wow_filter_pattern[WOW_PATTERN_SIZE];
2033} __packed; 2078} __packed;
2034 2079
@@ -2087,6 +2132,19 @@ struct wmi_del_wow_pattern_cmd {
2087 __le16 filter_id; 2132 __le16 filter_id;
2088} __packed; 2133} __packed;
2089 2134
2135/* WMI_SET_TXE_NOTIFY_CMDID */
2136struct wmi_txe_notify_cmd {
2137 __le32 rate;
2138 __le32 pkts;
2139 __le32 intvl;
2140} __packed;
2141
2142/* WMI_TXE_NOTIFY_EVENTID */
2143struct wmi_txe_notify_event {
2144 __le32 rate;
2145 __le32 pkts;
2146} __packed;
2147
2090/* WMI_SET_AKMP_PARAMS_CMD */ 2148/* WMI_SET_AKMP_PARAMS_CMD */
2091 2149
2092struct wmi_pmkid { 2150struct wmi_pmkid {
@@ -2505,11 +2563,6 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
2505int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid, 2563int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
2506 u16 channel); 2564 u16 channel);
2507int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx); 2565int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
2508int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
2509 enum wmi_scan_type scan_type,
2510 u32 force_fgscan, u32 is_legacy,
2511 u32 home_dwell_time, u32 force_scan_interval,
2512 s8 num_chan, u16 *ch_list);
2513 2566
2514int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx, 2567int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
2515 enum wmi_scan_type scan_type, 2568 enum wmi_scan_type scan_type,
@@ -2517,6 +2570,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
2517 u32 home_dwell_time, u32 force_scan_interval, 2570 u32 home_dwell_time, u32 force_scan_interval,
2518 s8 num_chan, u16 *ch_list, u32 no_cck, 2571 s8 num_chan, u16 *ch_list, u32 no_cck,
2519 u32 *rates); 2572 u32 *rates);
2573int ath6kl_wmi_enable_sched_scan_cmd(struct wmi *wmi, u8 if_idx, bool enable);
2520 2574
2521int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec, 2575int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
2522 u16 fg_end_sec, u16 bg_sec, 2576 u16 fg_end_sec, u16 bg_sec,
@@ -2592,6 +2646,7 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2592 const u8 *mask); 2646 const u8 *mask);
2593int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx, 2647int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
2594 u16 list_id, u16 filter_id); 2648 u16 list_id, u16 filter_id);
2649int ath6kl_wmi_set_rssi_filter_cmd(struct wmi *wmi, u8 if_idx, s8 rssi);
2595int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi); 2650int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
2596int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period); 2651int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
2597int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid); 2652int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
@@ -2600,6 +2655,9 @@ int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
2600int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, 2655int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
2601 u8 *filter, bool add_filter); 2656 u8 *filter, bool add_filter);
2602int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable); 2657int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
2658int ath6kl_wmi_set_txe_notify(struct wmi *wmi, u8 idx,
2659 u32 rate, u32 pkts, u32 intvl);
2660int ath6kl_wmi_set_regdomain_cmd(struct wmi *wmi, const char *alpha2);
2603 2661
2604/* AP mode uAPSD */ 2662/* AP mode uAPSD */
2605int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable); 2663int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
@@ -2658,6 +2716,8 @@ int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
2658 2716
2659void ath6kl_wmi_sscan_timer(unsigned long ptr); 2717void ath6kl_wmi_sscan_timer(unsigned long ptr);
2660 2718
2719int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
2720
2661struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); 2721struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
2662void *ath6kl_wmi_init(struct ath6kl *devt); 2722void *ath6kl_wmi_init(struct ath6kl *devt);
2663void ath6kl_wmi_shutdown(struct wmi *wmi); 2723void ath6kl_wmi_shutdown(struct wmi *wmi);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index c7aa6646123e..5fc15bf8be09 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -17,6 +17,7 @@ config ATH9K_BTCOEX_SUPPORT
17config ATH9K 17config ATH9K
18 tristate "Atheros 802.11n wireless cards support" 18 tristate "Atheros 802.11n wireless cards support"
19 depends on MAC80211 19 depends on MAC80211
20 select ATH_COMMON
20 select ATH9K_HW 21 select ATH9K_HW
21 select MAC80211_LEDS 22 select MAC80211_LEDS
22 select LEDS_CLASS 23 select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 6f7cf49eff4d..262e1e036fd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -534,98 +534,98 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
534 534
535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = { 535static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 536 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
537 {0x0000a2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, 537 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
538 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, 538 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
539 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, 539 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 540 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
541 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 541 {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
542 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 542 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
543 {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002}, 543 {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
544 {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004}, 544 {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
545 {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200}, 545 {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
546 {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202}, 546 {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
547 {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400}, 547 {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
548 {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402}, 548 {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
549 {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404}, 549 {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
550 {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603}, 550 {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
551 {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02}, 551 {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
552 {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04}, 552 {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
553 {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20}, 553 {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
554 {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20}, 554 {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
555 {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22}, 555 {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
556 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, 556 {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
557 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, 557 {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
558 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, 558 {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
559 {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861}, 559 {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
560 {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81}, 560 {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
561 {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83}, 561 {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
562 {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84}, 562 {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
563 {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3}, 563 {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
564 {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5}, 564 {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
565 {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9}, 565 {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
566 {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb}, 566 {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
567 {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 567 {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
568 {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 568 {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
569 {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 569 {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
570 {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 570 {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
571 {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 571 {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
572 {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 572 {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
573 {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec}, 573 {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
574 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 574 {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
575 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 575 {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
576 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 576 {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
577 {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200}, 577 {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
578 {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202}, 578 {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
579 {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400}, 579 {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
580 {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402}, 580 {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
581 {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404}, 581 {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
582 {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603}, 582 {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
583 {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02}, 583 {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
584 {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04}, 584 {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
585 {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20}, 585 {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
586 {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20}, 586 {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
587 {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22}, 587 {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
588 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, 588 {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
589 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, 589 {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
590 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, 590 {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
591 {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861}, 591 {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
592 {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81}, 592 {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
593 {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83}, 593 {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
594 {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84}, 594 {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
595 {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3}, 595 {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
596 {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5}, 596 {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
597 {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9}, 597 {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
598 {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb}, 598 {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
599 {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 599 {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
600 {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 600 {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
601 {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 601 {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
602 {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 602 {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
603 {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 603 {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
604 {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 604 {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
605 {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec}, 605 {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 606 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 607 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
608 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 608 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
609 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 609 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
610 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 610 {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
611 {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000}, 611 {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
612 {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501}, 612 {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
613 {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501}, 613 {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
614 {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03}, 614 {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
615 {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04}, 615 {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
616 {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04}, 616 {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
617 {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 617 {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
618 {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 618 {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
619 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 619 {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
620 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 620 {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
621 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005}, 621 {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
622 {0x0000b2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, 622 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
623 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, 623 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
624 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, 624 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 625 {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
626 {0x0000c2dc, 0x00033800, 0x00033800, 0x03aaa352, 0x03aaa352}, 626 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
627 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03ccc584, 0x03ccc584}, 627 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
628 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03f0f800, 0x03f0f800}, 628 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 629 {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 630 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
631 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001}, 631 {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 84b558d126ca..8b0d8dcd7625 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -276,6 +276,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
276 offset_array[i], 276 offset_array[i],
277 REG_READ(ah, offset_array[i])); 277 REG_READ(ah, offset_array[i]));
278 278
279 if (AR_SREV_9565(ah) &&
280 (iCoff == 63 || qCoff == 63 ||
281 iCoff == -63 || qCoff == -63))
282 return;
283
279 REG_RMW_FIELD(ah, offset_array[i], 284 REG_RMW_FIELD(ah, offset_array[i],
280 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, 285 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
281 iCoff); 286 iCoff);
@@ -886,6 +891,74 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
886 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1); 891 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
887} 892}
888 893
894static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
895{
896 int offset[8], total = 0, test;
897 int agc_out, i;
898
899 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
900 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
901 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
902 AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
903 if (is_2g)
904 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
905 AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
906 else
907 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
908 AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
909
910 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
911 AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
912 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
913 AR_PHY_65NM_RXTX2_RXON, 0x0);
914
915 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
916 AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
917 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
918 AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
919 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
920 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
921 if (is_2g)
922 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
923 AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0);
924 else
925 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
926 AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0);
927
928 for (i = 6; i > 0; i--) {
929 offset[i] = BIT(i - 1);
930 test = total + offset[i];
931
932 if (is_2g)
933 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
934 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
935 test);
936 else
937 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
938 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
939 test);
940 udelay(100);
941 agc_out = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
942 AR_PHY_65NM_RXRF_AGC_AGC_OUT);
943 offset[i] = (agc_out) ? 0 : 1;
944 total += (offset[i] << (i - 1));
945 }
946
947 if (is_2g)
948 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
949 AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, total);
950 else
951 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
952 AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
953
954 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
955 AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
956 REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
957 AR_PHY_65NM_RXTX2_RXON_OVR, 0);
958 REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
959 AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
960}
961
889static bool ar9003_hw_init_cal(struct ath_hw *ah, 962static bool ar9003_hw_init_cal(struct ath_hw *ah,
890 struct ath9k_channel *chan) 963 struct ath9k_channel *chan)
891{ 964{
@@ -984,6 +1057,14 @@ skip_tx_iqcal:
984 status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 1057 status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
985 AR_PHY_AGC_CONTROL_CAL, 1058 AR_PHY_AGC_CONTROL_CAL,
986 0, AH_WAIT_TIMEOUT); 1059 0, AH_WAIT_TIMEOUT);
1060 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
1061 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
1062 if (!(ah->rxchainmask & (1 << i)))
1063 continue;
1064 ar9003_hw_manual_peak_cal(ah, i,
1065 IS_CHAN_2GHZ(chan));
1066 }
1067 }
987 } 1068 }
988 1069
989 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal) 1070 if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 5bbe5057ba18..562186ca9b52 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -18,6 +18,7 @@
18#include "hw.h" 18#include "hw.h"
19#include "ar9003_phy.h" 19#include "ar9003_phy.h"
20#include "ar9003_eeprom.h" 20#include "ar9003_eeprom.h"
21#include "ar9003_mci.h"
21 22
22#define COMP_HDR_LEN 4 23#define COMP_HDR_LEN 4
23#define COMP_CKSUM_LEN 2 24#define COMP_CKSUM_LEN 2
@@ -41,7 +42,6 @@
41static int ar9003_hw_power_interpolate(int32_t x, 42static int ar9003_hw_power_interpolate(int32_t x,
42 int32_t *px, int32_t *py, u_int16_t np); 43 int32_t *px, int32_t *py, u_int16_t np);
43 44
44
45static const struct ar9300_eeprom ar9300_default = { 45static const struct ar9300_eeprom ar9300_default = {
46 .eepromVersion = 2, 46 .eepromVersion = 2,
47 .templateVersion = 2, 47 .templateVersion = 2,
@@ -2987,10 +2987,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
2987 case EEP_RX_MASK: 2987 case EEP_RX_MASK:
2988 return pBase->txrxMask & 0xf; 2988 return pBase->txrxMask & 0xf;
2989 case EEP_PAPRD: 2989 case EEP_PAPRD:
2990 if (AR_SREV_9462(ah))
2991 return false;
2992 if (!ah->config.enable_paprd);
2993 return false;
2994 return !!(pBase->featureEnable & BIT(5)); 2990 return !!(pBase->featureEnable & BIT(5));
2995 case EEP_CHAIN_MASK_REDUCE: 2991 case EEP_CHAIN_MASK_REDUCE:
2996 return (pBase->miscConfiguration >> 0x3) & 0x1; 2992 return (pBase->miscConfiguration >> 0x3) & 0x1;
@@ -3005,24 +3001,24 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
3005 } 3001 }
3006} 3002}
3007 3003
3008static bool ar9300_eeprom_read_byte(struct ath_common *common, int address, 3004static bool ar9300_eeprom_read_byte(struct ath_hw *ah, int address,
3009 u8 *buffer) 3005 u8 *buffer)
3010{ 3006{
3011 u16 val; 3007 u16 val;
3012 3008
3013 if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) 3009 if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
3014 return false; 3010 return false;
3015 3011
3016 *buffer = (val >> (8 * (address % 2))) & 0xff; 3012 *buffer = (val >> (8 * (address % 2))) & 0xff;
3017 return true; 3013 return true;
3018} 3014}
3019 3015
3020static bool ar9300_eeprom_read_word(struct ath_common *common, int address, 3016static bool ar9300_eeprom_read_word(struct ath_hw *ah, int address,
3021 u8 *buffer) 3017 u8 *buffer)
3022{ 3018{
3023 u16 val; 3019 u16 val;
3024 3020
3025 if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val))) 3021 if (unlikely(!ath9k_hw_nvram_read(ah, address / 2, &val)))
3026 return false; 3022 return false;
3027 3023
3028 buffer[0] = val >> 8; 3024 buffer[0] = val >> 8;
@@ -3048,14 +3044,14 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
3048 * the 16-bit word at that address 3044 * the 16-bit word at that address
3049 */ 3045 */
3050 if (address % 2 == 0) { 3046 if (address % 2 == 0) {
3051 if (!ar9300_eeprom_read_byte(common, address--, buffer++)) 3047 if (!ar9300_eeprom_read_byte(ah, address--, buffer++))
3052 goto error; 3048 goto error;
3053 3049
3054 count--; 3050 count--;
3055 } 3051 }
3056 3052
3057 for (i = 0; i < count / 2; i++) { 3053 for (i = 0; i < count / 2; i++) {
3058 if (!ar9300_eeprom_read_word(common, address, buffer)) 3054 if (!ar9300_eeprom_read_word(ah, address, buffer))
3059 goto error; 3055 goto error;
3060 3056
3061 address -= 2; 3057 address -= 2;
@@ -3063,7 +3059,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
3063 } 3059 }
3064 3060
3065 if (count % 2) 3061 if (count % 2)
3066 if (!ar9300_eeprom_read_byte(common, address, buffer)) 3062 if (!ar9300_eeprom_read_byte(ah, address, buffer))
3067 goto error; 3063 goto error;
3068 3064
3069 return true; 3065 return true;
@@ -3240,12 +3236,11 @@ static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
3240static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr, 3236static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
3241 int mdata_size) 3237 int mdata_size)
3242{ 3238{
3243 struct ath_common *common = ath9k_hw_common(ah);
3244 u16 *data = (u16 *) mptr; 3239 u16 *data = (u16 *) mptr;
3245 int i; 3240 int i;
3246 3241
3247 for (i = 0; i < mdata_size / 2; i++, data++) 3242 for (i = 0; i < mdata_size / 2; i++, data++)
3248 ath9k_hw_nvram_read(common, i, data); 3243 ath9k_hw_nvram_read(ah, i, data);
3249 3244
3250 return 0; 3245 return 0;
3251} 3246}
@@ -3601,7 +3596,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3601 * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE 3596 * 7:4 R/W SWITCH_TABLE_COM_SPDT_WLAN_IDLE
3602 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE 3597 * SWITCH_TABLE_COM_SPDT_WLAN_IDLE
3603 */ 3598 */
3604 if (AR_SREV_9462_20_OR_LATER(ah)) { 3599 if (AR_SREV_9462_20(ah) || AR_SREV_9565(ah)) {
3605 value = ar9003_switch_com_spdt_get(ah, is2ghz); 3600 value = ar9003_switch_com_spdt_get(ah, is2ghz);
3606 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, 3601 REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
3607 AR_SWITCH_TABLE_COM_SPDT_ALL, value); 3602 AR_SWITCH_TABLE_COM_SPDT_ALL, value);
@@ -5037,16 +5032,28 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
5037 case CTL_5GHT20: 5032 case CTL_5GHT20:
5038 case CTL_2GHT20: 5033 case CTL_2GHT20:
5039 for (i = ALL_TARGET_HT20_0_8_16; 5034 for (i = ALL_TARGET_HT20_0_8_16;
5040 i <= ALL_TARGET_HT20_23; i++) 5035 i <= ALL_TARGET_HT20_23; i++) {
5041 pPwrArray[i] = (u8)min((u16)pPwrArray[i], 5036 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
5042 minCtlPower); 5037 minCtlPower);
5038 if (ath9k_hw_mci_is_enabled(ah))
5039 pPwrArray[i] =
5040 (u8)min((u16)pPwrArray[i],
5041 ar9003_mci_get_max_txpower(ah,
5042 pCtlMode[ctlMode]));
5043 }
5043 break; 5044 break;
5044 case CTL_5GHT40: 5045 case CTL_5GHT40:
5045 case CTL_2GHT40: 5046 case CTL_2GHT40:
5046 for (i = ALL_TARGET_HT40_0_8_16; 5047 for (i = ALL_TARGET_HT40_0_8_16;
5047 i <= ALL_TARGET_HT40_23; i++) 5048 i <= ALL_TARGET_HT40_23; i++) {
5048 pPwrArray[i] = (u8)min((u16)pPwrArray[i], 5049 pPwrArray[i] = (u8)min((u16)pPwrArray[i],
5049 minCtlPower); 5050 minCtlPower);
5051 if (ath9k_hw_mci_is_enabled(ah))
5052 pPwrArray[i] =
5053 (u8)min((u16)pPwrArray[i],
5054 ar9003_mci_get_max_txpower(ah,
5055 pCtlMode[ctlMode]));
5056 }
5050 break; 5057 break;
5051 default: 5058 default:
5052 break; 5059 break;
@@ -5064,6 +5071,33 @@ static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
5064 return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2; 5071 return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
5065} 5072}
5066 5073
5074static void ar9003_paprd_set_txpower(struct ath_hw *ah,
5075 struct ath9k_channel *chan,
5076 u8 *targetPowerValT2)
5077{
5078 int i;
5079
5080 if (!ar9003_is_paprd_enabled(ah))
5081 return;
5082
5083 if (IS_CHAN_HT40(chan))
5084 i = ALL_TARGET_HT40_7;
5085 else
5086 i = ALL_TARGET_HT20_7;
5087
5088 if (IS_CHAN_2GHZ(chan)) {
5089 if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) &&
5090 !AR_SREV_9462(ah) && !AR_SREV_9565(ah)) {
5091 if (IS_CHAN_HT40(chan))
5092 i = ALL_TARGET_HT40_0_8_16;
5093 else
5094 i = ALL_TARGET_HT20_0_8_16;
5095 }
5096 }
5097
5098 ah->paprd_target_power = targetPowerValT2[i];
5099}
5100
5067static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, 5101static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5068 struct ath9k_channel *chan, u16 cfgCtl, 5102 struct ath9k_channel *chan, u16 cfgCtl,
5069 u8 twiceAntennaReduction, 5103 u8 twiceAntennaReduction,
@@ -5085,7 +5119,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5085 */ 5119 */
5086 ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2); 5120 ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
5087 5121
5088 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { 5122 if (ar9003_is_paprd_enabled(ah)) {
5089 if (IS_CHAN_2GHZ(chan)) 5123 if (IS_CHAN_2GHZ(chan))
5090 modal_hdr = &eep->modalHeader2G; 5124 modal_hdr = &eep->modalHeader2G;
5091 else 5125 else
@@ -5126,7 +5160,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5126 twiceAntennaReduction, 5160 twiceAntennaReduction,
5127 powerLimit); 5161 powerLimit);
5128 5162
5129 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { 5163 if (ar9003_is_paprd_enabled(ah)) {
5130 for (i = 0; i < ar9300RateSize; i++) { 5164 for (i = 0; i < ar9300RateSize; i++) {
5131 if ((ah->paprd_ratemask & (1 << i)) && 5165 if ((ah->paprd_ratemask & (1 << i)) &&
5132 (abs(targetPowerValT2[i] - 5166 (abs(targetPowerValT2[i] -
@@ -5158,19 +5192,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5158 /* Write target power array to registers */ 5192 /* Write target power array to registers */
5159 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 5193 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
5160 ar9003_hw_calibration_apply(ah, chan->channel); 5194 ar9003_hw_calibration_apply(ah, chan->channel);
5161 5195 ar9003_paprd_set_txpower(ah, chan, targetPowerValT2);
5162 if (IS_CHAN_2GHZ(chan)) {
5163 if (IS_CHAN_HT40(chan))
5164 i = ALL_TARGET_HT40_0_8_16;
5165 else
5166 i = ALL_TARGET_HT20_0_8_16;
5167 } else {
5168 if (IS_CHAN_HT40(chan))
5169 i = ALL_TARGET_HT40_7;
5170 else
5171 i = ALL_TARGET_HT20_7;
5172 }
5173 ah->paprd_target_power = targetPowerValT2[i];
5174} 5196}
5175 5197
5176static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah, 5198static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 41b1a75e6bec..54ba42f4108a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -68,13 +68,13 @@
68#define AR9300_BASE_ADDR 0x3ff 68#define AR9300_BASE_ADDR 0x3ff
69#define AR9300_BASE_ADDR_512 0x1ff 69#define AR9300_BASE_ADDR_512 0x1ff
70 70
71#define AR9300_OTP_BASE 0x14000 71#define AR9300_OTP_BASE (AR_SREV_9340(ah) ? 0x30000 : 0x14000)
72#define AR9300_OTP_STATUS 0x15f18 72#define AR9300_OTP_STATUS (AR_SREV_9340(ah) ? 0x30018 : 0x15f18)
73#define AR9300_OTP_STATUS_TYPE 0x7 73#define AR9300_OTP_STATUS_TYPE 0x7
74#define AR9300_OTP_STATUS_VALID 0x4 74#define AR9300_OTP_STATUS_VALID 0x4
75#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2 75#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
76#define AR9300_OTP_STATUS_SM_BUSY 0x1 76#define AR9300_OTP_STATUS_SM_BUSY 0x1
77#define AR9300_OTP_READ_DATA 0x15f1c 77#define AR9300_OTP_READ_DATA (AR_SREV_9340(ah) ? 0x3001c : 0x15f1c)
78 78
79enum targetPowerHTRates { 79enum targetPowerHTRates {
80 HT_TARGET_RATE_0_8_16, 80 HT_TARGET_RATE_0_8_16,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 1a36fa262639..74fd3977feeb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -35,12 +35,6 @@
35 */ 35 */
36static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 36static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
37{ 37{
38#define AR9462_BB_CTX_COEFJ(x) \
39 ar9462_##x##_baseband_core_txfir_coeff_japan_2484
40
41#define AR9462_BBC_TXIFR_COEFFJ \
42 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
43
44 if (AR_SREV_9330_11(ah)) { 38 if (AR_SREV_9330_11(ah)) {
45 /* mac */ 39 /* mac */
46 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 40 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -70,6 +64,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
70 INIT_INI_ARRAY(&ah->iniModesTxGain, 64 INIT_INI_ARRAY(&ah->iniModesTxGain,
71 ar9331_modes_lowest_ob_db_tx_gain_1p1); 65 ar9331_modes_lowest_ob_db_tx_gain_1p1);
72 66
67 /* Japan 2484 Mhz CCK */
68 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
69 ar9331_1p1_baseband_core_txfir_coeff_japan_2484);
70
73 /* additional clock settings */ 71 /* additional clock settings */
74 if (ah->is_clk_25mhz) 72 if (ah->is_clk_25mhz)
75 INIT_INI_ARRAY(&ah->iniAdditional, 73 INIT_INI_ARRAY(&ah->iniAdditional,
@@ -106,6 +104,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
106 INIT_INI_ARRAY(&ah->iniModesTxGain, 104 INIT_INI_ARRAY(&ah->iniModesTxGain,
107 ar9331_modes_lowest_ob_db_tx_gain_1p2); 105 ar9331_modes_lowest_ob_db_tx_gain_1p2);
108 106
107 /* Japan 2484 Mhz CCK */
108 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
109 ar9331_1p2_baseband_core_txfir_coeff_japan_2484);
110
109 /* additional clock settings */ 111 /* additional clock settings */
110 if (ah->is_clk_25mhz) 112 if (ah->is_clk_25mhz)
111 INIT_INI_ARRAY(&ah->iniAdditional, 113 INIT_INI_ARRAY(&ah->iniAdditional,
@@ -180,6 +182,10 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
180 INIT_INI_ARRAY(&ah->iniModesTxGain, 182 INIT_INI_ARRAY(&ah->iniModesTxGain,
181 ar9485_modes_lowest_ob_db_tx_gain_1_1); 183 ar9485_modes_lowest_ob_db_tx_gain_1_1);
182 184
185 /* Japan 2484 Mhz CCK */
186 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
187 ar9485_1_1_baseband_core_txfir_coeff_japan_2484);
188
183 /* Load PCIE SERDES settings from INI */ 189 /* Load PCIE SERDES settings from INI */
184 190
185 /* Awake Setting */ 191 /* Awake Setting */
@@ -219,19 +225,17 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
219 225
220 /* Awake -> Sleep Setting */ 226 /* Awake -> Sleep Setting */
221 INIT_INI_ARRAY(&ah->iniPcieSerdes, 227 INIT_INI_ARRAY(&ah->iniPcieSerdes,
222 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0); 228 ar9462_pciephy_clkreq_disable_L1_2p0);
223 /* Sleep -> Awake Setting */ 229 /* Sleep -> Awake Setting */
224 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 230 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
225 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0); 231 ar9462_pciephy_clkreq_disable_L1_2p0);
226 232
227 /* Fast clock modal settings */ 233 /* Fast clock modal settings */
228 INIT_INI_ARRAY(&ah->iniModesFastClock, 234 INIT_INI_ARRAY(&ah->iniModesFastClock,
229 ar9462_modes_fast_clock_2p0); 235 ar9462_modes_fast_clock_2p0);
230 236
231 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 237 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
232 AR9462_BB_CTX_COEFJ(2p0)); 238 ar9462_2p0_baseband_core_txfir_coeff_japan_2484);
233
234 INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ);
235 } else if (AR_SREV_9550(ah)) { 239 } else if (AR_SREV_9550(ah)) {
236 /* mac */ 240 /* mac */
237 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 241 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -328,9 +332,9 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
328 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table); 332 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table);
329 333
330 INIT_INI_ARRAY(&ah->iniPcieSerdes, 334 INIT_INI_ARRAY(&ah->iniPcieSerdes,
331 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1); 335 ar9565_1p0_pciephy_clkreq_disable_L1);
332 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 336 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
333 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1); 337 ar9565_1p0_pciephy_clkreq_disable_L1);
334 338
335 INIT_INI_ARRAY(&ah->iniModesFastClock, 339 INIT_INI_ARRAY(&ah->iniModesFastClock,
336 ar9565_1p0_modes_fast_clock); 340 ar9565_1p0_modes_fast_clock);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 44c202ce6c66..8dd069259e7b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -714,7 +714,6 @@ bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan)
714 714
715 return true; 715 return true;
716} 716}
717EXPORT_SYMBOL(ar9003_mci_start_reset);
718 717
719int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, 718int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
720 struct ath9k_hw_cal_data *caldata) 719 struct ath9k_hw_cal_data *caldata)
@@ -750,6 +749,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
750 749
751 mci_hw->bt_state = MCI_BT_AWAKE; 750 mci_hw->bt_state = MCI_BT_AWAKE;
752 751
752 REG_CLR_BIT(ah, AR_PHY_TIMING4,
753 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
754
753 if (caldata) { 755 if (caldata) {
754 caldata->done_txiqcal_once = false; 756 caldata->done_txiqcal_once = false;
755 caldata->done_txclcal_once = false; 757 caldata->done_txclcal_once = false;
@@ -759,6 +761,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
759 if (!ath9k_hw_init_cal(ah, chan)) 761 if (!ath9k_hw_init_cal(ah, chan))
760 return -EIO; 762 return -EIO;
761 763
764 REG_SET_BIT(ah, AR_PHY_TIMING4,
765 1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
766
762exit: 767exit:
763 ar9003_mci_enable_interrupt(ah); 768 ar9003_mci_enable_interrupt(ah);
764 return 0; 769 return 0;
@@ -799,6 +804,9 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
799 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, 804 REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
800 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1); 805 AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
801 806
807 if (AR_SREV_9565(ah))
808 REG_RMW_FIELD(ah, AR_MCI_MISC, AR_MCI_MISC_HW_FIX_EN, 1);
809
802 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) { 810 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
803 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH); 811 thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
804 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL, 812 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
@@ -818,7 +826,7 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
818{ 826{
819 struct ath_common *common = ath9k_hw_common(ah); 827 struct ath_common *common = ath9k_hw_common(ah);
820 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 828 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
821 u32 regval; 829 u32 regval, i;
822 830
823 ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n", 831 ath_dbg(common, MCI, "MCI Reset (full_sleep = %d, is_2g = %d)\n",
824 is_full_sleep, is_2g); 832 is_full_sleep, is_2g);
@@ -847,11 +855,18 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
847 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) | 855 SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
848 SM(1, AR_BTCOEX_CTRL_PA_SHARED) | 856 SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
849 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) | 857 SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
850 SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
851 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
852 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) | 858 SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
853 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) | 859 SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
854 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); 860 SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
861 if (AR_SREV_9565(ah)) {
862 regval |= SM(1, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
863 SM(1, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
864 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
865 AR_BTCOEX_CTRL2_TX_CHAIN_MASK, 0x1);
866 } else {
867 regval |= SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
868 SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK);
869 }
855 870
856 REG_WRITE(ah, AR_BTCOEX_CTRL, regval); 871 REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
857 872
@@ -865,9 +880,24 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
865 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3, 880 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
866 AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20); 881 AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
867 882
868 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1); 883 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 0);
869 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0); 884 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
870 885
886 /* Set the time out to 3.125ms (5 BT slots) */
887 REG_RMW_FIELD(ah, AR_BTCOEX_WL_LNA, AR_BTCOEX_WL_LNA_TIMEOUT, 0x3D090);
888
889 /* concurrent tx priority */
890 if (mci->config & ATH_MCI_CONFIG_CONCUR_TX) {
891 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
892 AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE, 0);
893 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2,
894 AR_BTCOEX_CTRL2_TXPWR_THRESH, 0x7f);
895 REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
896 AR_BTCOEX_CTRL_REDUCE_TXPWR, 0);
897 for (i = 0; i < 8; i++)
898 REG_WRITE(ah, AR_BTCOEX_MAX_TXPWR(i), 0x7f7f7f7f);
899 }
900
871 regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV); 901 regval = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
872 REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval); 902 REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, regval);
873 REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN); 903 REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
@@ -910,6 +940,9 @@ int ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
910 mci->ready = true; 940 mci->ready = true;
911 ar9003_mci_prep_interface(ah); 941 ar9003_mci_prep_interface(ah);
912 942
943 if (AR_SREV_9565(ah))
944 REG_RMW_FIELD(ah, AR_MCI_DBG_CNT_CTRL,
945 AR_MCI_DBG_CNT_CTRL_ENABLE, 0);
913 if (en_int) 946 if (en_int)
914 ar9003_mci_enable_interrupt(ah); 947 ar9003_mci_enable_interrupt(ah);
915 948
@@ -1028,7 +1061,9 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
1028 1061
1029 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) 1062 if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
1030 ar9003_mci_osla_setup(ah, true); 1063 ar9003_mci_osla_setup(ah, true);
1031 REG_WRITE(ah, AR_SELFGEN_MASK, 0x02); 1064
1065 if (AR_SREV_9462(ah))
1066 REG_WRITE(ah, AR_SELFGEN_MASK, 0x02);
1032 } else { 1067 } else {
1033 ar9003_mci_send_lna_take(ah, true); 1068 ar9003_mci_send_lna_take(ah, true);
1034 udelay(5); 1069 udelay(5);
@@ -1170,7 +1205,7 @@ EXPORT_SYMBOL(ar9003_mci_cleanup);
1170u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) 1205u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1171{ 1206{
1172 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; 1207 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
1173 u32 value = 0; 1208 u32 value = 0, tsf;
1174 u8 query_type; 1209 u8 query_type;
1175 1210
1176 switch (state_type) { 1211 switch (state_type) {
@@ -1228,6 +1263,14 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
1228 ar9003_mci_send_coex_bt_status_query(ah, true, query_type); 1263 ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
1229 break; 1264 break;
1230 case MCI_STATE_RECOVER_RX: 1265 case MCI_STATE_RECOVER_RX:
1266 tsf = ath9k_hw_gettsf32(ah);
1267 if ((tsf - mci->last_recovery) <= MCI_RECOVERY_DUR_TSF) {
1268 ath_dbg(ath9k_hw_common(ah), MCI,
1269 "(MCI) ignore Rx recovery\n");
1270 break;
1271 }
1272 ath_dbg(ath9k_hw_common(ah), MCI, "(MCI) RECOVER RX\n");
1273 mci->last_recovery = tsf;
1231 ar9003_mci_prep_interface(ah); 1274 ar9003_mci_prep_interface(ah);
1232 mci->query_bt = true; 1275 mci->query_bt = true;
1233 mci->need_flush_btinfo = true; 1276 mci->need_flush_btinfo = true;
@@ -1426,3 +1469,17 @@ void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
1426 ar9003_mci_send_coex_wlan_channels(ah, true); 1469 ar9003_mci_send_coex_wlan_channels(ah, true);
1427} 1470}
1428EXPORT_SYMBOL(ar9003_mci_send_wlan_channels); 1471EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
1472
1473u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
1474{
1475 if (!ah->btcoex_hw.mci.concur_tx)
1476 goto out;
1477
1478 if (ctlmode == CTL_2GHT20)
1479 return ATH_BTCOEX_HT20_MAX_TXPOWER;
1480 else if (ctlmode == CTL_2GHT40)
1481 return ATH_BTCOEX_HT40_MAX_TXPOWER;
1482
1483out:
1484 return -1;
1485}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 2a2d01889613..66d7ab9f920d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -18,6 +18,7 @@
18#define AR9003_MCI_H 18#define AR9003_MCI_H
19 19
20#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */ 20#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
21#define MCI_RECOVERY_DUR_TSF (100 * 1000) /* 100 ms */
21 22
22/* Default remote BT device MCI COEX version */ 23/* Default remote BT device MCI COEX version */
23#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3 24#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
@@ -125,6 +126,7 @@ enum ath_mci_gpm_coex_profile_type {
125 MCI_GPM_COEX_PROFILE_HID, 126 MCI_GPM_COEX_PROFILE_HID,
126 MCI_GPM_COEX_PROFILE_BNEP, 127 MCI_GPM_COEX_PROFILE_BNEP,
127 MCI_GPM_COEX_PROFILE_VOICE, 128 MCI_GPM_COEX_PROFILE_VOICE,
129 MCI_GPM_COEX_PROFILE_A2DPVO,
128 MCI_GPM_COEX_PROFILE_MAX 130 MCI_GPM_COEX_PROFILE_MAX
129}; 131};
130 132
@@ -196,7 +198,6 @@ enum mci_state_type {
196 MCI_STATE_SEND_WLAN_COEX_VERSION, 198 MCI_STATE_SEND_WLAN_COEX_VERSION,
197 MCI_STATE_SEND_VERSION_QUERY, 199 MCI_STATE_SEND_VERSION_QUERY,
198 MCI_STATE_SEND_STATUS_QUERY, 200 MCI_STATE_SEND_STATUS_QUERY,
199 MCI_STATE_SET_CONCUR_TX_PRI,
200 MCI_STATE_RECOVER_RX, 201 MCI_STATE_RECOVER_RX,
201 MCI_STATE_NEED_FTP_STOMP, 202 MCI_STATE_NEED_FTP_STOMP,
202 MCI_STATE_DEBUG, 203 MCI_STATE_DEBUG,
@@ -278,6 +279,7 @@ void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
278void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); 279void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
279void ar9003_mci_set_power_awake(struct ath_hw *ah); 280void ar9003_mci_set_power_awake(struct ath_hw *ah);
280void ar9003_mci_check_gpm_offset(struct ath_hw *ah); 281void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
282u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode);
281 283
282#else 284#else
283 285
@@ -324,6 +326,10 @@ static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
324static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah) 326static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
325{ 327{
326} 328}
329static inline u16 ar9003_mci_get_max_txpower(struct ath_hw *ah, u8 ctlmode)
330{
331 return -1;
332}
327#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 333#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
328 334
329#endif 335#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 0ed3846f9cbb..09c1f9da67a0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -74,15 +74,23 @@ static int ar9003_get_training_power_2g(struct ath_hw *ah)
74 unsigned int power, scale, delta; 74 unsigned int power, scale, delta;
75 75
76 scale = ar9003_get_paprd_scale_factor(ah, chan); 76 scale = ar9003_get_paprd_scale_factor(ah, chan);
77 power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
78 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
79 77
80 delta = abs((int) ah->paprd_target_power - (int) power); 78 if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
81 if (delta > scale) 79 AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
82 return -1; 80 power = ah->paprd_target_power + 2;
83 81 } else if (AR_SREV_9485(ah)) {
84 if (delta < 4) 82 power = 25;
85 power -= 4 - delta; 83 } else {
84 power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
85 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
86
87 delta = abs((int) ah->paprd_target_power - (int) power);
88 if (delta > scale)
89 return -1;
90
91 if (delta < 4)
92 power -= 4 - delta;
93 }
86 94
87 return power; 95 return power;
88} 96}
@@ -169,6 +177,9 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
169 REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, 177 REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
170 ah->paprd_ratemask_ht40); 178 ah->paprd_ratemask_ht40);
171 179
180 ath_dbg(common, CALIBRATE, "PAPRD HT20 mask: 0x%x, HT40 mask: 0x%x\n",
181 ah->paprd_ratemask, ah->paprd_ratemask_ht40);
182
172 for (i = 0; i < ah->caps.max_txchains; i++) { 183 for (i = 0; i < ah->caps.max_txchains; i++) {
173 REG_RMW_FIELD(ah, ctrl0[i], 184 REG_RMW_FIELD(ah, ctrl0[i],
174 AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1); 185 AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
@@ -204,7 +215,20 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
204 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28); 215 AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
205 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1, 216 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
206 AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1); 217 AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
207 val = AR_SREV_9462(ah) ? 0x91 : 147; 218
219 if (AR_SREV_9485(ah)) {
220 val = 148;
221 } else {
222 if (IS_CHAN_2GHZ(ah->curchan)) {
223 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
224 val = 145;
225 else
226 val = 147;
227 } else {
228 val = 137;
229 }
230 }
231
208 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2, 232 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
209 AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val); 233 AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
210 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 234 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
@@ -215,15 +239,24 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
215 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7); 239 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
216 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 240 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
217 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1); 241 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
218 if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah)) 242
243 if (AR_SREV_9485(ah) ||
244 AR_SREV_9462(ah) ||
245 AR_SREV_9565(ah) ||
246 AR_SREV_9550(ah) ||
247 AR_SREV_9330(ah) ||
248 AR_SREV_9340(ah))
219 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 249 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
220 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, 250 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3);
221 -3);
222 else 251 else
223 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 252 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
224 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, 253 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
225 -6); 254
226 val = AR_SREV_9462(ah) ? -10 : -15; 255 val = -10;
256
257 if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah))
258 val = -15;
259
227 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 260 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
228 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE, 261 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
229 val); 262 val);
@@ -262,9 +295,6 @@ static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
262 u32 reg = AR_PHY_TXGAIN_TABLE; 295 u32 reg = AR_PHY_TXGAIN_TABLE;
263 int i; 296 int i;
264 297
265 memset(entry, 0, sizeof(ah->paprd_gain_table_entries));
266 memset(index, 0, sizeof(ah->paprd_gain_table_index));
267
268 for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) { 298 for (i = 0; i < PAPRD_GAIN_TABLE_ENTRIES; i++) {
269 entry[i] = REG_READ(ah, reg); 299 entry[i] = REG_READ(ah, reg);
270 index[i] = (entry[i] >> 24) & 0xff; 300 index[i] = (entry[i] >> 24) & 0xff;
@@ -763,7 +793,7 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
763} 793}
764EXPORT_SYMBOL(ar9003_paprd_populate_single_table); 794EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
765 795
766int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) 796void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
767{ 797{
768 unsigned int i, desired_gain, gain_index; 798 unsigned int i, desired_gain, gain_index;
769 unsigned int train_power = ah->paprd_training_power; 799 unsigned int train_power = ah->paprd_training_power;
@@ -781,8 +811,6 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
781 811
782 REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, 812 REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
783 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); 813 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
784
785 return 0;
786} 814}
787EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); 815EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
788 816
@@ -894,7 +922,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
894 922
895 memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain])); 923 memset(caldata->pa_table[chain], 0, sizeof(caldata->pa_table[chain]));
896 924
897 buf = kmalloc(2 * 48 * sizeof(u32), GFP_ATOMIC); 925 buf = kmalloc(2 * 48 * sizeof(u32), GFP_KERNEL);
898 if (!buf) 926 if (!buf)
899 return -ENOMEM; 927 return -ENOMEM;
900 928
@@ -945,9 +973,13 @@ EXPORT_SYMBOL(ar9003_paprd_init_table);
945bool ar9003_paprd_is_done(struct ath_hw *ah) 973bool ar9003_paprd_is_done(struct ath_hw *ah)
946{ 974{
947 int paprd_done, agc2_pwr; 975 int paprd_done, agc2_pwr;
976
948 paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, 977 paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
949 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); 978 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
950 979
980 if (AR_SREV_9485(ah))
981 goto exit;
982
951 if (paprd_done == 0x1) { 983 if (paprd_done == 0x1) {
952 agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1, 984 agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
953 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR); 985 AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
@@ -963,7 +995,16 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
963 if (agc2_pwr <= PAPRD_IDEAL_AGC2_PWR_RANGE) 995 if (agc2_pwr <= PAPRD_IDEAL_AGC2_PWR_RANGE)
964 paprd_done = 0; 996 paprd_done = 0;
965 } 997 }
966 998exit:
967 return !!paprd_done; 999 return !!paprd_done;
968} 1000}
969EXPORT_SYMBOL(ar9003_paprd_is_done); 1001EXPORT_SYMBOL(ar9003_paprd_is_done);
1002
1003bool ar9003_is_paprd_enabled(struct ath_hw *ah)
1004{
1005 if ((ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->config.enable_paprd)
1006 return true;
1007
1008 return false;
1009}
1010EXPORT_SYMBOL(ar9003_is_paprd_enabled);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 759f5f5a7154..ce19c09fa8e8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -784,7 +784,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
784 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 784 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
785 785
786 if (chan->channel == 2484) 786 if (chan->channel == 2484)
787 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 787 ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
788 788
789 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) 789 if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
790 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE, 790 REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 9a48e3d2f231..107956298488 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -32,6 +32,7 @@
32#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c) 32#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
33#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc) 33#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
34#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0) 34#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
35#define AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT 16
35 36
36#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000 37#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
37#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20 38#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
@@ -697,13 +698,6 @@
697#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00 698#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
698#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8 699#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
699 700
700#define AR_PHY_65NM_CH0_RXTX1 0x16100
701#define AR_PHY_65NM_CH0_RXTX2 0x16104
702#define AR_PHY_65NM_CH1_RXTX1 0x16500
703#define AR_PHY_65NM_CH1_RXTX2 0x16504
704#define AR_PHY_65NM_CH2_RXTX1 0x16900
705#define AR_PHY_65NM_CH2_RXTX2 0x16904
706
707#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ 701#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
708 (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) 702 (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
709#define AR_CH0_TOP2_XPABIASLVL 0xf000 703#define AR_CH0_TOP2_XPABIASLVL 0xf000
@@ -1151,9 +1145,8 @@
1151#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000 1145#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
1152#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17 1146#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
1153 1147
1154#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + \ 1148#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490))
1155 (AR_SREV_9485(ah) ? \ 1149
1156 0x580 : 0x490))
1157#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001 1150#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
1158#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0 1151#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
1159#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e 1152#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
@@ -1169,15 +1162,13 @@
1169#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000 1162#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
1170#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12 1163#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
1171 1164
1172#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + \ 1165#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494))
1173 (AR_SREV_9485(ah) ? \ 1166
1174 0x584 : 0x494))
1175#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF 1167#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
1176#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0 1168#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
1177 1169
1178#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + \ 1170#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498))
1179 (AR_SREV_9485(ah) ? \ 1171
1180 0x588 : 0x498))
1181#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f 1172#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
1182#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0 1173#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
1183#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0 1174#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
@@ -1193,9 +1184,8 @@
1193#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000 1184#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
1194#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29 1185#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
1195 1186
1196#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + \ 1187#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c))
1197 (AR_SREV_9485(ah) ? \ 1188
1198 0x58c : 0x49c))
1199#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000 1189#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
1200#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16 1190#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
1201#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000 1191#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
@@ -1214,7 +1204,8 @@
1214#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF 1204#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
1215#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0 1205#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
1216 1206
1217#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + 0x4a0) 1207#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0))
1208
1218#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001 1209#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
1219#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0 1210#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
1220#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002 1211#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_INCOMPLETE 0x00000002
@@ -1228,7 +1219,8 @@
1228#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00 1219#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
1229#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9 1220#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
1230 1221
1231#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + 0x4a4) 1222#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4))
1223
1232#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff 1224#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
1233#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0 1225#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
1234#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000 1226#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_COARSE_IDX 0x001f0000
@@ -1236,7 +1228,8 @@
1236#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000 1228#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
1237#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21 1229#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
1238 1230
1239#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + 0x4a8) 1231#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x598 : 0x4a8))
1232
1240#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff 1233#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
1241#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0 1234#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
1242 1235
@@ -1285,4 +1278,43 @@
1285#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000 1278#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD 0xFC000000
1286#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26 1279#define AR_BTCOEX_WL_LNADIV_BT_INACTIVE_THRESHOLD_S 26
1287 1280
1281/* Manual Peak detector calibration */
1282#define AR_PHY_65NM_BASE 0x16000
1283#define AR_PHY_65NM_RXRF_GAINSTAGES(i) (AR_PHY_65NM_BASE + \
1284 (i * 0x400) + 0x8)
1285#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE 0x80000000
1286#define AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE_S 31
1287#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC 0x00000002
1288#define AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC_S 1
1289#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR 0x70000000
1290#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR_S 28
1291#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR 0x03800000
1292#define AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR_S 23
1293
1294#define AR_PHY_65NM_RXTX2(i) (AR_PHY_65NM_BASE + \
1295 (i * 0x400) + 0x104)
1296#define AR_PHY_65NM_RXTX2_RXON_OVR 0x00001000
1297#define AR_PHY_65NM_RXTX2_RXON_OVR_S 12
1298#define AR_PHY_65NM_RXTX2_RXON 0x00000800
1299#define AR_PHY_65NM_RXTX2_RXON_S 11
1300
1301#define AR_PHY_65NM_RXRF_AGC(i) (AR_PHY_65NM_BASE + \
1302 (i * 0x400) + 0xc)
1303#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE 0x80000000
1304#define AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE_S 31
1305#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR 0x40000000
1306#define AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR_S 30
1307#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR 0x20000000
1308#define AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR_S 29
1309#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR 0x1E000000
1310#define AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR_S 25
1311#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR 0x00078000
1312#define AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR_S 15
1313#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR 0x01F80000
1314#define AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR_S 19
1315#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR 0x00007e00
1316#define AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR_S 9
1317#define AR_PHY_65NM_RXRF_AGC_AGC_OUT 0x00000004
1318#define AR_PHY_65NM_RXRF_AGC_AGC_OUT_S 2
1319
1288#endif /* AR9003_PHY_H */ 1320#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 1d8235e19f0f..f69d292bdc02 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -211,6 +211,8 @@ static const u32 ar9340_1p0_radio_core_40M[][2] = {
211 {0x0001609c, 0x02566f3a}, 211 {0x0001609c, 0x02566f3a},
212 {0x000160ac, 0xa4647c00}, 212 {0x000160ac, 0xa4647c00},
213 {0x000160b0, 0x01885f5a}, 213 {0x000160b0, 0x01885f5a},
214 {0x00008244, 0x0010f400},
215 {0x0000824c, 0x0001e800},
214}; 216};
215 217
216#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble 218#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
@@ -1273,9 +1275,9 @@ static const u32 ar9340_1p0_mac_core[][2] = {
1273 {0x000081f8, 0x00000000}, 1275 {0x000081f8, 0x00000000},
1274 {0x000081fc, 0x00000000}, 1276 {0x000081fc, 0x00000000},
1275 {0x00008240, 0x00100000}, 1277 {0x00008240, 0x00100000},
1276 {0x00008244, 0x0010f424}, 1278 {0x00008244, 0x0010f3d7},
1277 {0x00008248, 0x00000800}, 1279 {0x00008248, 0x00000800},
1278 {0x0000824c, 0x0001e848}, 1280 {0x0000824c, 0x0001e7ae},
1279 {0x00008250, 0x00000000}, 1281 {0x00008250, 0x00000000},
1280 {0x00008254, 0x00000000}, 1282 {0x00008254, 0x00000000},
1281 {0x00008258, 0x00000000}, 1283 {0x00008258, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 58f30f65c6b6..ccc42a71b436 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -78,7 +78,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
78 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150}, 78 {0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
79 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, 79 {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
80 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, 80 {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
81 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 81 {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index fb4497fc7a3d..a3710f3bb90c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -18,7 +18,7 @@
18#ifndef INITVALS_9485_H 18#ifndef INITVALS_9485_H
19#define INITVALS_9485_H 19#define INITVALS_9485_H
20 20
21/* AR9485 1.0 */ 21/* AR9485 1.1 */
22 22
23#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble 23#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
24 24
@@ -31,6 +31,11 @@ static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
31 31
32static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { 32static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
33 /* Addr allmodes */ 33 /* Addr allmodes */
34 {0x00009e00, 0x037216a0},
35 {0x00009e04, 0x00182020},
36 {0x00009e18, 0x00000000},
37 {0x00009e2c, 0x00004121},
38 {0x00009e44, 0x02282324},
34 {0x0000a000, 0x00060005}, 39 {0x0000a000, 0x00060005},
35 {0x0000a004, 0x00810080}, 40 {0x0000a004, 0x00810080},
36 {0x0000a008, 0x00830082}, 41 {0x0000a008, 0x00830082},
@@ -164,6 +169,11 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
164static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = { 169static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
165 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 170 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
166 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, 171 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
172 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
173 {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
174 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
175 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
176 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
167 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, 177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
168 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 178 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
169 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, 179 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -198,6 +208,22 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
198 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 208 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
199 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 209 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
200 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, 210 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
211 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
212 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
213 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
214 {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
215 {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
216 {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
217 {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
218 {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
219 {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
220 {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
221 {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
222 {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
223 {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
224 {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
225 {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
226 {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
201 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 227 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
202 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 228 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
203 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 229 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -234,9 +260,193 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
234 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, 260 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
235}; 261};
236 262
237#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1 263static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
264 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
265 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
266 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
267 {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
268 {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
269 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
270 {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
271 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
272 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
273 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
274 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
275 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
276 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
277 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
278 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
279 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
280 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
281 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
282 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
283 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
284 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
285 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
286 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
287 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
288 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
289 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
290 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
291 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
292 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
293 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
294 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
295 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
296 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
297 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
298 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
299 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
300 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
301 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
302 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
303 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
304 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
305 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
306 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
307 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
308 {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
309 {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
310 {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
311 {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
312 {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
313 {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
314 {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
315 {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
316 {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
317 {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
318 {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
319 {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
320 {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
321 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
322 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
323 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
324 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
325 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
326 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
327 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
328 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
329 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
330 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
331 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
332 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
333 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
334 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
335 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
336 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
337 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
338 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
339 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
340 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
341 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
342 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
343 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
344 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
345 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
346 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
347 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
348 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
349 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
350 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
351 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
352 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
353 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
354 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
355};
238 356
239#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1 357static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
358 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
359 {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
360 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
361 {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
362 {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
363 {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
364 {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
365 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
366 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
367 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
368 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
369 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
370 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
371 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
372 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
373 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
374 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
375 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
376 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
377 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
378 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
379 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
380 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
381 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
382 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
383 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
384 {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
385 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
386 {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
387 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
388 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
389 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
390 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
391 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
392 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
393 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
394 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
395 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
396 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
397 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
398 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
399 {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
400 {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
401 {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
402 {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
403 {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
404 {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
405 {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
406 {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
407 {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
408 {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
409 {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
410 {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
411 {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
412 {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
413 {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
414 {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
415 {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
416 {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
417 {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
418 {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
419 {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
420 {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
421 {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
422 {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
423 {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
424 {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
425 {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
426 {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
427 {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
428 {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
429 {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
430 {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
431 {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
432 {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
433 {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
434 {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
435 {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
436 {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
437 {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
438 {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
439 {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
440 {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
441 {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
442 {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
443 {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
444 {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
445 {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
446 {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
447 {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
448 {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
449};
240 450
241#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1 451#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
242 452
@@ -245,19 +455,19 @@ static const u32 ar9485_1_1[][2] = {
245 {0x0000a580, 0x00000000}, 455 {0x0000a580, 0x00000000},
246 {0x0000a584, 0x00000000}, 456 {0x0000a584, 0x00000000},
247 {0x0000a588, 0x00000000}, 457 {0x0000a588, 0x00000000},
248 {0x0000a58c, 0x00000000}, 458 {0x0000a58c, 0x01804000},
249 {0x0000a590, 0x00000000}, 459 {0x0000a590, 0x02808a02},
250 {0x0000a594, 0x00000000}, 460 {0x0000a594, 0x0340ca02},
251 {0x0000a598, 0x00000000}, 461 {0x0000a598, 0x0340cd03},
252 {0x0000a59c, 0x00000000}, 462 {0x0000a59c, 0x0340cd03},
253 {0x0000a5a0, 0x00000000}, 463 {0x0000a5a0, 0x06415304},
254 {0x0000a5a4, 0x00000000}, 464 {0x0000a5a4, 0x04c11905},
255 {0x0000a5a8, 0x00000000}, 465 {0x0000a5a8, 0x06415905},
256 {0x0000a5ac, 0x00000000}, 466 {0x0000a5ac, 0x06415905},
257 {0x0000a5b0, 0x00000000}, 467 {0x0000a5b0, 0x06415905},
258 {0x0000a5b4, 0x00000000}, 468 {0x0000a5b4, 0x06415905},
259 {0x0000a5b8, 0x00000000}, 469 {0x0000a5b8, 0x06415905},
260 {0x0000a5bc, 0x00000000}, 470 {0x0000a5bc, 0x06415905},
261}; 471};
262 472
263static const u32 ar9485_1_1_radio_core[][2] = { 473static const u32 ar9485_1_1_radio_core[][2] = {
@@ -340,7 +550,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
340 {0x00009880, 0x201fff00}, 550 {0x00009880, 0x201fff00},
341 {0x00009884, 0x00001042}, 551 {0x00009884, 0x00001042},
342 {0x000098a4, 0x00200400}, 552 {0x000098a4, 0x00200400},
343 {0x000098b0, 0x52440bbe}, 553 {0x000098b0, 0x32840bbe},
344 {0x000098d0, 0x004b6a8e}, 554 {0x000098d0, 0x004b6a8e},
345 {0x000098d4, 0x00000820}, 555 {0x000098d4, 0x00000820},
346 {0x000098dc, 0x00000000}, 556 {0x000098dc, 0x00000000},
@@ -362,7 +572,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
362 {0x00009d18, 0x00000000}, 572 {0x00009d18, 0x00000000},
363 {0x00009d1c, 0x00000000}, 573 {0x00009d1c, 0x00000000},
364 {0x00009e08, 0x0038233c}, 574 {0x00009e08, 0x0038233c},
365 {0x00009e24, 0x9927b515}, 575 {0x00009e24, 0x992bb515},
366 {0x00009e28, 0x12ef0200}, 576 {0x00009e28, 0x12ef0200},
367 {0x00009e30, 0x06336f77}, 577 {0x00009e30, 0x06336f77},
368 {0x00009e34, 0x6af6532f}, 578 {0x00009e34, 0x6af6532f},
@@ -427,7 +637,7 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
427 {0x0000a408, 0x0e79e5c6}, 637 {0x0000a408, 0x0e79e5c6},
428 {0x0000a40c, 0x00820820}, 638 {0x0000a40c, 0x00820820},
429 {0x0000a414, 0x1ce739cf}, 639 {0x0000a414, 0x1ce739cf},
430 {0x0000a418, 0x2d0019ce}, 640 {0x0000a418, 0x2d0021ce},
431 {0x0000a41c, 0x1ce739ce}, 641 {0x0000a41c, 0x1ce739ce},
432 {0x0000a420, 0x000001ce}, 642 {0x0000a420, 0x000001ce},
433 {0x0000a424, 0x1ce739ce}, 643 {0x0000a424, 0x1ce739ce},
@@ -443,8 +653,8 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
443 {0x0000a44c, 0x00000001}, 653 {0x0000a44c, 0x00000001},
444 {0x0000a450, 0x00010000}, 654 {0x0000a450, 0x00010000},
445 {0x0000a5c4, 0xbfad9d74}, 655 {0x0000a5c4, 0xbfad9d74},
446 {0x0000a5c8, 0x0048060a}, 656 {0x0000a5c8, 0x00480605},
447 {0x0000a5cc, 0x00000637}, 657 {0x0000a5cc, 0x00002e37},
448 {0x0000a760, 0x03020100}, 658 {0x0000a760, 0x03020100},
449 {0x0000a764, 0x09080504}, 659 {0x0000a764, 0x09080504},
450 {0x0000a768, 0x0d0c0b0a}, 660 {0x0000a768, 0x0d0c0b0a},
@@ -464,17 +674,22 @@ static const u32 ar9485_1_1_baseband_core[][2] = {
464 674
465static const u32 ar9485_common_rx_gain_1_1[][2] = { 675static const u32 ar9485_common_rx_gain_1_1[][2] = {
466 /* Addr allmodes */ 676 /* Addr allmodes */
467 {0x0000a000, 0x00010000}, 677 {0x00009e00, 0x03721b20},
468 {0x0000a004, 0x00030002}, 678 {0x00009e04, 0x00082020},
469 {0x0000a008, 0x00050004}, 679 {0x00009e18, 0x0300501e},
470 {0x0000a00c, 0x00810080}, 680 {0x00009e2c, 0x00002e21},
471 {0x0000a010, 0x01800082}, 681 {0x00009e44, 0x02182324},
472 {0x0000a014, 0x01820181}, 682 {0x0000a000, 0x00060005},
473 {0x0000a018, 0x01840183}, 683 {0x0000a004, 0x00810080},
474 {0x0000a01c, 0x01880185}, 684 {0x0000a008, 0x00830082},
475 {0x0000a020, 0x018a0189}, 685 {0x0000a00c, 0x00850084},
476 {0x0000a024, 0x02850284}, 686 {0x0000a010, 0x01820181},
477 {0x0000a028, 0x02890288}, 687 {0x0000a014, 0x01840183},
688 {0x0000a018, 0x01880185},
689 {0x0000a01c, 0x018a0189},
690 {0x0000a020, 0x02850284},
691 {0x0000a024, 0x02890288},
692 {0x0000a028, 0x028b028a},
478 {0x0000a02c, 0x03850384}, 693 {0x0000a02c, 0x03850384},
479 {0x0000a030, 0x03890388}, 694 {0x0000a030, 0x03890388},
480 {0x0000a034, 0x038b038a}, 695 {0x0000a034, 0x038b038a},
@@ -496,15 +711,15 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
496 {0x0000a074, 0x00000000}, 711 {0x0000a074, 0x00000000},
497 {0x0000a078, 0x00000000}, 712 {0x0000a078, 0x00000000},
498 {0x0000a07c, 0x00000000}, 713 {0x0000a07c, 0x00000000},
499 {0x0000a080, 0x28282828}, 714 {0x0000a080, 0x18181818},
500 {0x0000a084, 0x28282828}, 715 {0x0000a084, 0x18181818},
501 {0x0000a088, 0x28282828}, 716 {0x0000a088, 0x18181818},
502 {0x0000a08c, 0x28282828}, 717 {0x0000a08c, 0x18181818},
503 {0x0000a090, 0x28282828}, 718 {0x0000a090, 0x18181818},
504 {0x0000a094, 0x21212128}, 719 {0x0000a094, 0x18181818},
505 {0x0000a098, 0x171c1c1c}, 720 {0x0000a098, 0x17181818},
506 {0x0000a09c, 0x02020212}, 721 {0x0000a09c, 0x02020b0b},
507 {0x0000a0a0, 0x00000202}, 722 {0x0000a0a0, 0x02020202},
508 {0x0000a0a4, 0x00000000}, 723 {0x0000a0a4, 0x00000000},
509 {0x0000a0a8, 0x00000000}, 724 {0x0000a0a8, 0x00000000},
510 {0x0000a0ac, 0x00000000}, 725 {0x0000a0ac, 0x00000000},
@@ -512,22 +727,22 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
512 {0x0000a0b4, 0x00000000}, 727 {0x0000a0b4, 0x00000000},
513 {0x0000a0b8, 0x00000000}, 728 {0x0000a0b8, 0x00000000},
514 {0x0000a0bc, 0x00000000}, 729 {0x0000a0bc, 0x00000000},
515 {0x0000a0c0, 0x001f0000}, 730 {0x0000a0c0, 0x22072208},
516 {0x0000a0c4, 0x111f1100}, 731 {0x0000a0c4, 0x22052206},
517 {0x0000a0c8, 0x111d111e}, 732 {0x0000a0c8, 0x22032204},
518 {0x0000a0cc, 0x111b111c}, 733 {0x0000a0cc, 0x22012202},
519 {0x0000a0d0, 0x22032204}, 734 {0x0000a0d0, 0x221f2200},
520 {0x0000a0d4, 0x22012202}, 735 {0x0000a0d4, 0x221d221e},
521 {0x0000a0d8, 0x221f2200}, 736 {0x0000a0d8, 0x33023303},
522 {0x0000a0dc, 0x221d221e}, 737 {0x0000a0dc, 0x33003301},
523 {0x0000a0e0, 0x33013302}, 738 {0x0000a0e0, 0x331e331f},
524 {0x0000a0e4, 0x331f3300}, 739 {0x0000a0e4, 0x4402331d},
525 {0x0000a0e8, 0x4402331e}, 740 {0x0000a0e8, 0x44004401},
526 {0x0000a0ec, 0x44004401}, 741 {0x0000a0ec, 0x441e441f},
527 {0x0000a0f0, 0x441e441f}, 742 {0x0000a0f0, 0x55025503},
528 {0x0000a0f4, 0x55015502}, 743 {0x0000a0f4, 0x55005501},
529 {0x0000a0f8, 0x551f5500}, 744 {0x0000a0f8, 0x551e551f},
530 {0x0000a0fc, 0x6602551e}, 745 {0x0000a0fc, 0x6602551d},
531 {0x0000a100, 0x66006601}, 746 {0x0000a100, 0x66006601},
532 {0x0000a104, 0x661e661f}, 747 {0x0000a104, 0x661e661f},
533 {0x0000a108, 0x7703661d}, 748 {0x0000a108, 0x7703661d},
@@ -636,17 +851,12 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
636 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 851 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
637 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 852 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
638 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, 853 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
639 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
640 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
641 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 854 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
642 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, 855 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
643 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 856 {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
644 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
645 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 857 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
646 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 858 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
647 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
648 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, 859 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
649 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
650 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, 860 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
651 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 861 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
652 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, 862 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -850,4 +1060,6 @@ static const u32 ar9485_1_1_mac_core[][2] = {
850 {0x000083d0, 0x000301ff}, 1060 {0x000083d0, 0x000301ff},
851}; 1061};
852 1062
1063#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
1064
853#endif /* INITVALS_9485_H */ 1065#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 843e79f67ff2..0c2ac0c6dc89 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -768,9 +768,9 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
768 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 768 {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
769}; 769};
770 770
771static const u32 ar9565_1p0_pciephy_pll_on_clkreq_disable_L1[][2] = { 771static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
772 /* Addr allmodes */ 772 /* Addr allmodes */
773 {0x00018c00, 0x18212ede}, 773 {0x00018c00, 0x18213ede},
774 {0x00018c04, 0x000801d8}, 774 {0x00018c04, 0x000801d8},
775 {0x00018c08, 0x0003780c}, 775 {0x00018c08, 0x0003780c},
776}; 776};
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index dfe6a4707fd2..86e26a19efda 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -129,10 +129,10 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
129#define ATH_TXMAXTRY 13 129#define ATH_TXMAXTRY 13
130 130
131#define TID_TO_WME_AC(_tid) \ 131#define TID_TO_WME_AC(_tid) \
132 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ 132 ((((_tid) == 0) || ((_tid) == 3)) ? IEEE80211_AC_BE : \
133 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ 133 (((_tid) == 1) || ((_tid) == 2)) ? IEEE80211_AC_BK : \
134 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ 134 (((_tid) == 4) || ((_tid) == 5)) ? IEEE80211_AC_VI : \
135 WME_AC_VO) 135 IEEE80211_AC_VO)
136 136
137#define ATH_AGGR_DELIM_SZ 4 137#define ATH_AGGR_DELIM_SZ 4
138#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 138#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
@@ -259,19 +259,21 @@ struct ath_atx_tid {
259}; 259};
260 260
261struct ath_node { 261struct ath_node {
262#ifdef CONFIG_ATH9K_DEBUGFS 262 struct ath_softc *sc;
263 struct list_head list; /* for sc->nodes */
264#endif
265 struct ieee80211_sta *sta; /* station struct we're part of */ 263 struct ieee80211_sta *sta; /* station struct we're part of */
266 struct ieee80211_vif *vif; /* interface with which we're associated */ 264 struct ieee80211_vif *vif; /* interface with which we're associated */
267 struct ath_atx_tid tid[WME_NUM_TID]; 265 struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
268 struct ath_atx_ac ac[WME_NUM_AC]; 266 struct ath_atx_ac ac[IEEE80211_NUM_ACS];
269 int ps_key; 267 int ps_key;
270 268
271 u16 maxampdu; 269 u16 maxampdu;
272 u8 mpdudensity; 270 u8 mpdudensity;
273 271
274 bool sleeping; 272 bool sleeping;
273
274#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
275 struct dentry *node_stat;
276#endif
275}; 277};
276 278
277#define AGGR_CLEANUP BIT(1) 279#define AGGR_CLEANUP BIT(1)
@@ -299,9 +301,9 @@ struct ath_tx {
299 struct list_head txbuf; 301 struct list_head txbuf;
300 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; 302 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
301 struct ath_descdma txdma; 303 struct ath_descdma txdma;
302 struct ath_txq *txq_map[WME_NUM_AC]; 304 struct ath_txq *txq_map[IEEE80211_NUM_ACS];
303 u32 txq_max_pending[WME_NUM_AC]; 305 u32 txq_max_pending[IEEE80211_NUM_ACS];
304 u16 max_aggr_framelen[WME_NUM_AC][4][32]; 306 u16 max_aggr_framelen[IEEE80211_NUM_ACS][4][32];
305}; 307};
306 308
307struct ath_rx_edma { 309struct ath_rx_edma {
@@ -437,6 +439,7 @@ void ath9k_set_beacon(struct ath_softc *sc);
437#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */ 439#define ATH_LONG_CALINTERVAL_INT 1000 /* 1000 ms */
438#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 440#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
439#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 441#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
442#define ATH_ANI_MAX_SKIP_COUNT 10
440 443
441#define ATH_PAPRD_TIMEOUT 100 /* msecs */ 444#define ATH_PAPRD_TIMEOUT 100 /* msecs */
442#define ATH_PLL_WORK_INTERVAL 100 445#define ATH_PLL_WORK_INTERVAL 100
@@ -460,6 +463,12 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
460/* BTCOEX */ 463/* BTCOEX */
461/**********/ 464/**********/
462 465
466#define ATH_DUMP_BTCOEX(_s, _val) \
467 do { \
468 len += snprintf(buf + len, size - len, \
469 "%20s : %10d\n", _s, (_val)); \
470 } while (0)
471
463enum bt_op_flags { 472enum bt_op_flags {
464 BT_OP_PRIORITY_DETECTED, 473 BT_OP_PRIORITY_DETECTED,
465 BT_OP_SCAN, 474 BT_OP_SCAN,
@@ -478,8 +487,10 @@ struct ath_btcoex {
478 u32 btscan_no_stomp; /* in usec */ 487 u32 btscan_no_stomp; /* in usec */
479 u32 duty_cycle; 488 u32 duty_cycle;
480 u32 bt_wait_time; 489 u32 bt_wait_time;
490 int rssi_count;
481 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */ 491 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
482 struct ath_mci_profile mci; 492 struct ath_mci_profile mci;
493 u8 stomp_audio;
483}; 494};
484 495
485#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 496#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -492,6 +503,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
492void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status); 503void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
493u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen); 504u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
494void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc); 505void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
506int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size);
495#else 507#else
496static inline int ath9k_init_btcoex(struct ath_softc *sc) 508static inline int ath9k_init_btcoex(struct ath_softc *sc)
497{ 509{
@@ -518,6 +530,10 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
518static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) 530static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
519{ 531{
520} 532}
533static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
534{
535 return 0;
536}
521#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 537#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
522 538
523struct ath9k_wow_pattern { 539struct ath9k_wow_pattern {
@@ -642,6 +658,7 @@ enum sc_op_flags {
642#define PS_WAIT_FOR_PSPOLL_DATA BIT(2) 658#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
643#define PS_WAIT_FOR_TX_ACK BIT(3) 659#define PS_WAIT_FOR_TX_ACK BIT(3)
644#define PS_BEACON_SYNC BIT(4) 660#define PS_BEACON_SYNC BIT(4)
661#define PS_WAIT_FOR_ANI BIT(5)
645 662
646struct ath_rate_table; 663struct ath_rate_table;
647 664
@@ -708,9 +725,6 @@ struct ath_softc {
708 725
709#ifdef CONFIG_ATH9K_DEBUGFS 726#ifdef CONFIG_ATH9K_DEBUGFS
710 struct ath9k_debug debug; 727 struct ath9k_debug debug;
711 spinlock_t nodes_lock;
712 struct list_head nodes; /* basically, stations */
713 unsigned int tx_complete_poll_work_seen;
714#endif 728#endif
715 struct ath_beacon_config cur_beacon_conf; 729 struct ath_beacon_config cur_beacon_conf;
716 struct delayed_work tx_complete_work; 730 struct delayed_work tx_complete_work;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1b48414dca95..531fffd801a3 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -46,7 +46,7 @@ static void ath9k_beaconq_config(struct ath_softc *sc)
46 qi.tqi_cwmax = 0; 46 qi.tqi_cwmax = 0;
47 } else { 47 } else {
48 /* Adhoc mode; important thing is to use 2x cwmin. */ 48 /* Adhoc mode; important thing is to use 2x cwmin. */
49 txq = sc->tx.txq_map[WME_AC_BE]; 49 txq = sc->tx.txq_map[IEEE80211_AC_BE];
50 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); 50 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
51 qi.tqi_aifs = qi_be.tqi_aifs; 51 qi.tqi_aifs = qi_be.tqi_aifs;
52 if (ah->slottime == ATH9K_SLOT_TIME_20) 52 if (ah->slottime == ATH9K_SLOT_TIME_20)
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 419e9a3f2fed..9963b0bf9f72 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -49,6 +49,7 @@ static const u32 mci_wlan_weights[ATH_BTCOEX_STOMP_MAX]
49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */ 49 { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */ 50 { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
51 { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */ 51 { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
52 { 0xffffff01, 0xffffffff, 0xffffff01, 0xffffffff }, /* STOMP_AUDIO */
52}; 53};
53 54
54void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) 55void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
@@ -195,7 +196,7 @@ void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
195 ah->btcoex_hw.mci.need_flush_btinfo = false; 196 ah->btcoex_hw.mci.need_flush_btinfo = false;
196 ah->btcoex_hw.mci.wlan_cal_seq = 0; 197 ah->btcoex_hw.mci.wlan_cal_seq = 0;
197 ah->btcoex_hw.mci.wlan_cal_done = 0; 198 ah->btcoex_hw.mci.wlan_cal_done = 0;
198 ah->btcoex_hw.mci.config = 0x2201; 199 ah->btcoex_hw.mci.config = (AR_SREV_9462(ah)) ? 0x2201 : 0xa4c1;
199} 200}
200EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci); 201EXPORT_SYMBOL(ath9k_hw_btcoex_init_mci);
201 202
@@ -218,27 +219,45 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
218 enum ath_stomp_type stomp_type) 219 enum ath_stomp_type stomp_type)
219{ 220{
220 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 221 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
222 struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
223 u8 txprio_shift[] = { 24, 16, 16, 0 }; /* tx priority weight */
224 bool concur_tx = (mci_hw->concur_tx && btcoex_hw->tx_prio[stomp_type]);
225 const u32 *weight = ar9003_wlan_weights[stomp_type];
226 int i;
221 227
222 if (AR_SREV_9300_20_OR_LATER(ah)) { 228 if (!AR_SREV_9300_20_OR_LATER(ah)) {
223 const u32 *weight = ar9003_wlan_weights[stomp_type];
224 int i;
225
226 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
227 if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
228 btcoex_hw->mci.stomp_ftp)
229 stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
230 weight = mci_wlan_weights[stomp_type];
231 }
232
233 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
234 btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
235 btcoex_hw->wlan_weight[i] = weight[i];
236 }
237 } else {
238 btcoex_hw->bt_coex_weights = 229 btcoex_hw->bt_coex_weights =
239 SM(bt_weight, AR_BTCOEX_BT_WGHT) | 230 SM(bt_weight, AR_BTCOEX_BT_WGHT) |
240 SM(wlan_weight, AR_BTCOEX_WL_WGHT); 231 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
232 return;
233 }
234
235 if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
236 enum ath_stomp_type stype =
237 ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
238 btcoex_hw->mci.stomp_ftp) ?
239 ATH_BTCOEX_STOMP_LOW_FTP : stomp_type;
240 weight = mci_wlan_weights[stype];
241 } 241 }
242
243 for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
244 btcoex_hw->bt_weight[i] = AR9300_BT_WGHT;
245 btcoex_hw->wlan_weight[i] = weight[i];
246 if (concur_tx && i) {
247 btcoex_hw->wlan_weight[i] &=
248 ~(0xff << txprio_shift[i-1]);
249 btcoex_hw->wlan_weight[i] |=
250 (btcoex_hw->tx_prio[stomp_type] <<
251 txprio_shift[i-1]);
252 }
253 }
254 /* Last WLAN weight has to be adjusted wrt tx priority */
255 if (concur_tx) {
256 btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
257 btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
258 << txprio_shift[i-1]);
259 }
260
242} 261}
243EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight); 262EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
244 263
@@ -385,3 +404,13 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
385 } 404 }
386} 405}
387EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp); 406EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
407
408void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio)
409{
410 struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
411 int i;
412
413 for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
414 btcoex->tx_prio[i] = stomp_txprio[i];
415}
416EXPORT_SYMBOL(ath9k_hw_btcoex_set_concur_txprio);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 385197ad79b0..6de26ea5d5fa 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -39,6 +39,9 @@
39#define ATH_BTCOEX_RX_WAIT_TIME 100 39#define ATH_BTCOEX_RX_WAIT_TIME 100
40#define ATH_BTCOEX_STOMP_FTP_THRESH 5 40#define ATH_BTCOEX_STOMP_FTP_THRESH 5
41 41
42#define ATH_BTCOEX_HT20_MAX_TXPOWER 0x14
43#define ATH_BTCOEX_HT40_MAX_TXPOWER 0x10
44
42#define AR9300_NUM_BT_WEIGHTS 4 45#define AR9300_NUM_BT_WEIGHTS 4
43#define AR9300_NUM_WLAN_WEIGHTS 4 46#define AR9300_NUM_WLAN_WEIGHTS 4
44/* Defines the BT AR_BT_COEX_WGHT used */ 47/* Defines the BT AR_BT_COEX_WGHT used */
@@ -47,6 +50,7 @@ enum ath_stomp_type {
47 ATH_BTCOEX_STOMP_LOW, 50 ATH_BTCOEX_STOMP_LOW,
48 ATH_BTCOEX_STOMP_NONE, 51 ATH_BTCOEX_STOMP_NONE,
49 ATH_BTCOEX_STOMP_LOW_FTP, 52 ATH_BTCOEX_STOMP_LOW_FTP,
53 ATH_BTCOEX_STOMP_AUDIO,
50 ATH_BTCOEX_STOMP_MAX 54 ATH_BTCOEX_STOMP_MAX
51}; 55};
52 56
@@ -84,6 +88,8 @@ struct ath9k_hw_mci {
84 u8 bt_ver_minor; 88 u8 bt_ver_minor;
85 u8 bt_state; 89 u8 bt_state;
86 u8 stomp_ftp; 90 u8 stomp_ftp;
91 bool concur_tx;
92 u32 last_recovery;
87}; 93};
88 94
89struct ath_btcoex_hw { 95struct ath_btcoex_hw {
@@ -98,6 +104,7 @@ struct ath_btcoex_hw {
98 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 104 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
99 u32 bt_weight[AR9300_NUM_BT_WEIGHTS]; 105 u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
100 u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS]; 106 u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
107 u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
101}; 108};
102 109
103void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah); 110void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
@@ -112,5 +119,6 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
112void ath9k_hw_btcoex_disable(struct ath_hw *ah); 119void ath9k_hw_btcoex_disable(struct ath_hw *ah);
113void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah, 120void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
114 enum ath_stomp_type stomp_type); 121 enum ath_stomp_type stomp_type);
122void ath9k_hw_btcoex_set_concur_txprio(struct ath_hw *ah, u8 *stomp_txprio);
115 123
116#endif 124#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index e5cceb077574..1e8508530e98 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -69,6 +69,7 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
69 69
70 if (chan && chan->noisefloor) { 70 if (chan && chan->noisefloor) {
71 s8 delta = chan->noisefloor - 71 s8 delta = chan->noisefloor -
72 ATH9K_NF_CAL_NOISE_THRESH -
72 ath9k_hw_get_default_nf(ah, chan); 73 ath9k_hw_get_default_nf(ah, chan);
73 if (delta > 0) 74 if (delta > 0)
74 noise += delta; 75 noise += delta;
@@ -410,6 +411,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
410 411
411 ah->caldata->channel = chan->channel; 412 ah->caldata->channel = chan->channel;
412 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT; 413 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
414 ah->caldata->chanmode = chan->chanmode;
413 h = ah->caldata->nfCalHist; 415 h = ah->caldata->nfCalHist;
414 default_nf = ath9k_hw_get_default_nf(ah, chan); 416 default_nf = ath9k_hw_get_default_nf(ah, chan);
415 for (i = 0; i < NUM_NF_READINGS; i++) { 417 for (i = 0; i < NUM_NF_READINGS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 1060c19a5012..60dcb6c22db9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -21,6 +21,9 @@
21 21
22#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 22#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
23 23
24/* Internal noise floor can vary by about 6db depending on the frequency */
25#define ATH9K_NF_CAL_NOISE_THRESH 6
26
24#define NUM_NF_READINGS 6 27#define NUM_NF_READINGS 6
25#define ATH9K_NF_CAL_HIST_MAX 5 28#define ATH9K_NF_CAL_HIST_MAX 5
26 29
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index ad14fecc76c6..5f845beeb18b 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,18 +23,10 @@
23 23
24/* Common header for Atheros 802.11n base driver cores */ 24/* Common header for Atheros 802.11n base driver cores */
25 25
26#define WME_NUM_TID 16
27#define WME_BA_BMP_SIZE 64 26#define WME_BA_BMP_SIZE 64
28#define WME_MAX_BA WME_BA_BMP_SIZE 27#define WME_MAX_BA WME_BA_BMP_SIZE
29#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 28#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
30 29
31/* These must match mac80211 skb queue mapping numbers */
32#define WME_AC_VO 0
33#define WME_AC_VI 1
34#define WME_AC_BE 2
35#define WME_AC_BK 3
36#define WME_NUM_AC 4
37
38#define ATH_RSSI_DUMMY_MARKER 0x127 30#define ATH_RSSI_DUMMY_MARKER 0x127
39#define ATH_RSSI_LPF_LEN 10 31#define ATH_RSSI_LPF_LEN 10
40#define RSSI_LPF_THRESHOLD -20 32#define RSSI_LPF_THRESHOLD -20
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6727b566d294..13ff9edc2401 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -512,62 +512,19 @@ static const struct file_operations fops_interrupt = {
512 .llseek = default_llseek, 512 .llseek = default_llseek,
513}; 513};
514 514
515#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
516#define PR(str, elem) \
517 do { \
518 len += snprintf(buf + len, size - len, \
519 "%s%13u%11u%10u%10u\n", str, \
520 sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \
521 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \
522 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \
523 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \
524 if (len >= size) \
525 goto done; \
526} while(0)
527
528#define PRX(str, elem) \
529do { \
530 len += snprintf(buf + len, size - len, \
531 "%s%13u%11u%10u%10u\n", str, \
532 (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \
533 (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \
534 (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \
535 (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \
536 if (len >= size) \
537 goto done; \
538} while(0)
539
540#define PRQLE(str, elem) \
541do { \
542 len += snprintf(buf + len, size - len, \
543 "%s%13i%11i%10i%10i\n", str, \
544 list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \
545 list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \
546 list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \
547 list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \
548 if (len >= size) \
549 goto done; \
550} while (0)
551
552static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 515static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
553 size_t count, loff_t *ppos) 516 size_t count, loff_t *ppos)
554{ 517{
555 struct ath_softc *sc = file->private_data; 518 struct ath_softc *sc = file->private_data;
556 char *buf; 519 char *buf;
557 unsigned int len = 0, size = 8000; 520 unsigned int len = 0, size = 2048;
558 int i;
559 ssize_t retval = 0; 521 ssize_t retval = 0;
560 char tmp[32];
561 522
562 buf = kzalloc(size, GFP_KERNEL); 523 buf = kzalloc(size, GFP_KERNEL);
563 if (buf == NULL) 524 if (buf == NULL)
564 return -ENOMEM; 525 return -ENOMEM;
565 526
566 len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x" 527 len += sprintf(buf, "%30s %10s%10s%10s\n\n",
567 " poll-work-seen: %u\n"
568 "%30s %10s%10s%10s\n\n",
569 ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup,
570 sc->tx_complete_poll_work_seen,
571 "BE", "BK", "VI", "VO"); 528 "BE", "BK", "VI", "VO");
572 529
573 PR("MPDUs Queued: ", queued); 530 PR("MPDUs Queued: ", queued);
@@ -587,62 +544,11 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
587 PR("DELIM Underrun: ", delim_underrun); 544 PR("DELIM Underrun: ", delim_underrun);
588 PR("TX-Pkts-All: ", tx_pkts_all); 545 PR("TX-Pkts-All: ", tx_pkts_all);
589 PR("TX-Bytes-All: ", tx_bytes_all); 546 PR("TX-Bytes-All: ", tx_bytes_all);
590 PR("hw-put-tx-buf: ", puttxbuf); 547 PR("HW-put-tx-buf: ", puttxbuf);
591 PR("hw-tx-start: ", txstart); 548 PR("HW-tx-start: ", txstart);
592 PR("hw-tx-proc-desc: ", txprocdesc); 549 PR("HW-tx-proc-desc: ", txprocdesc);
593 PR("TX-Failed: ", txfailed); 550 PR("TX-Failed: ", txfailed);
594 len += snprintf(buf + len, size - len,
595 "%s%11p%11p%10p%10p\n", "txq-memory-address:",
596 sc->tx.txq_map[WME_AC_BE],
597 sc->tx.txq_map[WME_AC_BK],
598 sc->tx.txq_map[WME_AC_VI],
599 sc->tx.txq_map[WME_AC_VO]);
600 if (len >= size)
601 goto done;
602
603 PRX("axq-qnum: ", axq_qnum);
604 PRX("axq-depth: ", axq_depth);
605 PRX("axq-ampdu_depth: ", axq_ampdu_depth);
606 PRX("axq-stopped ", stopped);
607 PRX("tx-in-progress ", axq_tx_inprogress);
608 PRX("pending-frames ", pending_frames);
609 PRX("txq_headidx: ", txq_headidx);
610 PRX("txq_tailidx: ", txq_headidx);
611
612 PRQLE("axq_q empty: ", axq_q);
613 PRQLE("axq_acq empty: ", axq_acq);
614 for (i = 0; i < ATH_TXFIFO_DEPTH; i++) {
615 snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i);
616 PRQLE(tmp, txq_fifo[i]);
617 }
618
619 /* Print out more detailed queue-info */
620 for (i = 0; i <= WME_AC_BK; i++) {
621 struct ath_txq *txq = &(sc->tx.txq[i]);
622 struct ath_atx_ac *ac;
623 struct ath_atx_tid *tid;
624 if (len >= size)
625 goto done;
626 spin_lock_bh(&txq->axq_lock);
627 if (!list_empty(&txq->axq_acq)) {
628 ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac,
629 list);
630 len += snprintf(buf + len, size - len,
631 "txq[%i] first-ac: %p sched: %i\n",
632 i, ac, ac->sched);
633 if (list_empty(&ac->tid_q) || (len >= size))
634 goto done_for;
635 tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
636 list);
637 len += snprintf(buf + len, size - len,
638 " first-tid: %p sched: %i paused: %i\n",
639 tid, tid->sched, tid->paused);
640 }
641 done_for:
642 spin_unlock_bh(&txq->axq_lock);
643 }
644 551
645done:
646 if (len > size) 552 if (len > size)
647 len = size; 553 len = size;
648 554
@@ -652,62 +558,41 @@ done:
652 return retval; 558 return retval;
653} 559}
654 560
655static ssize_t read_file_stations(struct file *file, char __user *user_buf, 561static ssize_t read_file_queues(struct file *file, char __user *user_buf,
656 size_t count, loff_t *ppos) 562 size_t count, loff_t *ppos)
657{ 563{
658 struct ath_softc *sc = file->private_data; 564 struct ath_softc *sc = file->private_data;
565 struct ath_txq *txq;
659 char *buf; 566 char *buf;
660 unsigned int len = 0, size = 64000; 567 unsigned int len = 0, size = 1024;
661 struct ath_node *an = NULL;
662 ssize_t retval = 0; 568 ssize_t retval = 0;
663 int q; 569 int i;
570 char *qname[4] = {"VO", "VI", "BE", "BK"};
664 571
665 buf = kzalloc(size, GFP_KERNEL); 572 buf = kzalloc(size, GFP_KERNEL);
666 if (buf == NULL) 573 if (buf == NULL)
667 return -ENOMEM; 574 return -ENOMEM;
668 575
669 len += snprintf(buf + len, size - len, 576 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
670 "Stations:\n" 577 txq = sc->tx.txq_map[i];
671 " tid: addr sched paused buf_q-empty an ac baw\n" 578 len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
672 " ac: addr sched tid_q-empty txq\n");
673
674 spin_lock(&sc->nodes_lock);
675 list_for_each_entry(an, &sc->nodes, list) {
676 unsigned short ma = an->maxampdu;
677 if (ma == 0)
678 ma = 65535; /* see ath_lookup_rate */
679 len += snprintf(buf + len, size - len,
680 "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
681 an->vif->addr, an->sta->addr, ma,
682 (unsigned int)(an->mpdudensity));
683 if (len >= size)
684 goto done;
685
686 for (q = 0; q < WME_NUM_TID; q++) {
687 struct ath_atx_tid *tid = &(an->tid[q]);
688 len += snprintf(buf + len, size - len,
689 " tid: %p %s %s %i %p %p %hu\n",
690 tid, tid->sched ? "sched" : "idle",
691 tid->paused ? "paused" : "running",
692 skb_queue_empty(&tid->buf_q),
693 tid->an, tid->ac, tid->baw_size);
694 if (len >= size)
695 goto done;
696 }
697 579
698 for (q = 0; q < WME_NUM_AC; q++) { 580 ath_txq_lock(sc, txq);
699 struct ath_atx_ac *ac = &(an->ac[q]); 581
700 len += snprintf(buf + len, size - len, 582 len += snprintf(buf + len, size - len, "%s: %d ",
701 " ac: %p %s %i %p\n", 583 "qnum", txq->axq_qnum);
702 ac, ac->sched ? "sched" : "idle", 584 len += snprintf(buf + len, size - len, "%s: %2d ",
703 list_empty(&ac->tid_q), ac->txq); 585 "qdepth", txq->axq_depth);
704 if (len >= size) 586 len += snprintf(buf + len, size - len, "%s: %2d ",
705 goto done; 587 "ampdu-depth", txq->axq_ampdu_depth);
706 } 588 len += snprintf(buf + len, size - len, "%s: %3d ",
589 "pending", txq->pending_frames);
590 len += snprintf(buf + len, size - len, "%s: %d\n",
591 "stopped", txq->stopped);
592
593 ath_txq_unlock(sc, txq);
707 } 594 }
708 595
709done:
710 spin_unlock(&sc->nodes_lock);
711 if (len > size) 596 if (len > size)
712 len = size; 597 len = size;
713 598
@@ -837,6 +722,9 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
837 len += snprintf(buf + len, sizeof(buf) - len, 722 len += snprintf(buf + len, sizeof(buf) - len,
838 "%17s: %2d\n", "PLL RX Hang", 723 "%17s: %2d\n", "PLL RX Hang",
839 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]); 724 sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
725 len += snprintf(buf + len, sizeof(buf) - len,
726 "%17s: %2d\n", "MCI Reset",
727 sc->debug.stats.reset[RESET_TYPE_MCI]);
840 728
841 if (len > sizeof(buf)) 729 if (len > sizeof(buf))
842 len = sizeof(buf); 730 len = sizeof(buf);
@@ -919,8 +807,8 @@ static const struct file_operations fops_xmit = {
919 .llseek = default_llseek, 807 .llseek = default_llseek,
920}; 808};
921 809
922static const struct file_operations fops_stations = { 810static const struct file_operations fops_queues = {
923 .read = read_file_stations, 811 .read = read_file_queues,
924 .open = simple_open, 812 .open = simple_open,
925 .owner = THIS_MODULE, 813 .owner = THIS_MODULE,
926 .llseek = default_llseek, 814 .llseek = default_llseek,
@@ -1586,6 +1474,250 @@ static const struct file_operations fops_samps = {
1586 1474
1587#endif 1475#endif
1588 1476
1477#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1478static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
1479 size_t count, loff_t *ppos)
1480{
1481 struct ath_softc *sc = file->private_data;
1482 u32 len = 0, size = 1500;
1483 char *buf;
1484 size_t retval;
1485
1486 buf = kzalloc(size, GFP_KERNEL);
1487 if (buf == NULL)
1488 return -ENOMEM;
1489
1490 if (!sc->sc_ah->common.btcoex_enabled) {
1491 len = snprintf(buf, size, "%s\n",
1492 "BTCOEX is disabled");
1493 goto exit;
1494 }
1495
1496 len = ath9k_dump_btcoex(sc, buf, size);
1497exit:
1498 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1499 kfree(buf);
1500
1501 return retval;
1502}
1503
1504static const struct file_operations fops_btcoex = {
1505 .read = read_file_btcoex,
1506 .open = simple_open,
1507 .owner = THIS_MODULE,
1508 .llseek = default_llseek,
1509};
1510#endif
1511
1512static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
1513 size_t count, loff_t *ppos)
1514{
1515 struct ath_node *an = file->private_data;
1516 struct ath_softc *sc = an->sc;
1517 struct ath_atx_tid *tid;
1518 struct ath_atx_ac *ac;
1519 struct ath_txq *txq;
1520 u32 len = 0, size = 4096;
1521 char *buf;
1522 size_t retval;
1523 int tidno, acno;
1524
1525 buf = kzalloc(size, GFP_KERNEL);
1526 if (buf == NULL)
1527 return -ENOMEM;
1528
1529 if (!an->sta->ht_cap.ht_supported) {
1530 len = snprintf(buf, size, "%s\n",
1531 "HT not supported");
1532 goto exit;
1533 }
1534
1535 len = snprintf(buf, size, "Max-AMPDU: %d\n",
1536 an->maxampdu);
1537 len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
1538 an->mpdudensity);
1539
1540 len += snprintf(buf + len, size - len,
1541 "%2s%7s\n", "AC", "SCHED");
1542
1543 for (acno = 0, ac = &an->ac[acno];
1544 acno < IEEE80211_NUM_ACS; acno++, ac++) {
1545 txq = ac->txq;
1546 ath_txq_lock(sc, txq);
1547 len += snprintf(buf + len, size - len,
1548 "%2d%7d\n",
1549 acno, ac->sched);
1550 ath_txq_unlock(sc, txq);
1551 }
1552
1553 len += snprintf(buf + len, size - len,
1554 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
1555 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
1556 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
1557
1558 for (tidno = 0, tid = &an->tid[tidno];
1559 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1560 txq = tid->ac->txq;
1561 ath_txq_lock(sc, txq);
1562 len += snprintf(buf + len, size - len,
1563 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
1564 tid->tidno, tid->seq_start, tid->seq_next,
1565 tid->baw_size, tid->baw_head, tid->baw_tail,
1566 tid->bar_index, tid->sched, tid->paused);
1567 ath_txq_unlock(sc, txq);
1568 }
1569exit:
1570 retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
1571 kfree(buf);
1572
1573 return retval;
1574}
1575
1576static const struct file_operations fops_node_stat = {
1577 .read = read_file_node_stat,
1578 .open = simple_open,
1579 .owner = THIS_MODULE,
1580 .llseek = default_llseek,
1581};
1582
1583void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
1584 struct ieee80211_vif *vif,
1585 struct ieee80211_sta *sta,
1586 struct dentry *dir)
1587{
1588 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1589 an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
1590 dir, an, &fops_node_stat);
1591}
1592
1593void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
1594 struct ieee80211_vif *vif,
1595 struct ieee80211_sta *sta,
1596 struct dentry *dir)
1597{
1598 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1599 debugfs_remove(an->node_stat);
1600}
1601
1602/* Ethtool support for get-stats */
1603
1604#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
1605static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1606 "tx_pkts_nic",
1607 "tx_bytes_nic",
1608 "rx_pkts_nic",
1609 "rx_bytes_nic",
1610 AMKSTR(d_tx_pkts),
1611 AMKSTR(d_tx_bytes),
1612 AMKSTR(d_tx_mpdus_queued),
1613 AMKSTR(d_tx_mpdus_completed),
1614 AMKSTR(d_tx_mpdu_xretries),
1615 AMKSTR(d_tx_aggregates),
1616 AMKSTR(d_tx_ampdus_queued_hw),
1617 AMKSTR(d_tx_ampdus_queued_sw),
1618 AMKSTR(d_tx_ampdus_completed),
1619 AMKSTR(d_tx_ampdu_retries),
1620 AMKSTR(d_tx_ampdu_xretries),
1621 AMKSTR(d_tx_fifo_underrun),
1622 AMKSTR(d_tx_op_exceeded),
1623 AMKSTR(d_tx_timer_expiry),
1624 AMKSTR(d_tx_desc_cfg_err),
1625 AMKSTR(d_tx_data_underrun),
1626 AMKSTR(d_tx_delim_underrun),
1627 "d_rx_decrypt_crc_err",
1628 "d_rx_phy_err",
1629 "d_rx_mic_err",
1630 "d_rx_pre_delim_crc_err",
1631 "d_rx_post_delim_crc_err",
1632 "d_rx_decrypt_busy_err",
1633
1634 "d_rx_phyerr_radar",
1635 "d_rx_phyerr_ofdm_timing",
1636 "d_rx_phyerr_cck_timing",
1637
1638};
1639#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
1640
1641void ath9k_get_et_strings(struct ieee80211_hw *hw,
1642 struct ieee80211_vif *vif,
1643 u32 sset, u8 *data)
1644{
1645 if (sset == ETH_SS_STATS)
1646 memcpy(data, *ath9k_gstrings_stats,
1647 sizeof(ath9k_gstrings_stats));
1648}
1649
1650int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
1651 struct ieee80211_vif *vif, int sset)
1652{
1653 if (sset == ETH_SS_STATS)
1654 return ATH9K_SSTATS_LEN;
1655 return 0;
1656}
1657
1658#define AWDATA(elem) \
1659 do { \
1660 data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].elem; \
1661 data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].elem; \
1662 data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].elem; \
1663 data[i++] = sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].elem; \
1664 } while (0)
1665
1666#define AWDATA_RX(elem) \
1667 do { \
1668 data[i++] = sc->debug.stats.rxstats.elem; \
1669 } while (0)
1670
1671void ath9k_get_et_stats(struct ieee80211_hw *hw,
1672 struct ieee80211_vif *vif,
1673 struct ethtool_stats *stats, u64 *data)
1674{
1675 struct ath_softc *sc = hw->priv;
1676 int i = 0;
1677
1678 data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_pkts_all +
1679 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_pkts_all +
1680 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_pkts_all +
1681 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_pkts_all);
1682 data[i++] = (sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BE)].tx_bytes_all +
1683 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_BK)].tx_bytes_all +
1684 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VI)].tx_bytes_all +
1685 sc->debug.stats.txstats[PR_QNUM(IEEE80211_AC_VO)].tx_bytes_all);
1686 AWDATA_RX(rx_pkts_all);
1687 AWDATA_RX(rx_bytes_all);
1688
1689 AWDATA(tx_pkts_all);
1690 AWDATA(tx_bytes_all);
1691 AWDATA(queued);
1692 AWDATA(completed);
1693 AWDATA(xretries);
1694 AWDATA(a_aggr);
1695 AWDATA(a_queued_hw);
1696 AWDATA(a_queued_sw);
1697 AWDATA(a_completed);
1698 AWDATA(a_retries);
1699 AWDATA(a_xretries);
1700 AWDATA(fifo_underrun);
1701 AWDATA(xtxop);
1702 AWDATA(timer_exp);
1703 AWDATA(desc_cfg_err);
1704 AWDATA(data_underrun);
1705 AWDATA(delim_underrun);
1706
1707 AWDATA_RX(decrypt_crc_err);
1708 AWDATA_RX(phy_err);
1709 AWDATA_RX(mic_err);
1710 AWDATA_RX(pre_delim_crc_err);
1711 AWDATA_RX(post_delim_crc_err);
1712 AWDATA_RX(decrypt_busy_err);
1713
1714 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
1715 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
1716 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
1717
1718 WARN_ON(i != ATH9K_SSTATS_LEN);
1719}
1720
1589int ath9k_init_debug(struct ath_hw *ah) 1721int ath9k_init_debug(struct ath_hw *ah)
1590{ 1722{
1591 struct ath_common *common = ath9k_hw_common(ah); 1723 struct ath_common *common = ath9k_hw_common(ah);
@@ -1609,16 +1741,16 @@ int ath9k_init_debug(struct ath_hw *ah)
1609 &fops_interrupt); 1741 &fops_interrupt);
1610 debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc, 1742 debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
1611 &fops_xmit); 1743 &fops_xmit);
1744 debugfs_create_file("queues", S_IRUSR, sc->debug.debugfs_phy, sc,
1745 &fops_queues);
1612 debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1746 debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1613 &sc->tx.txq_max_pending[WME_AC_BK]); 1747 &sc->tx.txq_max_pending[IEEE80211_AC_BK]);
1614 debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1748 debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1615 &sc->tx.txq_max_pending[WME_AC_BE]); 1749 &sc->tx.txq_max_pending[IEEE80211_AC_BE]);
1616 debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1750 debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1617 &sc->tx.txq_max_pending[WME_AC_VI]); 1751 &sc->tx.txq_max_pending[IEEE80211_AC_VI]);
1618 debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, 1752 debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
1619 &sc->tx.txq_max_pending[WME_AC_VO]); 1753 &sc->tx.txq_max_pending[IEEE80211_AC_VO]);
1620 debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
1621 &fops_stations);
1622 debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc, 1754 debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
1623 &fops_misc); 1755 &fops_misc);
1624 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc, 1756 debugfs_create_file("reset", S_IRUSR, sc->debug.debugfs_phy, sc,
@@ -1658,6 +1790,9 @@ int ath9k_init_debug(struct ath_hw *ah)
1658 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); 1790 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1659 debugfs_create_file("diversity", S_IRUSR | S_IWUSR, 1791 debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
1660 sc->debug.debugfs_phy, sc, &fops_ant_diversity); 1792 sc->debug.debugfs_phy, sc, &fops_ant_diversity);
1661 1793#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
1794 debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
1795 &fops_btcoex);
1796#endif
1662 return 0; 1797 return 0;
1663} 1798}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 2ed9785a38fa..375c3b46411e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -41,6 +41,7 @@ enum ath_reset_type {
41 RESET_TYPE_PLL_HANG, 41 RESET_TYPE_PLL_HANG,
42 RESET_TYPE_MAC_HANG, 42 RESET_TYPE_MAC_HANG,
43 RESET_TYPE_BEACON_STUCK, 43 RESET_TYPE_BEACON_STUCK,
44 RESET_TYPE_MCI,
44 __RESET_TYPE_MAX 45 __RESET_TYPE_MAX
45}; 46};
46 47
@@ -178,6 +179,21 @@ struct ath_tx_stats {
178 u32 txfailed; 179 u32 txfailed;
179}; 180};
180 181
182/*
183 * Various utility macros to print TX/Queue counters.
184 */
185#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum
186#define TXSTATS sc->debug.stats.txstats
187#define PR(str, elem) \
188 do { \
189 len += snprintf(buf + len, size - len, \
190 "%s%13u%11u%10u%10u\n", str, \
191 TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
192 TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
193 TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
194 TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
195 } while(0)
196
181#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++) 197#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
182 198
183/** 199/**
@@ -291,7 +307,22 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
291 struct ath_tx_status *ts, struct ath_txq *txq, 307 struct ath_tx_status *ts, struct ath_txq *txq,
292 unsigned int flags); 308 unsigned int flags);
293void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 309void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
294 310int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
311 struct ieee80211_vif *vif, int sset);
312void ath9k_get_et_stats(struct ieee80211_hw *hw,
313 struct ieee80211_vif *vif,
314 struct ethtool_stats *stats, u64 *data);
315void ath9k_get_et_strings(struct ieee80211_hw *hw,
316 struct ieee80211_vif *vif,
317 u32 sset, u8 *data);
318void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
319 struct ieee80211_vif *vif,
320 struct ieee80211_sta *sta,
321 struct dentry *dir);
322void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif,
324 struct ieee80211_sta *sta,
325 struct dentry *dir);
295#else 326#else
296 327
297#define RX_STAT_INC(c) /* NOP */ 328#define RX_STAT_INC(c) /* NOP */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index ea2a6cf7ef23..24877b00cbf4 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -42,10 +42,15 @@ struct radar_types {
42#define MIN_PPB_THRESH 50 42#define MIN_PPB_THRESH 50
43#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100) 43#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
44#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF) 44#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
45/* percentage of pulse width tolerance */
46#define WIDTH_TOLERANCE 5
47#define WIDTH_LOWER(X) ((X*(100-WIDTH_TOLERANCE)+50)/100)
48#define WIDTH_UPPER(X) ((X*(100+WIDTH_TOLERANCE)+50)/100)
45 49
46#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \ 50#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
47{ \ 51{ \
48 ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE), \ 52 ID, WIDTH_LOWER(WMIN), WIDTH_UPPER(WMAX), \
53 (PRF2PRI(PMAX) - PRI_TOLERANCE), \
49 (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \ 54 (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
50 PPB_THRESH(PPB), PRI_TOLERANCE, \ 55 PPB_THRESH(PPB), PRI_TOLERANCE, \
51} 56}
@@ -274,7 +279,7 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
274 279
275static struct dfs_pattern_detector default_dpd = { 280static struct dfs_pattern_detector default_dpd = {
276 .exit = dpd_exit, 281 .exit = dpd_exit,
277 .set_domain = dpd_set_domain, 282 .set_dfs_domain = dpd_set_domain,
278 .add_pulse = dpd_add_pulse, 283 .add_pulse = dpd_add_pulse,
279 .region = NL80211_DFS_UNSET, 284 .region = NL80211_DFS_UNSET,
280}; 285};
@@ -291,10 +296,11 @@ dfs_pattern_detector_init(enum nl80211_dfs_regions region)
291 *dpd = default_dpd; 296 *dpd = default_dpd;
292 INIT_LIST_HEAD(&dpd->channel_detectors); 297 INIT_LIST_HEAD(&dpd->channel_detectors);
293 298
294 if (dpd->set_domain(dpd, region)) 299 if (dpd->set_dfs_domain(dpd, region))
295 return dpd; 300 return dpd;
296 301
297 pr_err("Could not set DFS domain to %d. ", region); 302 pr_err("Could not set DFS domain to %d. ", region);
303 kfree(dpd);
298 return NULL; 304 return NULL;
299} 305}
300EXPORT_SYMBOL(dfs_pattern_detector_init); 306EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
index fd0328a30995..cda52f39f28a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -62,7 +62,7 @@ struct radar_detector_specs {
62/** 62/**
63 * struct dfs_pattern_detector - DFS pattern detector 63 * struct dfs_pattern_detector - DFS pattern detector
64 * @exit(): destructor 64 * @exit(): destructor
65 * @set_domain(): set DFS domain, resets detector lines upon domain changes 65 * @set_dfs_domain(): set DFS domain, resets detector lines upon domain changes
66 * @add_pulse(): add radar pulse to detector, returns true on detection 66 * @add_pulse(): add radar pulse to detector, returns true on detection
67 * @region: active DFS region, NL80211_DFS_UNSET until set 67 * @region: active DFS region, NL80211_DFS_UNSET until set
68 * @num_radar_types: number of different radar types 68 * @num_radar_types: number of different radar types
@@ -72,7 +72,7 @@ struct radar_detector_specs {
72 */ 72 */
73struct dfs_pattern_detector { 73struct dfs_pattern_detector {
74 void (*exit)(struct dfs_pattern_detector *dpd); 74 void (*exit)(struct dfs_pattern_detector *dpd);
75 bool (*set_domain)(struct dfs_pattern_detector *dpd, 75 bool (*set_dfs_domain)(struct dfs_pattern_detector *dpd,
76 enum nl80211_dfs_regions region); 76 enum nl80211_dfs_regions region);
77 bool (*add_pulse)(struct dfs_pattern_detector *dpd, 77 bool (*add_pulse)(struct dfs_pattern_detector *dpd,
78 struct pulse_event *pe); 78 struct pulse_event *pe);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 0512397a293c..971d770722cf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -113,9 +113,34 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
113 } 113 }
114} 114}
115 115
116bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) 116static bool ath9k_hw_nvram_read_blob(struct ath_hw *ah, u32 off,
117 u16 *data)
117{ 118{
118 return common->bus_ops->eeprom_read(common, off, data); 119 u16 *blob_data;
120
121 if (off * sizeof(u16) > ah->eeprom_blob->size)
122 return false;
123
124 blob_data = (u16 *)ah->eeprom_blob->data;
125 *data = blob_data[off];
126 return true;
127}
128
129bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
130{
131 struct ath_common *common = ath9k_hw_common(ah);
132 bool ret;
133
134 if (ah->eeprom_blob)
135 ret = ath9k_hw_nvram_read_blob(ah, off, data);
136 else
137 ret = common->bus_ops->eeprom_read(common, off, data);
138
139 if (!ret)
140 ath_dbg(common, EEPROM,
141 "unable to read eeprom region at offset %u\n", off);
142
143 return ret;
119} 144}
120 145
121void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 146void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 319c651fa6c5..40d4f62d0f16 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -663,7 +663,7 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
663 int16_t targetRight); 663 int16_t targetRight);
664bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, 664bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
665 u16 *indexL, u16 *indexR); 665 u16 *indexL, u16 *indexR);
666bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); 666bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data);
667void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, 667void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data,
668 int eep_start_loc, int size); 668 int eep_start_loc, int size);
669void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 669void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 7d075105a85d..c2bfd748eed8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -32,16 +32,12 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
32 32
33static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) 33static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
34{ 34{
35 struct ath_common *common = ath9k_hw_common(ah);
36 u16 *eep_data = (u16 *)&ah->eeprom.map4k; 35 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
37 int addr, eep_start_loc = 64; 36 int addr, eep_start_loc = 64;
38 37
39 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 38 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
40 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { 39 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
41 ath_dbg(common, EEPROM,
42 "Unable to read eeprom region\n");
43 return false; 40 return false;
44 }
45 eep_data++; 41 eep_data++;
46 } 42 }
47 43
@@ -196,7 +192,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
196 192
197 193
198 if (!ath9k_hw_use_flash(ah)) { 194 if (!ath9k_hw_use_flash(ah)) {
199 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, 195 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
200 &magic)) { 196 &magic)) {
201 ath_err(common, "Reading Magic # failed\n"); 197 ath_err(common, "Reading Magic # failed\n");
202 return false; 198 return false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index cd742fb944c2..3ae1f3df0637 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -33,18 +33,13 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
33static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) 33static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
34{ 34{
35 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 35 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
36 struct ath_common *common = ath9k_hw_common(ah);
37 u16 *eep_data; 36 u16 *eep_data;
38 int addr, eep_start_loc = AR9287_EEP_START_LOC; 37 int addr, eep_start_loc = AR9287_EEP_START_LOC;
39 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
40 39
41 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { 40 for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
42 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, 41 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data))
43 eep_data)) {
44 ath_dbg(common, EEPROM,
45 "Unable to read eeprom region\n");
46 return false; 42 return false;
47 }
48 eep_data++; 43 eep_data++;
49 } 44 }
50 45
@@ -190,7 +185,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
190 struct ath_common *common = ath9k_hw_common(ah); 185 struct ath_common *common = ath9k_hw_common(ah);
191 186
192 if (!ath9k_hw_use_flash(ah)) { 187 if (!ath9k_hw_use_flash(ah)) {
193 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, 188 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET,
194 &magic)) { 189 &magic)) {
195 ath_err(common, "Reading Magic # failed\n"); 190 ath_err(common, "Reading Magic # failed\n");
196 return false; 191 return false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index a8ac30a00720..1c25368b3836 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -91,17 +91,13 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
91 91
92static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah) 92static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
93{ 93{
94 struct ath_common *common = ath9k_hw_common(ah);
95 u16 *eep_data = (u16 *)&ah->eeprom.def; 94 u16 *eep_data = (u16 *)&ah->eeprom.def;
96 int addr, ar5416_eep_start_loc = 0x100; 95 int addr, ar5416_eep_start_loc = 0x100;
97 96
98 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 97 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
99 if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc, 98 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc,
100 eep_data)) { 99 eep_data))
101 ath_err(ath9k_hw_common(ah),
102 "Unable to read eeprom region\n");
103 return false; 100 return false;
104 }
105 eep_data++; 101 eep_data++;
106 } 102 }
107 return true; 103 return true;
@@ -271,7 +267,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
271 bool need_swap = false; 267 bool need_swap = false;
272 int i, addr, size; 268 int i, addr, size;
273 269
274 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 270 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
275 ath_err(common, "Reading Magic # failed\n"); 271 ath_err(common, "Reading Magic # failed\n");
276 return false; 272 return false;
277 } 273 }
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index d9ed141a053e..4b412aaf4f36 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -187,6 +187,24 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
187 } 187 }
188} 188}
189 189
190static void ath_mci_ftp_adjust(struct ath_softc *sc)
191{
192 struct ath_btcoex *btcoex = &sc->btcoex;
193 struct ath_mci_profile *mci = &btcoex->mci;
194 struct ath_hw *ah = sc->sc_ah;
195
196 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
197 if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
198 (mci->num_pan || mci->num_other_acl))
199 ah->btcoex_hw.mci.stomp_ftp =
200 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
201 else
202 ah->btcoex_hw.mci.stomp_ftp = false;
203 btcoex->bt_wait_time = 0;
204 sc->rx.num_pkts = 0;
205 }
206}
207
190/* 208/*
191 * This is the master bt coex timer which runs for every 209 * This is the master bt coex timer which runs for every
192 * 45ms, bt traffic will be given priority during 55% of this 210 * 45ms, bt traffic will be given priority during 55% of this
@@ -197,41 +215,46 @@ static void ath_btcoex_period_timer(unsigned long data)
197 struct ath_softc *sc = (struct ath_softc *) data; 215 struct ath_softc *sc = (struct ath_softc *) data;
198 struct ath_hw *ah = sc->sc_ah; 216 struct ath_hw *ah = sc->sc_ah;
199 struct ath_btcoex *btcoex = &sc->btcoex; 217 struct ath_btcoex *btcoex = &sc->btcoex;
200 struct ath_mci_profile *mci = &btcoex->mci; 218 enum ath_stomp_type stomp_type;
201 u32 timer_period; 219 u32 timer_period;
202 bool is_btscan;
203 unsigned long flags; 220 unsigned long flags;
204 221
205 spin_lock_irqsave(&sc->sc_pm_lock, flags); 222 spin_lock_irqsave(&sc->sc_pm_lock, flags);
206 if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) { 223 if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
224 btcoex->bt_wait_time += btcoex->btcoex_period;
207 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 225 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
208 goto skip_hw_wakeup; 226 goto skip_hw_wakeup;
209 } 227 }
210 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 228 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
211 229
230 ath9k_mci_update_rssi(sc);
231
212 ath9k_ps_wakeup(sc); 232 ath9k_ps_wakeup(sc);
233
213 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) 234 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
214 ath_detect_bt_priority(sc); 235 ath_detect_bt_priority(sc);
215 is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
216 236
217 btcoex->bt_wait_time += btcoex->btcoex_period; 237 if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
218 if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) { 238 ath_mci_ftp_adjust(sc);
219 if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
220 (mci->num_pan || mci->num_other_acl))
221 ah->btcoex_hw.mci.stomp_ftp =
222 (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
223 else
224 ah->btcoex_hw.mci.stomp_ftp = false;
225 btcoex->bt_wait_time = 0;
226 sc->rx.num_pkts = 0;
227 }
228 239
229 spin_lock_bh(&btcoex->btcoex_lock); 240 spin_lock_bh(&btcoex->btcoex_lock);
230 241
231 ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL : 242 stomp_type = btcoex->bt_stomp_type;
232 btcoex->bt_stomp_type); 243 timer_period = btcoex->btcoex_no_stomp;
244
245 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) {
246 if (test_bit(BT_OP_SCAN, &btcoex->op_flags)) {
247 stomp_type = ATH_BTCOEX_STOMP_ALL;
248 timer_period = btcoex->btscan_no_stomp;
249 }
250 } else if (btcoex->stomp_audio >= 5) {
251 stomp_type = ATH_BTCOEX_STOMP_AUDIO;
252 btcoex->stomp_audio = 0;
253 }
233 254
255 ath9k_hw_btcoex_bt_stomp(ah, stomp_type);
234 ath9k_hw_btcoex_enable(ah); 256 ath9k_hw_btcoex_enable(ah);
257
235 spin_unlock_bh(&btcoex->btcoex_lock); 258 spin_unlock_bh(&btcoex->btcoex_lock);
236 259
237 /* 260 /*
@@ -243,17 +266,16 @@ static void ath_btcoex_period_timer(unsigned long data)
243 if (btcoex->hw_timer_enabled) 266 if (btcoex->hw_timer_enabled)
244 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer); 267 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
245 268
246 timer_period = is_btscan ? btcoex->btscan_no_stomp :
247 btcoex->btcoex_no_stomp;
248 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period, 269 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, timer_period,
249 timer_period * 10); 270 timer_period * 10);
250 btcoex->hw_timer_enabled = true; 271 btcoex->hw_timer_enabled = true;
251 } 272 }
252 273
253 ath9k_ps_restore(sc); 274 ath9k_ps_restore(sc);
275
254skip_hw_wakeup: 276skip_hw_wakeup:
255 timer_period = btcoex->btcoex_period; 277 mod_timer(&btcoex->period_timer,
256 mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period)); 278 jiffies + msecs_to_jiffies(btcoex->btcoex_period));
257} 279}
258 280
259/* 281/*
@@ -273,9 +295,10 @@ static void ath_btcoex_no_stomp_timer(void *arg)
273 spin_lock_bh(&btcoex->btcoex_lock); 295 spin_lock_bh(&btcoex->btcoex_lock);
274 296
275 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || 297 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
276 test_bit(BT_OP_SCAN, &btcoex->op_flags)) 298 (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI) &&
299 test_bit(BT_OP_SCAN, &btcoex->op_flags)))
277 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 300 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
278 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL) 301 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
279 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW); 302 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
280 303
281 ath9k_hw_btcoex_enable(ah); 304 ath9k_hw_btcoex_enable(ah);
@@ -451,7 +474,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
451 r = ath_init_btcoex_timer(sc); 474 r = ath_init_btcoex_timer(sc);
452 if (r) 475 if (r)
453 return -1; 476 return -1;
454 txq = sc->tx.txq_map[WME_AC_BE]; 477 txq = sc->tx.txq_map[IEEE80211_AC_BE];
455 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 478 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
456 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 479 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
457 if (ath9k_hw_mci_is_enabled(ah)) { 480 if (ath9k_hw_mci_is_enabled(ah)) {
@@ -474,4 +497,71 @@ int ath9k_init_btcoex(struct ath_softc *sc)
474 return 0; 497 return 0;
475} 498}
476 499
500static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
501{
502 struct ath_btcoex *btcoex = &sc->btcoex;
503 struct ath_mci_profile *mci = &btcoex->mci;
504 struct ath_hw *ah = sc->sc_ah;
505 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
506 u32 len = 0;
507 int i;
508
509 ATH_DUMP_BTCOEX("Total BT profiles", NUM_PROF(mci));
510 ATH_DUMP_BTCOEX("MGMT", mci->num_mgmt);
511 ATH_DUMP_BTCOEX("SCO", mci->num_sco);
512 ATH_DUMP_BTCOEX("A2DP", mci->num_a2dp);
513 ATH_DUMP_BTCOEX("HID", mci->num_hid);
514 ATH_DUMP_BTCOEX("PAN", mci->num_pan);
515 ATH_DUMP_BTCOEX("ACL", mci->num_other_acl);
516 ATH_DUMP_BTCOEX("BDR", mci->num_bdr);
517 ATH_DUMP_BTCOEX("Aggr. Limit", mci->aggr_limit);
518 ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
519 ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
520 ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
521 ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
522 ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
523 ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
524
525 len += snprintf(buf + len, size - len, "BT Weights: ");
526 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
527 len += snprintf(buf + len, size - len, "%08x ",
528 btcoex_hw->bt_weight[i]);
529 len += snprintf(buf + len, size - len, "\n");
530 len += snprintf(buf + len, size - len, "WLAN Weights: ");
531 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
532 len += snprintf(buf + len, size - len, "%08x ",
533 btcoex_hw->wlan_weight[i]);
534 len += snprintf(buf + len, size - len, "\n");
535 len += snprintf(buf + len, size - len, "Tx Priorities: ");
536 for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
537 len += snprintf(buf + len, size - len, "%08x ",
538 btcoex_hw->tx_prio[i]);
539
540 len += snprintf(buf + len, size - len, "\n");
541
542 return len;
543}
544
545static int ath9k_dump_legacy_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
546{
547
548 struct ath_btcoex *btcoex = &sc->btcoex;
549 u32 len = 0;
550
551 ATH_DUMP_BTCOEX("Stomp Type", btcoex->bt_stomp_type);
552 ATH_DUMP_BTCOEX("BTCoex Period (msec)", btcoex->btcoex_period);
553 ATH_DUMP_BTCOEX("Duty Cycle", btcoex->duty_cycle);
554 ATH_DUMP_BTCOEX("BT Wait time", btcoex->bt_wait_time);
555
556 return len;
557}
558
559int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
560{
561 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
562 return ath9k_dump_mci_btcoex(sc, buf, size);
563 else
564 return ath9k_dump_legacy_btcoex(sc, buf, size);
565}
566
477#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 567#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index b30596fcf73a..96bfb18078fa 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -331,7 +331,7 @@ struct ath_tx_stats {
331 u32 skb_success; 331 u32 skb_success;
332 u32 skb_failed; 332 u32 skb_failed;
333 u32 cab_queued; 333 u32 cab_queued;
334 u32 queue_stats[WME_NUM_AC]; 334 u32 queue_stats[IEEE80211_NUM_ACS];
335}; 335};
336 336
337struct ath_rx_stats { 337struct ath_rx_stats {
@@ -493,7 +493,7 @@ struct ath9k_htc_priv {
493 493
494 int beaconq; 494 int beaconq;
495 int cabq; 495 int cabq;
496 int hwq_map[WME_NUM_AC]; 496 int hwq_map[IEEE80211_NUM_ACS];
497 497
498#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 498#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
499 struct ath_btcoex btcoex; 499 struct ath_btcoex btcoex;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f42d2eb6af99..d0ce1f5bba10 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -33,7 +33,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
33 qi.tqi_cwmin = 0; 33 qi.tqi_cwmin = 0;
34 qi.tqi_cwmax = 0; 34 qi.tqi_cwmax = 0;
35 } else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) { 35 } else if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
36 int qnum = priv->hwq_map[WME_AC_BE]; 36 int qnum = priv->hwq_map[IEEE80211_AC_BE];
37 37
38 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 38 ath9k_hw_get_txq_props(ah, qnum, &qi_be);
39 39
@@ -587,9 +587,9 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
587 (priv->num_sta_vif > 1) && 587 (priv->num_sta_vif > 1) &&
588 (vif->type == NL80211_IFTYPE_STATION)) { 588 (vif->type == NL80211_IFTYPE_STATION)) {
589 beacon_configured = false; 589 beacon_configured = false;
590 ieee80211_iterate_active_interfaces_atomic(priv->hw, 590 ieee80211_iterate_active_interfaces_atomic(
591 ath9k_htc_beacon_iter, 591 priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
592 &beacon_configured); 592 ath9k_htc_beacon_iter, &beacon_configured);
593 593
594 if (beacon_configured) { 594 if (beacon_configured) {
595 ath_dbg(common, CONFIG, 595 ath_dbg(common, CONFIG,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
index 3035deb7a0cd..87110de577ef 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
@@ -218,16 +218,16 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
218 218
219 len += snprintf(buf + len, sizeof(buf) - len, 219 len += snprintf(buf + len, sizeof(buf) - len,
220 "%20s : %10u\n", "BE queued", 220 "%20s : %10u\n", "BE queued",
221 priv->debug.tx_stats.queue_stats[WME_AC_BE]); 221 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
222 len += snprintf(buf + len, sizeof(buf) - len, 222 len += snprintf(buf + len, sizeof(buf) - len,
223 "%20s : %10u\n", "BK queued", 223 "%20s : %10u\n", "BK queued",
224 priv->debug.tx_stats.queue_stats[WME_AC_BK]); 224 priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
225 len += snprintf(buf + len, sizeof(buf) - len, 225 len += snprintf(buf + len, sizeof(buf) - len,
226 "%20s : %10u\n", "VI queued", 226 "%20s : %10u\n", "VI queued",
227 priv->debug.tx_stats.queue_stats[WME_AC_VI]); 227 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
228 len += snprintf(buf + len, sizeof(buf) - len, 228 len += snprintf(buf + len, sizeof(buf) - len,
229 "%20s : %10u\n", "VO queued", 229 "%20s : %10u\n", "VO queued",
230 priv->debug.tx_stats.queue_stats[WME_AC_VO]); 230 priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
231 231
232 if (len > sizeof(buf)) 232 if (len > sizeof(buf))
233 len = sizeof(buf); 233 len = sizeof(buf);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 0eacfc13c915..105582d6b714 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -207,7 +207,7 @@ void ath9k_htc_init_btcoex(struct ath9k_htc_priv *priv, char *product)
207 priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 207 priv->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
208 ath9k_hw_btcoex_init_3wire(priv->ah); 208 ath9k_hw_btcoex_init_3wire(priv->ah);
209 ath_htc_init_btcoex_work(priv); 209 ath_htc_init_btcoex_work(priv);
210 qnum = priv->hwq_map[WME_AC_BE]; 210 qnum = priv->hwq_map[IEEE80211_AC_BE];
211 ath9k_hw_init_btcoex_hw(priv->ah, qnum); 211 ath9k_hw_init_btcoex_hw(priv->ah, qnum);
212 break; 212 break;
213 default: 213 default:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index d98255eb1b9a..05d5ba66cac3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -549,20 +549,20 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
549 goto err; 549 goto err;
550 } 550 }
551 551
552 if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) { 552 if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BE)) {
553 ath_err(common, "Unable to setup xmit queue for BE traffic\n"); 553 ath_err(common, "Unable to setup xmit queue for BE traffic\n");
554 goto err; 554 goto err;
555 } 555 }
556 556
557 if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) { 557 if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_BK)) {
558 ath_err(common, "Unable to setup xmit queue for BK traffic\n"); 558 ath_err(common, "Unable to setup xmit queue for BK traffic\n");
559 goto err; 559 goto err;
560 } 560 }
561 if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) { 561 if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VI)) {
562 ath_err(common, "Unable to setup xmit queue for VI traffic\n"); 562 ath_err(common, "Unable to setup xmit queue for VI traffic\n");
563 goto err; 563 goto err;
564 } 564 }
565 if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) { 565 if (!ath9k_htc_txq_setup(priv, IEEE80211_AC_VO)) {
566 ath_err(common, "Unable to setup xmit queue for VO traffic\n"); 566 ath_err(common, "Unable to setup xmit queue for VO traffic\n");
567 goto err; 567 goto err;
568 } 568 }
@@ -694,6 +694,20 @@ err_hw:
694 return ret; 694 return ret;
695} 695}
696 696
697static const struct ieee80211_iface_limit if_limits[] = {
698 { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
699 BIT(NL80211_IFTYPE_P2P_CLIENT) },
700 { .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
701 BIT(NL80211_IFTYPE_P2P_GO) },
702};
703
704static const struct ieee80211_iface_combination if_comb = {
705 .limits = if_limits,
706 .n_limits = ARRAY_SIZE(if_limits),
707 .max_interfaces = 2,
708 .num_different_channels = 1,
709};
710
697static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, 711static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
698 struct ieee80211_hw *hw) 712 struct ieee80211_hw *hw)
699{ 713{
@@ -716,6 +730,9 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
716 BIT(NL80211_IFTYPE_P2P_GO) | 730 BIT(NL80211_IFTYPE_P2P_GO) |
717 BIT(NL80211_IFTYPE_P2P_CLIENT); 731 BIT(NL80211_IFTYPE_P2P_CLIENT);
718 732
733 hw->wiphy->iface_combinations = &if_comb;
734 hw->wiphy->n_iface_combinations = 1;
735
719 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 736 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
720 737
721 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN | 738 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index ca78e33ca23e..9c07a8fa5134 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,8 +127,9 @@ static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv)
127 priv->rearm_ani = false; 127 priv->rearm_ani = false;
128 priv->reconfig_beacon = false; 128 priv->reconfig_beacon = false;
129 129
130 ieee80211_iterate_active_interfaces_atomic(priv->hw, 130 ieee80211_iterate_active_interfaces_atomic(
131 ath9k_htc_vif_iter, priv); 131 priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
132 ath9k_htc_vif_iter, priv);
132 if (priv->rearm_ani) 133 if (priv->rearm_ani)
133 ath9k_htc_start_ani(priv); 134 ath9k_htc_start_ani(priv);
134 135
@@ -165,8 +166,9 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
165 ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); 166 ath9k_htc_bssid_iter(&iter_data, vif->addr, vif);
166 167
167 /* Get list of all active MAC addresses */ 168 /* Get list of all active MAC addresses */
168 ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter, 169 ieee80211_iterate_active_interfaces_atomic(
169 &iter_data); 170 priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
171 ath9k_htc_bssid_iter, &iter_data);
170 172
171 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); 173 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
172 ath_hw_setbssidmask(common); 174 ath_hw_setbssidmask(common);
@@ -1036,26 +1038,6 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1036 1038
1037 mutex_lock(&priv->mutex); 1039 mutex_lock(&priv->mutex);
1038 1040
1039 if (priv->nvifs >= ATH9K_HTC_MAX_VIF) {
1040 mutex_unlock(&priv->mutex);
1041 return -ENOBUFS;
1042 }
1043
1044 if (priv->num_ibss_vif ||
1045 (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
1046 ath_err(common, "IBSS coexistence with other modes is not allowed\n");
1047 mutex_unlock(&priv->mutex);
1048 return -ENOBUFS;
1049 }
1050
1051 if (((vif->type == NL80211_IFTYPE_AP) ||
1052 (vif->type == NL80211_IFTYPE_ADHOC)) &&
1053 ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) {
1054 ath_err(common, "Max. number of beaconing interfaces reached\n");
1055 mutex_unlock(&priv->mutex);
1056 return -ENOBUFS;
1057 }
1058
1059 ath9k_htc_ps_wakeup(priv); 1041 ath9k_htc_ps_wakeup(priv);
1060 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 1042 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
1061 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN); 1043 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
@@ -1164,8 +1146,9 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1164 */ 1146 */
1165 if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) { 1147 if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) {
1166 priv->rearm_ani = false; 1148 priv->rearm_ani = false;
1167 ieee80211_iterate_active_interfaces_atomic(priv->hw, 1149 ieee80211_iterate_active_interfaces_atomic(
1168 ath9k_htc_vif_iter, priv); 1150 priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1151 ath9k_htc_vif_iter, priv);
1169 if (!priv->rearm_ani) 1152 if (!priv->rearm_ani)
1170 ath9k_htc_stop_ani(priv); 1153 ath9k_htc_stop_ani(priv);
1171 } 1154 }
@@ -1366,7 +1349,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
1366 struct ath9k_tx_queue_info qi; 1349 struct ath9k_tx_queue_info qi;
1367 int ret = 0, qnum; 1350 int ret = 0, qnum;
1368 1351
1369 if (queue >= WME_NUM_AC) 1352 if (queue >= IEEE80211_NUM_ACS)
1370 return 0; 1353 return 0;
1371 1354
1372 mutex_lock(&priv->mutex); 1355 mutex_lock(&priv->mutex);
@@ -1393,7 +1376,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
1393 } 1376 }
1394 1377
1395 if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) && 1378 if ((priv->ah->opmode == NL80211_IFTYPE_ADHOC) &&
1396 (qnum == priv->hwq_map[WME_AC_BE])) 1379 (qnum == priv->hwq_map[IEEE80211_AC_BE]))
1397 ath9k_htc_beaconq_config(priv); 1380 ath9k_htc_beaconq_config(priv);
1398out: 1381out:
1399 ath9k_htc_ps_restore(priv); 1382 ath9k_htc_ps_restore(priv);
@@ -1486,8 +1469,9 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1486static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv) 1469static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv)
1487{ 1470{
1488 if (priv->num_sta_assoc_vif == 1) { 1471 if (priv->num_sta_assoc_vif == 1) {
1489 ieee80211_iterate_active_interfaces_atomic(priv->hw, 1472 ieee80211_iterate_active_interfaces_atomic(
1490 ath9k_htc_bss_iter, priv); 1473 priv->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1474 ath9k_htc_bss_iter, priv);
1491 ath9k_htc_set_bssid(priv); 1475 ath9k_htc_set_bssid(priv);
1492 } 1476 }
1493} 1477}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 06cdcb772d78..b6a5a08810b8 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -21,10 +21,10 @@
21/******/ 21/******/
22 22
23static const int subtype_txq_to_hwq[] = { 23static const int subtype_txq_to_hwq[] = {
24 [WME_AC_BE] = ATH_TXQ_AC_BE, 24 [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
25 [WME_AC_BK] = ATH_TXQ_AC_BK, 25 [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
26 [WME_AC_VI] = ATH_TXQ_AC_VI, 26 [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
27 [WME_AC_VO] = ATH_TXQ_AC_VO, 27 [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
28}; 28};
29 29
30#define ATH9K_HTC_INIT_TXQ(subtype) do { \ 30#define ATH9K_HTC_INIT_TXQ(subtype) do { \
@@ -41,15 +41,15 @@ int get_hw_qnum(u16 queue, int *hwq_map)
41{ 41{
42 switch (queue) { 42 switch (queue) {
43 case 0: 43 case 0:
44 return hwq_map[WME_AC_VO]; 44 return hwq_map[IEEE80211_AC_VO];
45 case 1: 45 case 1:
46 return hwq_map[WME_AC_VI]; 46 return hwq_map[IEEE80211_AC_VI];
47 case 2: 47 case 2:
48 return hwq_map[WME_AC_BE]; 48 return hwq_map[IEEE80211_AC_BE];
49 case 3: 49 case 3:
50 return hwq_map[WME_AC_BK]; 50 return hwq_map[IEEE80211_AC_BK];
51 default: 51 default:
52 return hwq_map[WME_AC_BE]; 52 return hwq_map[IEEE80211_AC_BE];
53 } 53 }
54} 54}
55 55
@@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv,
106 106
107 switch (qnum) { 107 switch (qnum) {
108 case 0: 108 case 0:
109 TX_QSTAT_INC(WME_AC_VO); 109 TX_QSTAT_INC(IEEE80211_AC_VO);
110 epid = priv->data_vo_ep; 110 epid = priv->data_vo_ep;
111 break; 111 break;
112 case 1: 112 case 1:
113 TX_QSTAT_INC(WME_AC_VI); 113 TX_QSTAT_INC(IEEE80211_AC_VI);
114 epid = priv->data_vi_ep; 114 epid = priv->data_vi_ep;
115 break; 115 break;
116 case 2: 116 case 2:
117 TX_QSTAT_INC(WME_AC_BE); 117 TX_QSTAT_INC(IEEE80211_AC_BE);
118 epid = priv->data_be_ep; 118 epid = priv->data_be_ep;
119 break; 119 break;
120 case 3: 120 case 3:
121 default: 121 default:
122 TX_QSTAT_INC(WME_AC_BK); 122 TX_QSTAT_INC(IEEE80211_AC_BK);
123 epid = priv->data_bk_ep; 123 epid = priv->data_bk_ep;
124 break; 124 break;
125 } 125 }
@@ -1082,7 +1082,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1082 rx_status->freq = hw->conf.channel->center_freq; 1082 rx_status->freq = hw->conf.channel->center_freq;
1083 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; 1083 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
1084 rx_status->antenna = rxbuf->rxstatus.rs_antenna; 1084 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
1085 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 1085 rx_status->flag |= RX_FLAG_MACTIME_END;
1086 1086
1087 return true; 1087 return true;
1088 1088
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1829b445d0b0..7cb787065913 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2153,9 +2153,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2153 AR_RTC_FORCE_WAKE_EN); 2153 AR_RTC_FORCE_WAKE_EN);
2154 udelay(50); 2154 udelay(50);
2155 2155
2156 if (ath9k_hw_mci_is_enabled(ah))
2157 ar9003_mci_set_power_awake(ah);
2158
2159 for (i = POWER_UP_TIME / 50; i > 0; i--) { 2156 for (i = POWER_UP_TIME / 50; i > 0; i--) {
2160 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; 2157 val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
2161 if (val == AR_RTC_STATUS_ON) 2158 if (val == AR_RTC_STATUS_ON)
@@ -2171,6 +2168,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
2171 return false; 2168 return false;
2172 } 2169 }
2173 2170
2171 if (ath9k_hw_mci_is_enabled(ah))
2172 ar9003_mci_set_power_awake(ah);
2173
2174 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 2174 REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2175 2175
2176 return true; 2176 return true;
@@ -2561,11 +2561,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2561 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 2561 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
2562 } 2562 }
2563 2563
2564 if (AR_SREV_9485_10(ah)) {
2565 pCap->pcie_lcr_extsync_en = true;
2566 pCap->pcie_lcr_offset = 0x80;
2567 }
2568
2569 if (ath9k_hw_dfs_tested(ah)) 2564 if (ath9k_hw_dfs_tested(ah))
2570 pCap->hw_caps |= ATH9K_HW_CAP_DFS; 2565 pCap->hw_caps |= ATH9K_HW_CAP_DFS;
2571 2566
@@ -2604,6 +2599,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
2604 pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD; 2599 pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
2605 } 2600 }
2606 2601
2602 if (AR_SREV_9300_20_OR_LATER(ah) &&
2603 ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
2604 pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
2605
2607 return 0; 2606 return 0;
2608} 2607}
2609 2608
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbc1b7a4cbfd..7f1a8e91c908 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -20,6 +20,7 @@
20#include <linux/if_ether.h> 20#include <linux/if_ether.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/firmware.h>
23 24
24#include "mac.h" 25#include "mac.h"
25#include "ani.h" 26#include "ani.h"
@@ -247,6 +248,7 @@ enum ath9k_hw_caps {
247 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17), 248 ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
248 ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18), 249 ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18),
249 ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19), 250 ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
251 ATH9K_HW_CAP_PAPRD = BIT(20),
250}; 252};
251 253
252/* 254/*
@@ -273,8 +275,6 @@ struct ath9k_hw_capabilities {
273 u8 rx_status_len; 275 u8 rx_status_len;
274 u8 tx_desc_len; 276 u8 tx_desc_len;
275 u8 txs_len; 277 u8 txs_len;
276 u16 pcie_lcr_offset;
277 bool pcie_lcr_extsync_en;
278}; 278};
279 279
280struct ath9k_ops_config { 280struct ath9k_ops_config {
@@ -401,6 +401,7 @@ enum ath9k_int {
401struct ath9k_hw_cal_data { 401struct ath9k_hw_cal_data {
402 u16 channel; 402 u16 channel;
403 u32 channelFlags; 403 u32 channelFlags;
404 u32 chanmode;
404 int32_t CalValid; 405 int32_t CalValid;
405 int8_t iCoff; 406 int8_t iCoff;
406 int8_t qCoff; 407 int8_t qCoff;
@@ -834,6 +835,7 @@ struct ath_hw {
834 int coarse_low[5]; 835 int coarse_low[5];
835 int firpwr[5]; 836 int firpwr[5];
836 enum ath9k_ani_cmd ani_function; 837 enum ath9k_ani_cmd ani_function;
838 u32 ani_skip_count;
837 839
838#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 840#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
839 struct ath_btcoex_hw btcoex_hw; 841 struct ath_btcoex_hw btcoex_hw;
@@ -875,7 +877,6 @@ struct ath_hw {
875 struct ar5416IniArray iniModesTxGain; 877 struct ar5416IniArray iniModesTxGain;
876 struct ar5416IniArray iniCckfirNormal; 878 struct ar5416IniArray iniCckfirNormal;
877 struct ar5416IniArray iniCckfirJapan2484; 879 struct ar5416IniArray iniCckfirJapan2484;
878 struct ar5416IniArray ini_japan2484;
879 struct ar5416IniArray iniModes_9271_ANI_reg; 880 struct ar5416IniArray iniModes_9271_ANI_reg;
880 struct ar5416IniArray ini_radio_post_sys2ant; 881 struct ar5416IniArray ini_radio_post_sys2ant;
881 882
@@ -921,6 +922,8 @@ struct ath_hw {
921 bool is_clk_25mhz; 922 bool is_clk_25mhz;
922 int (*get_mac_revision)(void); 923 int (*get_mac_revision)(void);
923 int (*external_reset)(void); 924 int (*external_reset)(void);
925
926 const struct firmware *eeprom_blob;
924}; 927};
925 928
926struct ath_bus_ops { 929struct ath_bus_ops {
@@ -928,7 +931,6 @@ struct ath_bus_ops {
928 void (*read_cachesize)(struct ath_common *common, int *csz); 931 void (*read_cachesize)(struct ath_common *common, int *csz);
929 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 932 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
930 void (*bt_coex_prep)(struct ath_common *common); 933 void (*bt_coex_prep)(struct ath_common *common);
931 void (*extn_synch_en)(struct ath_common *common);
932 void (*aspm_init)(struct ath_common *common); 934 void (*aspm_init)(struct ath_common *common);
933}; 935};
934 936
@@ -1060,9 +1062,10 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
1060 int chain); 1062 int chain);
1061int ar9003_paprd_create_curve(struct ath_hw *ah, 1063int ar9003_paprd_create_curve(struct ath_hw *ah,
1062 struct ath9k_hw_cal_data *caldata, int chain); 1064 struct ath9k_hw_cal_data *caldata, int chain);
1063int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 1065void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
1064int ar9003_paprd_init_table(struct ath_hw *ah); 1066int ar9003_paprd_init_table(struct ath_hw *ah);
1065bool ar9003_paprd_is_done(struct ath_hw *ah); 1067bool ar9003_paprd_is_done(struct ath_hw *ah);
1068bool ar9003_is_paprd_enabled(struct ath_hw *ah);
1066 1069
1067/* Hardware family op attach helpers */ 1070/* Hardware family op attach helpers */
1068void ar5008_hw_attach_phy_ops(struct ath_hw *ah); 1071void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index fad3ccd5cd91..f69ef5d48c7b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -23,6 +23,11 @@
23 23
24#include "ath9k.h" 24#include "ath9k.h"
25 25
26struct ath9k_eeprom_ctx {
27 struct completion complete;
28 struct ath_hw *ah;
29};
30
26static char *dev_info = "ath9k"; 31static char *dev_info = "ath9k";
27 32
28MODULE_AUTHOR("Atheros Communications"); 33MODULE_AUTHOR("Atheros Communications");
@@ -435,7 +440,7 @@ static int ath9k_init_queues(struct ath_softc *sc)
435 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 440 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
436 ath_cabq_update(sc); 441 ath_cabq_update(sc);
437 442
438 for (i = 0; i < WME_NUM_AC; i++) { 443 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
439 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); 444 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
440 sc->tx.txq_map[i]->mac80211_qnum = i; 445 sc->tx.txq_map[i]->mac80211_qnum = i;
441 sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH; 446 sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
@@ -506,6 +511,51 @@ static void ath9k_init_misc(struct ath_softc *sc)
506 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; 511 sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;
507} 512}
508 513
514static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
515 void *ctx)
516{
517 struct ath9k_eeprom_ctx *ec = ctx;
518
519 if (eeprom_blob)
520 ec->ah->eeprom_blob = eeprom_blob;
521
522 complete(&ec->complete);
523}
524
525static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
526{
527 struct ath9k_eeprom_ctx ec;
528 struct ath_hw *ah = ah = sc->sc_ah;
529 int err;
530
531 /* try to load the EEPROM content asynchronously */
532 init_completion(&ec.complete);
533 ec.ah = sc->sc_ah;
534
535 err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
536 &ec, ath9k_eeprom_request_cb);
537 if (err < 0) {
538 ath_err(ath9k_hw_common(ah),
539 "EEPROM request failed\n");
540 return err;
541 }
542
543 wait_for_completion(&ec.complete);
544
545 if (!ah->eeprom_blob) {
546 ath_err(ath9k_hw_common(ah),
547 "Unable to load EEPROM file %s\n", name);
548 return -EINVAL;
549 }
550
551 return 0;
552}
553
554static void ath9k_eeprom_release(struct ath_softc *sc)
555{
556 release_firmware(sc->sc_ah->eeprom_blob);
557}
558
509static int ath9k_init_softc(u16 devid, struct ath_softc *sc, 559static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
510 const struct ath_bus_ops *bus_ops) 560 const struct ath_bus_ops *bus_ops)
511{ 561{
@@ -563,10 +613,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
563 spin_lock_init(&sc->sc_serial_rw); 613 spin_lock_init(&sc->sc_serial_rw);
564 spin_lock_init(&sc->sc_pm_lock); 614 spin_lock_init(&sc->sc_pm_lock);
565 mutex_init(&sc->mutex); 615 mutex_init(&sc->mutex);
566#ifdef CONFIG_ATH9K_DEBUGFS
567 spin_lock_init(&sc->nodes_lock);
568 INIT_LIST_HEAD(&sc->nodes);
569#endif
570#ifdef CONFIG_ATH9K_MAC_DEBUG 616#ifdef CONFIG_ATH9K_MAC_DEBUG
571 spin_lock_init(&sc->debug.samp_lock); 617 spin_lock_init(&sc->debug.samp_lock);
572#endif 618#endif
@@ -587,6 +633,12 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
587 ath_read_cachesize(common, &csz); 633 ath_read_cachesize(common, &csz);
588 common->cachelsz = csz << 2; /* convert to bytes */ 634 common->cachelsz = csz << 2; /* convert to bytes */
589 635
636 if (pdata && pdata->eeprom_name) {
637 ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
638 if (ret)
639 goto err_eeprom;
640 }
641
590 /* Initializes the hardware for all supported chipsets */ 642 /* Initializes the hardware for all supported chipsets */
591 ret = ath9k_hw_init(ah); 643 ret = ath9k_hw_init(ah);
592 if (ret) 644 if (ret)
@@ -623,7 +675,8 @@ err_btcoex:
623err_queues: 675err_queues:
624 ath9k_hw_deinit(ah); 676 ath9k_hw_deinit(ah);
625err_hw: 677err_hw:
626 678 ath9k_eeprom_release(sc);
679err_eeprom:
627 kfree(ah); 680 kfree(ah);
628 sc->sc_ah = NULL; 681 sc->sc_ah = NULL;
629 682
@@ -687,6 +740,7 @@ static const struct ieee80211_iface_combination if_comb = {
687 .n_limits = ARRAY_SIZE(if_limits), 740 .n_limits = ARRAY_SIZE(if_limits),
688 .max_interfaces = 2048, 741 .max_interfaces = 2048,
689 .num_different_channels = 1, 742 .num_different_channels = 1,
743 .beacon_int_infra_match = true,
690}; 744};
691 745
692void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 746void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
@@ -885,6 +939,7 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
885 if (sc->dfs_detector != NULL) 939 if (sc->dfs_detector != NULL)
886 sc->dfs_detector->exit(sc->dfs_detector); 940 sc->dfs_detector->exit(sc->dfs_detector);
887 941
942 ath9k_eeprom_release(sc);
888 kfree(sc->sc_ah); 943 kfree(sc->sc_ah);
889 sc->sc_ah = NULL; 944 sc->sc_ah = NULL;
890} 945}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 7b88b9c39ccd..ade3afb21f91 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -27,9 +27,6 @@ void ath_tx_complete_poll_work(struct work_struct *work)
27 struct ath_txq *txq; 27 struct ath_txq *txq;
28 int i; 28 int i;
29 bool needreset = false; 29 bool needreset = false;
30#ifdef CONFIG_ATH9K_DEBUGFS
31 sc->tx_complete_poll_work_seen++;
32#endif
33 30
34 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 31 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
35 if (ATH_TXQ_SETUP(sc, i)) { 32 if (ATH_TXQ_SETUP(sc, i)) {
@@ -182,13 +179,15 @@ void ath_rx_poll(unsigned long data)
182static void ath_paprd_activate(struct ath_softc *sc) 179static void ath_paprd_activate(struct ath_softc *sc)
183{ 180{
184 struct ath_hw *ah = sc->sc_ah; 181 struct ath_hw *ah = sc->sc_ah;
182 struct ath_common *common = ath9k_hw_common(ah);
185 struct ath9k_hw_cal_data *caldata = ah->caldata; 183 struct ath9k_hw_cal_data *caldata = ah->caldata;
186 int chain; 184 int chain;
187 185
188 if (!caldata || !caldata->paprd_done) 186 if (!caldata || !caldata->paprd_done) {
187 ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
189 return; 188 return;
189 }
190 190
191 ath9k_ps_wakeup(sc);
192 ar9003_paprd_enable(ah, false); 191 ar9003_paprd_enable(ah, false);
193 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 192 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
194 if (!(ah->txchainmask & BIT(chain))) 193 if (!(ah->txchainmask & BIT(chain)))
@@ -197,8 +196,8 @@ static void ath_paprd_activate(struct ath_softc *sc)
197 ar9003_paprd_populate_single_table(ah, caldata, chain); 196 ar9003_paprd_populate_single_table(ah, caldata, chain);
198 } 197 }
199 198
199 ath_dbg(common, CALIBRATE, "Activating PAPRD\n");
200 ar9003_paprd_enable(ah, true); 200 ar9003_paprd_enable(ah, true);
201 ath9k_ps_restore(sc);
202} 201}
203 202
204static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain) 203static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
@@ -211,7 +210,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
211 int time_left; 210 int time_left;
212 211
213 memset(&txctl, 0, sizeof(txctl)); 212 memset(&txctl, 0, sizeof(txctl));
214 txctl.txq = sc->tx.txq_map[WME_AC_BE]; 213 txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
215 214
216 memset(tx_info, 0, sizeof(*tx_info)); 215 memset(tx_info, 0, sizeof(*tx_info));
217 tx_info->band = hw->conf.channel->band; 216 tx_info->band = hw->conf.channel->band;
@@ -256,8 +255,10 @@ void ath_paprd_calibrate(struct work_struct *work)
256 int len = 1800; 255 int len = 1800;
257 int ret; 256 int ret;
258 257
259 if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) 258 if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
259 ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
260 return; 260 return;
261 }
261 262
262 ath9k_ps_wakeup(sc); 263 ath9k_ps_wakeup(sc);
263 264
@@ -350,8 +351,18 @@ void ath_ani_calibrate(unsigned long data)
350 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; 351 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
351 352
352 /* Only calibrate if awake */ 353 /* Only calibrate if awake */
353 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) 354 if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE) {
355 if (++ah->ani_skip_count >= ATH_ANI_MAX_SKIP_COUNT) {
356 spin_lock_irqsave(&sc->sc_pm_lock, flags);
357 sc->ps_flags |= PS_WAIT_FOR_ANI;
358 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
359 }
354 goto set_timer; 360 goto set_timer;
361 }
362 ah->ani_skip_count = 0;
363 spin_lock_irqsave(&sc->sc_pm_lock, flags);
364 sc->ps_flags &= ~PS_WAIT_FOR_ANI;
365 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
355 366
356 ath9k_ps_wakeup(sc); 367 ath9k_ps_wakeup(sc);
357 368
@@ -423,11 +434,15 @@ set_timer:
423 cal_interval = min(cal_interval, (u32)short_cal_interval); 434 cal_interval = min(cal_interval, (u32)short_cal_interval);
424 435
425 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 436 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
426 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD) && ah->caldata) { 437
427 if (!ah->caldata->paprd_done) 438 if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
439 if (!ah->caldata->paprd_done) {
428 ieee80211_queue_work(sc->hw, &sc->paprd_work); 440 ieee80211_queue_work(sc->hw, &sc->paprd_work);
429 else if (!ah->paprd_table_write_done) 441 } else if (!ah->paprd_table_write_done) {
442 ath9k_ps_wakeup(sc);
430 ath_paprd_activate(sc); 443 ath_paprd_activate(sc);
444 ath9k_ps_restore(sc);
445 }
431 } 446 }
432} 447}
433 448
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dd45edfa6bae..be30a9af1528 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -131,7 +131,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
131 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 131 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
132 PS_WAIT_FOR_CAB | 132 PS_WAIT_FOR_CAB |
133 PS_WAIT_FOR_PSPOLL_DATA | 133 PS_WAIT_FOR_PSPOLL_DATA |
134 PS_WAIT_FOR_TX_ACK))) { 134 PS_WAIT_FOR_TX_ACK |
135 PS_WAIT_FOR_ANI))) {
135 mode = ATH9K_PM_NETWORK_SLEEP; 136 mode = ATH9K_PM_NETWORK_SLEEP;
136 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah)) 137 if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
137 ath9k_btcoex_stop_gen_timer(sc); 138 ath9k_btcoex_stop_gen_timer(sc);
@@ -292,6 +293,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
292 goto out; 293 goto out;
293 } 294 }
294 295
296 if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
297 (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
298 ath9k_mci_set_txpower(sc, true, false);
299
295 if (!ath_complete_reset(sc, true)) 300 if (!ath_complete_reset(sc, true))
296 r = -EIO; 301 r = -EIO;
297 302
@@ -326,11 +331,7 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
326 u8 density; 331 u8 density;
327 an = (struct ath_node *)sta->drv_priv; 332 an = (struct ath_node *)sta->drv_priv;
328 333
329#ifdef CONFIG_ATH9K_DEBUGFS 334 an->sc = sc;
330 spin_lock(&sc->nodes_lock);
331 list_add(&an->list, &sc->nodes);
332 spin_unlock(&sc->nodes_lock);
333#endif
334 an->sta = sta; 335 an->sta = sta;
335 an->vif = vif; 336 an->vif = vif;
336 337
@@ -347,13 +348,6 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
347{ 348{
348 struct ath_node *an = (struct ath_node *)sta->drv_priv; 349 struct ath_node *an = (struct ath_node *)sta->drv_priv;
349 350
350#ifdef CONFIG_ATH9K_DEBUGFS
351 spin_lock(&sc->nodes_lock);
352 list_del(&an->list);
353 spin_unlock(&sc->nodes_lock);
354 an->sta = NULL;
355#endif
356
357 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 351 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
358 ath_tx_node_cleanup(sc, an); 352 ath_tx_node_cleanup(sc, an);
359} 353}
@@ -489,17 +483,6 @@ irqreturn_t ath_isr(int irq, void *dev)
489 if (status & SCHED_INTR) 483 if (status & SCHED_INTR)
490 sched = true; 484 sched = true;
491 485
492#ifdef CONFIG_PM_SLEEP
493 if (status & ATH9K_INT_BMISS) {
494 if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
495 ath_dbg(common, ANY, "during WoW we got a BMISS\n");
496 atomic_inc(&sc->wow_got_bmiss_intr);
497 atomic_dec(&sc->wow_sleep_proc_intr);
498 }
499 ath_dbg(common, INTERRUPT, "beacon miss interrupt\n");
500 }
501#endif
502
503 /* 486 /*
504 * If a FATAL or RXORN interrupt is received, we have to reset the 487 * If a FATAL or RXORN interrupt is received, we have to reset the
505 * chip immediately. 488 * chip immediately.
@@ -518,7 +501,15 @@ irqreturn_t ath_isr(int irq, void *dev)
518 501
519 goto chip_reset; 502 goto chip_reset;
520 } 503 }
521 504#ifdef CONFIG_PM_SLEEP
505 if (status & ATH9K_INT_BMISS) {
506 if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
507 ath_dbg(common, ANY, "during WoW we got a BMISS\n");
508 atomic_inc(&sc->wow_got_bmiss_intr);
509 atomic_dec(&sc->wow_sleep_proc_intr);
510 }
511 }
512#endif
522 if (status & ATH9K_INT_SWBA) 513 if (status & ATH9K_INT_SWBA)
523 tasklet_schedule(&sc->bcon_tasklet); 514 tasklet_schedule(&sc->bcon_tasklet);
524 515
@@ -681,9 +672,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
681 672
682 spin_unlock_bh(&sc->sc_pcu_lock); 673 spin_unlock_bh(&sc->sc_pcu_lock);
683 674
684 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
685 common->bus_ops->extn_synch_en(common);
686
687 mutex_unlock(&sc->mutex); 675 mutex_unlock(&sc->mutex);
688 676
689 ath9k_ps_restore(sc); 677 ath9k_ps_restore(sc);
@@ -919,8 +907,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
919 ath9k_vif_iter(iter_data, vif->addr, vif); 907 ath9k_vif_iter(iter_data, vif->addr, vif);
920 908
921 /* Get list of all active MAC addresses */ 909 /* Get list of all active MAC addresses */
922 ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, 910 ieee80211_iterate_active_interfaces_atomic(
923 iter_data); 911 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
912 ath9k_vif_iter, iter_data);
924} 913}
925 914
926/* Called with sc->mutex held. */ 915/* Called with sc->mutex held. */
@@ -970,8 +959,9 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
970 if (ah->opmode == NL80211_IFTYPE_STATION && 959 if (ah->opmode == NL80211_IFTYPE_STATION &&
971 old_opmode == NL80211_IFTYPE_AP && 960 old_opmode == NL80211_IFTYPE_AP &&
972 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 961 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
973 ieee80211_iterate_active_interfaces_atomic(sc->hw, 962 ieee80211_iterate_active_interfaces_atomic(
974 ath9k_sta_vif_iter, sc); 963 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
964 ath9k_sta_vif_iter, sc);
975 } 965 }
976} 966}
977 967
@@ -1324,7 +1314,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
1324 struct ath9k_tx_queue_info qi; 1314 struct ath9k_tx_queue_info qi;
1325 int ret = 0; 1315 int ret = 0;
1326 1316
1327 if (queue >= WME_NUM_AC) 1317 if (queue >= IEEE80211_NUM_ACS)
1328 return 0; 1318 return 0;
1329 1319
1330 txq = sc->tx.txq_map[queue]; 1320 txq = sc->tx.txq_map[queue];
@@ -1449,6 +1439,9 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
1449 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 1439 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
1450 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1440 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1451 1441
1442 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
1443 ath9k_mci_update_wlan_channels(sc, false);
1444
1452 ath_dbg(common, CONFIG, 1445 ath_dbg(common, CONFIG,
1453 "Primary Station interface: %pM, BSSID: %pM\n", 1446 "Primary Station interface: %pM, BSSID: %pM\n",
1454 vif->addr, common->curbssid); 1447 vif->addr, common->curbssid);
@@ -1497,14 +1490,17 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1497 clear_bit(SC_OP_BEACONS, &sc->sc_flags); 1490 clear_bit(SC_OP_BEACONS, &sc->sc_flags);
1498 } 1491 }
1499 1492
1500 ieee80211_iterate_active_interfaces_atomic(sc->hw, 1493 ieee80211_iterate_active_interfaces_atomic(
1501 ath9k_bss_assoc_iter, sc); 1494 sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
1495 ath9k_bss_assoc_iter, sc);
1502 1496
1503 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) && 1497 if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
1504 ah->opmode == NL80211_IFTYPE_STATION) { 1498 ah->opmode == NL80211_IFTYPE_STATION) {
1505 memset(common->curbssid, 0, ETH_ALEN); 1499 memset(common->curbssid, 0, ETH_ALEN);
1506 common->curaid = 0; 1500 common->curaid = 0;
1507 ath9k_hw_write_associd(sc->sc_ah); 1501 ath9k_hw_write_associd(sc->sc_ah);
1502 if (ath9k_hw_mci_is_enabled(sc->sc_ah))
1503 ath9k_mci_update_wlan_channels(sc, true);
1508 } 1504 }
1509 } 1505 }
1510 1506
@@ -1887,134 +1883,6 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
1887 return 0; 1883 return 0;
1888} 1884}
1889 1885
1890#ifdef CONFIG_ATH9K_DEBUGFS
1891
1892/* Ethtool support for get-stats */
1893
1894#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
1895static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1896 "tx_pkts_nic",
1897 "tx_bytes_nic",
1898 "rx_pkts_nic",
1899 "rx_bytes_nic",
1900 AMKSTR(d_tx_pkts),
1901 AMKSTR(d_tx_bytes),
1902 AMKSTR(d_tx_mpdus_queued),
1903 AMKSTR(d_tx_mpdus_completed),
1904 AMKSTR(d_tx_mpdu_xretries),
1905 AMKSTR(d_tx_aggregates),
1906 AMKSTR(d_tx_ampdus_queued_hw),
1907 AMKSTR(d_tx_ampdus_queued_sw),
1908 AMKSTR(d_tx_ampdus_completed),
1909 AMKSTR(d_tx_ampdu_retries),
1910 AMKSTR(d_tx_ampdu_xretries),
1911 AMKSTR(d_tx_fifo_underrun),
1912 AMKSTR(d_tx_op_exceeded),
1913 AMKSTR(d_tx_timer_expiry),
1914 AMKSTR(d_tx_desc_cfg_err),
1915 AMKSTR(d_tx_data_underrun),
1916 AMKSTR(d_tx_delim_underrun),
1917
1918 "d_rx_decrypt_crc_err",
1919 "d_rx_phy_err",
1920 "d_rx_mic_err",
1921 "d_rx_pre_delim_crc_err",
1922 "d_rx_post_delim_crc_err",
1923 "d_rx_decrypt_busy_err",
1924
1925 "d_rx_phyerr_radar",
1926 "d_rx_phyerr_ofdm_timing",
1927 "d_rx_phyerr_cck_timing",
1928
1929};
1930#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
1931
1932static void ath9k_get_et_strings(struct ieee80211_hw *hw,
1933 struct ieee80211_vif *vif,
1934 u32 sset, u8 *data)
1935{
1936 if (sset == ETH_SS_STATS)
1937 memcpy(data, *ath9k_gstrings_stats,
1938 sizeof(ath9k_gstrings_stats));
1939}
1940
1941static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
1942 struct ieee80211_vif *vif, int sset)
1943{
1944 if (sset == ETH_SS_STATS)
1945 return ATH9K_SSTATS_LEN;
1946 return 0;
1947}
1948
1949#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
1950#define AWDATA(elem) \
1951 do { \
1952 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
1953 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
1954 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
1955 data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
1956 } while (0)
1957
1958#define AWDATA_RX(elem) \
1959 do { \
1960 data[i++] = sc->debug.stats.rxstats.elem; \
1961 } while (0)
1962
1963static void ath9k_get_et_stats(struct ieee80211_hw *hw,
1964 struct ieee80211_vif *vif,
1965 struct ethtool_stats *stats, u64 *data)
1966{
1967 struct ath_softc *sc = hw->priv;
1968 int i = 0;
1969
1970 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
1971 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
1972 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
1973 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
1974 data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
1975 sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
1976 sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
1977 sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
1978 AWDATA_RX(rx_pkts_all);
1979 AWDATA_RX(rx_bytes_all);
1980
1981 AWDATA(tx_pkts_all);
1982 AWDATA(tx_bytes_all);
1983 AWDATA(queued);
1984 AWDATA(completed);
1985 AWDATA(xretries);
1986 AWDATA(a_aggr);
1987 AWDATA(a_queued_hw);
1988 AWDATA(a_queued_sw);
1989 AWDATA(a_completed);
1990 AWDATA(a_retries);
1991 AWDATA(a_xretries);
1992 AWDATA(fifo_underrun);
1993 AWDATA(xtxop);
1994 AWDATA(timer_exp);
1995 AWDATA(desc_cfg_err);
1996 AWDATA(data_underrun);
1997 AWDATA(delim_underrun);
1998
1999 AWDATA_RX(decrypt_crc_err);
2000 AWDATA_RX(phy_err);
2001 AWDATA_RX(mic_err);
2002 AWDATA_RX(pre_delim_crc_err);
2003 AWDATA_RX(post_delim_crc_err);
2004 AWDATA_RX(decrypt_busy_err);
2005
2006 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
2007 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
2008 AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
2009
2010 WARN_ON(i != ATH9K_SSTATS_LEN);
2011}
2012
2013/* End of ethtool get-stats functions */
2014
2015#endif
2016
2017
2018#ifdef CONFIG_PM_SLEEP 1886#ifdef CONFIG_PM_SLEEP
2019 1887
2020static void ath9k_wow_map_triggers(struct ath_softc *sc, 1888static void ath9k_wow_map_triggers(struct ath_softc *sc,
@@ -2408,7 +2276,12 @@ struct ieee80211_ops ath9k_ops = {
2408 2276
2409#ifdef CONFIG_ATH9K_DEBUGFS 2277#ifdef CONFIG_ATH9K_DEBUGFS
2410 .get_et_sset_count = ath9k_get_et_sset_count, 2278 .get_et_sset_count = ath9k_get_et_sset_count,
2411 .get_et_stats = ath9k_get_et_stats, 2279 .get_et_stats = ath9k_get_et_stats,
2412 .get_et_strings = ath9k_get_et_strings, 2280 .get_et_strings = ath9k_get_et_strings,
2281#endif
2282
2283#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
2284 .sta_add_debugfs = ath9k_sta_add_debugfs,
2285 .sta_remove_debugfs = ath9k_sta_remove_debugfs,
2413#endif 2286#endif
2414}; 2287};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index ec2d7c807567..5c02702f21e7 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -43,6 +43,7 @@ static bool ath_mci_add_profile(struct ath_common *common,
43 struct ath_mci_profile_info *info) 43 struct ath_mci_profile_info *info)
44{ 44{
45 struct ath_mci_profile_info *entry; 45 struct ath_mci_profile_info *entry;
46 u8 voice_priority[] = { 110, 110, 110, 112, 110, 110, 114, 116, 118 };
46 47
47 if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) && 48 if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
48 (info->type == MCI_GPM_COEX_PROFILE_VOICE)) 49 (info->type == MCI_GPM_COEX_PROFILE_VOICE))
@@ -59,6 +60,12 @@ static bool ath_mci_add_profile(struct ath_common *common,
59 memcpy(entry, info, 10); 60 memcpy(entry, info, 10);
60 INC_PROF(mci, info); 61 INC_PROF(mci, info);
61 list_add_tail(&entry->list, &mci->info); 62 list_add_tail(&entry->list, &mci->info);
63 if (info->type == MCI_GPM_COEX_PROFILE_VOICE) {
64 if (info->voice_type < sizeof(voice_priority))
65 mci->voice_priority = voice_priority[info->voice_type];
66 else
67 mci->voice_priority = 110;
68 }
62 69
63 return true; 70 return true;
64} 71}
@@ -150,7 +157,7 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
150 * For single PAN/FTP profile, allocate 35% for BT 157 * For single PAN/FTP profile, allocate 35% for BT
151 * to improve WLAN throughput. 158 * to improve WLAN throughput.
152 */ 159 */
153 btcoex->duty_cycle = 35; 160 btcoex->duty_cycle = AR_SREV_9565(sc->sc_ah) ? 40 : 35;
154 btcoex->btcoex_period = 53; 161 btcoex->btcoex_period = 53;
155 ath_dbg(common, MCI, 162 ath_dbg(common, MCI,
156 "Single PAN/FTP bt period %d ms dutycycle %d\n", 163 "Single PAN/FTP bt period %d ms dutycycle %d\n",
@@ -200,23 +207,6 @@ skip_tuning:
200 ath9k_btcoex_timer_resume(sc); 207 ath9k_btcoex_timer_resume(sc);
201} 208}
202 209
203static void ath_mci_wait_btcal_done(struct ath_softc *sc)
204{
205 struct ath_hw *ah = sc->sc_ah;
206
207 /* Stop tx & rx */
208 ieee80211_stop_queues(sc->hw);
209 ath_stoprecv(sc);
210 ath_drain_all_txq(sc, false);
211
212 /* Wait for cal done */
213 ar9003_mci_start_reset(ah, ah->curchan);
214
215 /* Resume tx & rx */
216 ath_startrecv(sc);
217 ieee80211_wake_queues(sc->hw);
218}
219
220static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) 210static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
221{ 211{
222 struct ath_hw *ah = sc->sc_ah; 212 struct ath_hw *ah = sc->sc_ah;
@@ -228,7 +218,7 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
228 case MCI_GPM_BT_CAL_REQ: 218 case MCI_GPM_BT_CAL_REQ:
229 if (mci_hw->bt_state == MCI_BT_AWAKE) { 219 if (mci_hw->bt_state == MCI_BT_AWAKE) {
230 mci_hw->bt_state = MCI_BT_CAL_START; 220 mci_hw->bt_state = MCI_BT_CAL_START;
231 ath_mci_wait_btcal_done(sc); 221 ath9k_queue_reset(sc, RESET_TYPE_MCI);
232 } 222 }
233 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); 223 ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
234 break; 224 break;
@@ -250,6 +240,58 @@ static void ath9k_mci_work(struct work_struct *work)
250 ath_mci_update_scheme(sc); 240 ath_mci_update_scheme(sc);
251} 241}
252 242
243static void ath_mci_update_stomp_txprio(u8 cur_txprio, u8 *stomp_prio)
244{
245 if (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_NONE])
246 stomp_prio[ATH_BTCOEX_STOMP_NONE] = cur_txprio;
247
248 if (cur_txprio > stomp_prio[ATH_BTCOEX_STOMP_ALL])
249 stomp_prio[ATH_BTCOEX_STOMP_ALL] = cur_txprio;
250
251 if ((cur_txprio > ATH_MCI_HI_PRIO) &&
252 (cur_txprio < stomp_prio[ATH_BTCOEX_STOMP_LOW]))
253 stomp_prio[ATH_BTCOEX_STOMP_LOW] = cur_txprio;
254}
255
256static void ath_mci_set_concur_txprio(struct ath_softc *sc)
257{
258 struct ath_btcoex *btcoex = &sc->btcoex;
259 struct ath_mci_profile *mci = &btcoex->mci;
260 u8 stomp_txprio[ATH_BTCOEX_STOMP_MAX];
261
262 memset(stomp_txprio, 0, sizeof(stomp_txprio));
263 if (mci->num_mgmt) {
264 stomp_txprio[ATH_BTCOEX_STOMP_ALL] = ATH_MCI_INQUIRY_PRIO;
265 if (!mci->num_pan && !mci->num_other_acl)
266 stomp_txprio[ATH_BTCOEX_STOMP_NONE] =
267 ATH_MCI_INQUIRY_PRIO;
268 } else {
269 u8 prof_prio[] = { 50, 90, 94, 52 };/* RFCOMM, A2DP, HID, PAN */
270
271 stomp_txprio[ATH_BTCOEX_STOMP_LOW] =
272 stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0xff;
273
274 if (mci->num_sco)
275 ath_mci_update_stomp_txprio(mci->voice_priority,
276 stomp_txprio);
277 if (mci->num_other_acl)
278 ath_mci_update_stomp_txprio(prof_prio[0], stomp_txprio);
279 if (mci->num_a2dp)
280 ath_mci_update_stomp_txprio(prof_prio[1], stomp_txprio);
281 if (mci->num_hid)
282 ath_mci_update_stomp_txprio(prof_prio[2], stomp_txprio);
283 if (mci->num_pan)
284 ath_mci_update_stomp_txprio(prof_prio[3], stomp_txprio);
285
286 if (stomp_txprio[ATH_BTCOEX_STOMP_NONE] == 0xff)
287 stomp_txprio[ATH_BTCOEX_STOMP_NONE] = 0;
288
289 if (stomp_txprio[ATH_BTCOEX_STOMP_LOW] == 0xff)
290 stomp_txprio[ATH_BTCOEX_STOMP_LOW] = 0;
291 }
292 ath9k_hw_btcoex_set_concur_txprio(sc->sc_ah, stomp_txprio);
293}
294
253static u8 ath_mci_process_profile(struct ath_softc *sc, 295static u8 ath_mci_process_profile(struct ath_softc *sc,
254 struct ath_mci_profile_info *info) 296 struct ath_mci_profile_info *info)
255{ 297{
@@ -281,6 +323,7 @@ static u8 ath_mci_process_profile(struct ath_softc *sc,
281 } else 323 } else
282 ath_mci_del_profile(common, mci, entry); 324 ath_mci_del_profile(common, mci, entry);
283 325
326 ath_mci_set_concur_txprio(sc);
284 return 1; 327 return 1;
285} 328}
286 329
@@ -314,6 +357,7 @@ static u8 ath_mci_process_status(struct ath_softc *sc,
314 mci->num_mgmt++; 357 mci->num_mgmt++;
315 } while (++i < ATH_MCI_MAX_PROFILE); 358 } while (++i < ATH_MCI_MAX_PROFILE);
316 359
360 ath_mci_set_concur_txprio(sc);
317 if (old_num_mgmt != mci->num_mgmt) 361 if (old_num_mgmt != mci->num_mgmt)
318 return 1; 362 return 1;
319 363
@@ -518,6 +562,8 @@ void ath_mci_intr(struct ath_softc *sc)
518 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM; 562 mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
519 563
520 while (more_data == MCI_GPM_MORE) { 564 while (more_data == MCI_GPM_MORE) {
565 if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
566 return;
521 567
522 pgpm = mci->gpm_buf.bf_addr; 568 pgpm = mci->gpm_buf.bf_addr;
523 offset = ar9003_mci_get_next_gpm_offset(ah, false, 569 offset = ar9003_mci_get_next_gpm_offset(ah, false,
@@ -600,3 +646,130 @@ void ath_mci_enable(struct ath_softc *sc)
600 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) 646 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
601 sc->sc_ah->imask |= ATH9K_INT_MCI; 647 sc->sc_ah->imask |= ATH9K_INT_MCI;
602} 648}
649
650void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
651{
652 struct ath_hw *ah = sc->sc_ah;
653 struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
654 struct ath9k_channel *chan = ah->curchan;
655 u32 channelmap[] = {0x00000000, 0xffff0000, 0xffffffff, 0x7fffffff};
656 int i;
657 s16 chan_start, chan_end;
658 u16 wlan_chan;
659
660 if (!chan || !IS_CHAN_2GHZ(chan))
661 return;
662
663 if (allow_all)
664 goto send_wlan_chan;
665
666 wlan_chan = chan->channel - 2402;
667
668 chan_start = wlan_chan - 10;
669 chan_end = wlan_chan + 10;
670
671 if (chan->chanmode == CHANNEL_G_HT40PLUS)
672 chan_end += 20;
673 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
674 chan_start -= 20;
675
676 /* adjust side band */
677 chan_start -= 7;
678 chan_end += 7;
679
680 if (chan_start <= 0)
681 chan_start = 0;
682 if (chan_end >= ATH_MCI_NUM_BT_CHANNELS)
683 chan_end = ATH_MCI_NUM_BT_CHANNELS - 1;
684
685 ath_dbg(ath9k_hw_common(ah), MCI,
686 "WLAN current channel %d mask BT channel %d - %d\n",
687 wlan_chan, chan_start, chan_end);
688
689 for (i = chan_start; i < chan_end; i++)
690 MCI_GPM_CLR_CHANNEL_BIT(&channelmap, i);
691
692send_wlan_chan:
693 /* update and send wlan channels info to BT */
694 for (i = 0; i < 4; i++)
695 mci->wlan_channels[i] = channelmap[i];
696 ar9003_mci_send_wlan_channels(ah);
697 ar9003_mci_state(ah, MCI_STATE_SEND_VERSION_QUERY);
698}
699
700void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
701 bool concur_tx)
702{
703 struct ath_hw *ah = sc->sc_ah;
704 struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
705 bool old_concur_tx = mci_hw->concur_tx;
706
707 if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX)) {
708 mci_hw->concur_tx = false;
709 return;
710 }
711
712 if (!IS_CHAN_2GHZ(ah->curchan))
713 return;
714
715 if (setchannel) {
716 struct ath9k_hw_cal_data *caldata = &sc->caldata;
717 if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
718 (ah->curchan->channel > caldata->channel) &&
719 (ah->curchan->channel <= caldata->channel + 20))
720 return;
721 if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
722 (ah->curchan->channel < caldata->channel) &&
723 (ah->curchan->channel >= caldata->channel - 20))
724 return;
725 mci_hw->concur_tx = false;
726 } else
727 mci_hw->concur_tx = concur_tx;
728
729 if (old_concur_tx != mci_hw->concur_tx)
730 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
731}
732
733static void ath9k_mci_stomp_audio(struct ath_softc *sc)
734{
735 struct ath_hw *ah = sc->sc_ah;
736 struct ath_btcoex *btcoex = &sc->btcoex;
737 struct ath_mci_profile *mci = &btcoex->mci;
738
739 if (!mci->num_sco && !mci->num_a2dp)
740 return;
741
742 if (ah->stats.avgbrssi > 25) {
743 btcoex->stomp_audio = 0;
744 return;
745 }
746
747 btcoex->stomp_audio++;
748}
749void ath9k_mci_update_rssi(struct ath_softc *sc)
750{
751 struct ath_hw *ah = sc->sc_ah;
752 struct ath_btcoex *btcoex = &sc->btcoex;
753 struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
754
755 ath9k_mci_stomp_audio(sc);
756
757 if (!(mci_hw->config & ATH_MCI_CONFIG_CONCUR_TX))
758 return;
759
760 if (ah->stats.avgbrssi >= 40) {
761 if (btcoex->rssi_count < 0)
762 btcoex->rssi_count = 0;
763 if (++btcoex->rssi_count >= ATH_MCI_CONCUR_TX_SWITCH) {
764 btcoex->rssi_count = 0;
765 ath9k_mci_set_txpower(sc, false, true);
766 }
767 } else {
768 if (btcoex->rssi_count > 0)
769 btcoex->rssi_count = 0;
770 if (--btcoex->rssi_count <= -ATH_MCI_CONCUR_TX_SWITCH) {
771 btcoex->rssi_count = 0;
772 ath9k_mci_set_txpower(sc, false, false);
773 }
774 }
775}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index fc14eea034eb..06958837620c 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -32,6 +32,27 @@
32#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\ 32#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
33 ATH_MCI_MAX_SCO_PROFILE) 33 ATH_MCI_MAX_SCO_PROFILE)
34 34
35#define ATH_MCI_INQUIRY_PRIO 62
36#define ATH_MCI_HI_PRIO 60
37#define ATH_MCI_NUM_BT_CHANNELS 79
38#define ATH_MCI_CONCUR_TX_SWITCH 5
39
40#define MCI_GPM_SET_CHANNEL_BIT(_p_gpm, _bt_chan) \
41 do { \
42 if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
43 *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
44 (_bt_chan / 8)) |= (1 << (_bt_chan & 7)); \
45 } \
46 } while (0)
47
48#define MCI_GPM_CLR_CHANNEL_BIT(_p_gpm, _bt_chan) \
49 do { \
50 if (_bt_chan < ATH_MCI_NUM_BT_CHANNELS) { \
51 *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_CHANNEL_MAP + \
52 (_bt_chan / 8)) &= ~(1 << (_bt_chan & 7));\
53 } \
54 } while (0)
55
35#define INC_PROF(_mci, _info) do { \ 56#define INC_PROF(_mci, _info) do { \
36 switch (_info->type) { \ 57 switch (_info->type) { \
37 case MCI_GPM_COEX_PROFILE_RFCOMM:\ 58 case MCI_GPM_COEX_PROFILE_RFCOMM:\
@@ -49,6 +70,7 @@
49 _mci->num_pan++; \ 70 _mci->num_pan++; \
50 break; \ 71 break; \
51 case MCI_GPM_COEX_PROFILE_VOICE: \ 72 case MCI_GPM_COEX_PROFILE_VOICE: \
73 case MCI_GPM_COEX_PROFILE_A2DPVO:\
52 _mci->num_sco++; \ 74 _mci->num_sco++; \
53 break; \ 75 break; \
54 default: \ 76 default: \
@@ -73,6 +95,7 @@
73 _mci->num_pan--; \ 95 _mci->num_pan--; \
74 break; \ 96 break; \
75 case MCI_GPM_COEX_PROFILE_VOICE: \ 97 case MCI_GPM_COEX_PROFILE_VOICE: \
98 case MCI_GPM_COEX_PROFILE_A2DPVO:\
76 _mci->num_sco--; \ 99 _mci->num_sco--; \
77 break; \ 100 break; \
78 default: \ 101 default: \
@@ -113,6 +136,7 @@ struct ath_mci_profile {
113 u8 num_pan; 136 u8 num_pan;
114 u8 num_other_acl; 137 u8 num_other_acl;
115 u8 num_bdr; 138 u8 num_bdr;
139 u8 voice_priority;
116}; 140};
117 141
118struct ath_mci_buf { 142struct ath_mci_buf {
@@ -130,13 +154,25 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
130int ath_mci_setup(struct ath_softc *sc); 154int ath_mci_setup(struct ath_softc *sc);
131void ath_mci_cleanup(struct ath_softc *sc); 155void ath_mci_cleanup(struct ath_softc *sc);
132void ath_mci_intr(struct ath_softc *sc); 156void ath_mci_intr(struct ath_softc *sc);
157void ath9k_mci_update_rssi(struct ath_softc *sc);
133 158
134#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 159#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
135void ath_mci_enable(struct ath_softc *sc); 160void ath_mci_enable(struct ath_softc *sc);
161void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all);
162void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
163 bool concur_tx);
136#else 164#else
137static inline void ath_mci_enable(struct ath_softc *sc) 165static inline void ath_mci_enable(struct ath_softc *sc)
138{ 166{
139} 167}
168static inline void ath9k_mci_update_wlan_channels(struct ath_softc *sc,
169 bool allow_all)
170{
171}
172static inline void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
173 bool concur_tx)
174{
175}
140#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 176#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
141 177
142#endif /* MCI_H*/ 178#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f088f4bf9a26..8e9b826f878b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -96,17 +96,6 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
96 return true; 96 return true;
97} 97}
98 98
99static void ath_pci_extn_synch_enable(struct ath_common *common)
100{
101 struct ath_softc *sc = (struct ath_softc *) common->priv;
102 struct pci_dev *pdev = to_pci_dev(sc->dev);
103 u8 lnkctl;
104
105 pci_read_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, &lnkctl);
106 lnkctl |= PCI_EXP_LNKCTL_ES;
107 pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
108}
109
110/* Need to be called after we discover btcoex capabilities */ 99/* Need to be called after we discover btcoex capabilities */
111static void ath_pci_aspm_init(struct ath_common *common) 100static void ath_pci_aspm_init(struct ath_common *common)
112{ 101{
@@ -153,7 +142,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = {
153 .ath_bus_type = ATH_PCI, 142 .ath_bus_type = ATH_PCI,
154 .read_cachesize = ath_pci_read_cachesize, 143 .read_cachesize = ath_pci_read_cachesize,
155 .eeprom_read = ath_pci_eeprom_read, 144 .eeprom_read = ath_pci_eeprom_read,
156 .extn_synch_en = ath_pci_extn_synch_enable,
157 .aspm_init = ath_pci_aspm_init, 145 .aspm_init = ath_pci_aspm_init,
158}; 146};
159 147
@@ -299,7 +287,7 @@ static void ath_pci_remove(struct pci_dev *pdev)
299 pci_release_region(pdev, 0); 287 pci_release_region(pdev, 0);
300} 288}
301 289
302#ifdef CONFIG_PM 290#ifdef CONFIG_PM_SLEEP
303 291
304static int ath_pci_suspend(struct device *device) 292static int ath_pci_suspend(struct device *device)
305{ 293{
@@ -345,22 +333,15 @@ static int ath_pci_resume(struct device *device)
345 return 0; 333 return 0;
346} 334}
347 335
348static const struct dev_pm_ops ath9k_pm_ops = { 336static SIMPLE_DEV_PM_OPS(ath9k_pm_ops, ath_pci_suspend, ath_pci_resume);
349 .suspend = ath_pci_suspend,
350 .resume = ath_pci_resume,
351 .freeze = ath_pci_suspend,
352 .thaw = ath_pci_resume,
353 .poweroff = ath_pci_suspend,
354 .restore = ath_pci_resume,
355};
356 337
357#define ATH9K_PM_OPS (&ath9k_pm_ops) 338#define ATH9K_PM_OPS (&ath9k_pm_ops)
358 339
359#else /* !CONFIG_PM */ 340#else /* !CONFIG_PM_SLEEP */
360 341
361#define ATH9K_PM_OPS NULL 342#define ATH9K_PM_OPS NULL
362 343
363#endif /* !CONFIG_PM */ 344#endif /* !CONFIG_PM_SLEEP */
364 345
365 346
366MODULE_DEVICE_TABLE(pci, ath_pci_id_table); 347MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 27ed80b54881..714558d1ba78 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -982,16 +982,6 @@ static void ath_rc_update_per(struct ath_softc *sc,
982 } 982 }
983} 983}
984 984
985static void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
986 int xretries, int retries, u8 per)
987{
988 struct ath_rc_stats *stats = &rc->rcstats[rix];
989
990 stats->xretries += xretries;
991 stats->retries += retries;
992 stats->per = per;
993}
994
995static void ath_rc_update_ht(struct ath_softc *sc, 985static void ath_rc_update_ht(struct ath_softc *sc,
996 struct ath_rate_priv *ath_rc_priv, 986 struct ath_rate_priv *ath_rc_priv,
997 struct ieee80211_tx_info *tx_info, 987 struct ieee80211_tx_info *tx_info,
@@ -1065,14 +1055,6 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1065 1055
1066} 1056}
1067 1057
1068static void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1069{
1070 struct ath_rc_stats *stats;
1071
1072 stats = &rc->rcstats[final_rate];
1073 stats->success++;
1074}
1075
1076static void ath_rc_tx_status(struct ath_softc *sc, 1058static void ath_rc_tx_status(struct ath_softc *sc,
1077 struct ath_rate_priv *ath_rc_priv, 1059 struct ath_rate_priv *ath_rc_priv,
1078 struct sk_buff *skb) 1060 struct sk_buff *skb)
@@ -1350,7 +1332,25 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1350 } 1332 }
1351} 1333}
1352 1334
1353#ifdef CONFIG_ATH9K_DEBUGFS 1335#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
1336
1337void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
1338{
1339 struct ath_rc_stats *stats;
1340
1341 stats = &rc->rcstats[final_rate];
1342 stats->success++;
1343}
1344
1345void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
1346 int xretries, int retries, u8 per)
1347{
1348 struct ath_rc_stats *stats = &rc->rcstats[rix];
1349
1350 stats->xretries += xretries;
1351 stats->retries += retries;
1352 stats->per = per;
1353}
1354 1354
1355static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, 1355static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
1356 size_t count, loff_t *ppos) 1356 size_t count, loff_t *ppos)
@@ -1428,10 +1428,17 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
1428 struct dentry *dir) 1428 struct dentry *dir)
1429{ 1429{
1430 struct ath_rate_priv *rc = priv_sta; 1430 struct ath_rate_priv *rc = priv_sta;
1431 debugfs_create_file("rc_stats", S_IRUGO, dir, rc, &fops_rcstat); 1431 rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
1432 dir, rc, &fops_rcstat);
1433}
1434
1435static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
1436{
1437 struct ath_rate_priv *rc = priv_sta;
1438 debugfs_remove(rc->debugfs_rcstats);
1432} 1439}
1433 1440
1434#endif /* CONFIG_ATH9K_DEBUGFS */ 1441#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
1435 1442
1436static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 1443static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
1437{ 1444{
@@ -1476,8 +1483,10 @@ static struct rate_control_ops ath_rate_ops = {
1476 .free = ath_rate_free, 1483 .free = ath_rate_free,
1477 .alloc_sta = ath_rate_alloc_sta, 1484 .alloc_sta = ath_rate_alloc_sta,
1478 .free_sta = ath_rate_free_sta, 1485 .free_sta = ath_rate_free_sta,
1479#ifdef CONFIG_ATH9K_DEBUGFS 1486
1487#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
1480 .add_sta_debugfs = ath_rate_add_sta_debugfs, 1488 .add_sta_debugfs = ath_rate_add_sta_debugfs,
1489 .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
1481#endif 1490#endif
1482}; 1491};
1483 1492
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 268e67dc5fb2..267dbfcfaa96 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -211,10 +211,26 @@ struct ath_rate_priv {
211 struct ath_rateset neg_ht_rates; 211 struct ath_rateset neg_ht_rates;
212 const struct ath_rate_table *rate_table; 212 const struct ath_rate_table *rate_table;
213 213
214#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
214 struct dentry *debugfs_rcstats; 215 struct dentry *debugfs_rcstats;
215 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 216 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
217#endif
216}; 218};
217 219
220#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
221void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
222void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
223 int xretries, int retries, u8 per);
224#else
225static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
226{
227}
228static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
229 int xretries, int retries, u8 per)
230{
231}
232#endif
233
218#ifdef CONFIG_ATH9K_RATE_CONTROL 234#ifdef CONFIG_ATH9K_RATE_CONTROL
219int ath_rate_control_register(void); 235int ath_rate_control_register(void);
220void ath_rate_control_unregister(void); 236void ath_rate_control_unregister(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 83d16e7ed272..d4df98a938bf 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -976,7 +976,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common,
976 rx_status->freq = hw->conf.channel->center_freq; 976 rx_status->freq = hw->conf.channel->center_freq;
977 rx_status->signal = ah->noise + rx_stats->rs_rssi; 977 rx_status->signal = ah->noise + rx_stats->rs_rssi;
978 rx_status->antenna = rx_stats->rs_antenna; 978 rx_status->antenna = rx_stats->rs_antenna;
979 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 979 rx_status->flag |= RX_FLAG_MACTIME_END;
980 if (rx_stats->rs_moreaggr) 980 if (rx_stats->rs_moreaggr)
981 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 981 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
982 982
@@ -1105,7 +1105,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1105 else 1105 else
1106 rs.is_mybeacon = false; 1106 rs.is_mybeacon = false;
1107 1107
1108 sc->rx.num_pkts++; 1108 if (ieee80211_is_data_present(hdr->frame_control) &&
1109 !ieee80211_is_qos_nullfunc(hdr->frame_control))
1110 sc->rx.num_pkts++;
1111
1109 ath_debug_stat_rx(sc, &rs); 1112 ath_debug_stat_rx(sc, &rs);
1110 1113
1111 /* 1114 /*
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 4e6760f8596d..ad3c82c09177 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -907,10 +907,6 @@
907 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \ 907 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
908 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20)) 908 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9462_20))
909 909
910#define AR_SREV_9462_20_OR_LATER(_ah) \
911 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
912 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
913
914#define AR_SREV_9565(_ah) \ 910#define AR_SREV_9565(_ah) \
915 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565)) 911 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9565))
916 912
@@ -2315,6 +2311,8 @@ enum {
2315#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2)) 2311#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
2316#define AR_BTCOEX_WL_LNA 0x1940 2312#define AR_BTCOEX_WL_LNA 0x1940
2317#define AR_BTCOEX_RFGAIN_CTRL 0x1944 2313#define AR_BTCOEX_RFGAIN_CTRL 0x1944
2314#define AR_BTCOEX_WL_LNA_TIMEOUT 0x003FFFFF
2315#define AR_BTCOEX_WL_LNA_TIMEOUT_S 0
2318 2316
2319#define AR_BTCOEX_CTRL2 0x1948 2317#define AR_BTCOEX_CTRL2 0x1948
2320#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800 2318#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
@@ -2360,4 +2358,11 @@ enum {
2360#define AR_GLB_SWREG_DISCONT_MODE 0x2002c 2358#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
2361#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3 2359#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
2362 2360
2361#define AR_MCI_MISC 0x1a74
2362#define AR_MCI_MISC_HW_FIX_EN 0x00000001
2363#define AR_MCI_MISC_HW_FIX_EN_S 0
2364#define AR_MCI_DBG_CNT_CTRL 0x1a78
2365#define AR_MCI_DBG_CNT_CTRL_ENABLE 0x00000001
2366#define AR_MCI_DBG_CNT_CTRL_ENABLE_S 0
2367
2363#endif 2368#endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
index a483d518758c..9f8563091bea 100644
--- a/drivers/net/wireless/ath/ath9k/wow.c
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -118,7 +118,7 @@ static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
118 (ap_mac_addr[1] << 8) | (ap_mac_addr[0]); 118 (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
119 data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]); 119 data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
120 120
121 if (AR_SREV_9462_20_OR_LATER(ah)) { 121 if (AR_SREV_9462_20(ah)) {
122 /* AR9462 2.0 has an extra descriptor word (time based 122 /* AR9462 2.0 has an extra descriptor word (time based
123 * discard) compared to other chips */ 123 * discard) compared to other chips */
124 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0); 124 REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 741918a2027b..90e48a0fafe5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -312,7 +312,6 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
312 } 312 }
313 313
314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 314 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
315 bf->bf_next = NULL;
316 list_del(&bf->list); 315 list_del(&bf->list);
317 316
318 spin_unlock_bh(&sc->tx.txbuflock); 317 spin_unlock_bh(&sc->tx.txbuflock);
@@ -1263,7 +1262,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
1263 int tidno; 1262 int tidno;
1264 1263
1265 for (tidno = 0, tid = &an->tid[tidno]; 1264 for (tidno = 0, tid = &an->tid[tidno];
1266 tidno < WME_NUM_TID; tidno++, tid++) { 1265 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1267 1266
1268 if (!tid->sched) 1267 if (!tid->sched)
1269 continue; 1268 continue;
@@ -1297,7 +1296,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
1297 int tidno; 1296 int tidno;
1298 1297
1299 for (tidno = 0, tid = &an->tid[tidno]; 1298 for (tidno = 0, tid = &an->tid[tidno];
1300 tidno < WME_NUM_TID; tidno++, tid++) { 1299 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
1301 1300
1302 ac = tid->ac; 1301 ac = tid->ac;
1303 txq = ac->txq; 1302 txq = ac->txq;
@@ -1354,10 +1353,10 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
1354 struct ath_hw *ah = sc->sc_ah; 1353 struct ath_hw *ah = sc->sc_ah;
1355 struct ath9k_tx_queue_info qi; 1354 struct ath9k_tx_queue_info qi;
1356 static const int subtype_txq_to_hwq[] = { 1355 static const int subtype_txq_to_hwq[] = {
1357 [WME_AC_BE] = ATH_TXQ_AC_BE, 1356 [IEEE80211_AC_BE] = ATH_TXQ_AC_BE,
1358 [WME_AC_BK] = ATH_TXQ_AC_BK, 1357 [IEEE80211_AC_BK] = ATH_TXQ_AC_BK,
1359 [WME_AC_VI] = ATH_TXQ_AC_VI, 1358 [IEEE80211_AC_VI] = ATH_TXQ_AC_VI,
1360 [WME_AC_VO] = ATH_TXQ_AC_VO, 1359 [IEEE80211_AC_VO] = ATH_TXQ_AC_VO,
1361 }; 1360 };
1362 int axq_qnum, i; 1361 int axq_qnum, i;
1363 1362
@@ -2319,6 +2318,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2319 2318
2320 ath_txq_lock(sc, txq); 2319 ath_txq_lock(sc, txq);
2321 2320
2321 TX_STAT_INC(txq->axq_qnum, txprocdesc);
2322
2322 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) { 2323 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2323 ath_txq_unlock(sc, txq); 2324 ath_txq_unlock(sc, txq);
2324 return; 2325 return;
@@ -2446,7 +2447,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2446 int tidno, acno; 2447 int tidno, acno;
2447 2448
2448 for (tidno = 0, tid = &an->tid[tidno]; 2449 for (tidno = 0, tid = &an->tid[tidno];
2449 tidno < WME_NUM_TID; 2450 tidno < IEEE80211_NUM_TIDS;
2450 tidno++, tid++) { 2451 tidno++, tid++) {
2451 tid->an = an; 2452 tid->an = an;
2452 tid->tidno = tidno; 2453 tid->tidno = tidno;
@@ -2464,7 +2465,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2464 } 2465 }
2465 2466
2466 for (acno = 0, ac = &an->ac[acno]; 2467 for (acno = 0, ac = &an->ac[acno];
2467 acno < WME_NUM_AC; acno++, ac++) { 2468 acno < IEEE80211_NUM_ACS; acno++, ac++) {
2468 ac->sched = false; 2469 ac->sched = false;
2469 ac->txq = sc->tx.txq_map[acno]; 2470 ac->txq = sc->tx.txq_map[acno];
2470 INIT_LIST_HEAD(&ac->tid_q); 2471 INIT_LIST_HEAD(&ac->tid_q);
@@ -2479,7 +2480,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2479 int tidno; 2480 int tidno;
2480 2481
2481 for (tidno = 0, tid = &an->tid[tidno]; 2482 for (tidno = 0, tid = &an->tid[tidno];
2482 tidno < WME_NUM_TID; tidno++, tid++) { 2483 tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
2483 2484
2484 ac = tid->ac; 2485 ac = tid->ac;
2485 txq = ac->txq; 2486 txq = ac->txq;
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index 267d5dcf82dc..13a204598766 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -1,6 +1,7 @@
1config CARL9170 1config CARL9170
2 tristate "Linux Community AR9170 802.11n USB support" 2 tristate "Linux Community AR9170 802.11n USB support"
3 depends on USB && MAC80211 && EXPERIMENTAL 3 depends on USB && MAC80211 && EXPERIMENTAL
4 select ATH_COMMON
4 select FW_LOADER 5 select FW_LOADER
5 select CRC32 6 select CRC32
6 help 7 help
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 24ac2876a733..aaebecd19e59 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -28,11 +28,6 @@
28#include "fwcmd.h" 28#include "fwcmd.h"
29#include "version.h" 29#include "version.h"
30 30
31#define MAKE_STR(symbol) #symbol
32#define TO_STR(symbol) MAKE_STR(symbol)
33#define CARL9170FW_API_VER_STR TO_STR(CARL9170FW_API_MAX_VER)
34MODULE_VERSION(CARL9170FW_API_VER_STR ":" CARL9170FW_VERSION_GIT);
35
36static const u8 otus_magic[4] = { OTUS_MAGIC }; 31static const u8 otus_magic[4] = { OTUS_MAGIC };
37 32
38static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4], 33static const void *carl9170_fw_find_desc(struct ar9170 *ar, const u8 descid[4],
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index e3b1b6e87760..24d75ab94f0d 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -343,7 +343,24 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
343 break; 343 break;
344 } 344 }
345 } else { 345 } else {
346 mac_addr = NULL; 346 /*
347 * Enable monitor mode
348 *
349 * rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
350 * sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
351 *
352 * When the hardware is in SNIFFER_PROMISC mode,
353 * it generates spurious ACKs for every incoming
354 * frame. This confuses every peer in the
355 * vicinity and the network throughput will suffer
356 * badly.
357 *
358 * Hence, the hardware will be put into station
359 * mode and just the rx filters are disabled.
360 */
361 cam_mode |= AR9170_MAC_CAM_STA;
362 rx_ctrl |= AR9170_MAC_RX_CTRL_PASS_TO_HOST;
363 mac_addr = common->macaddr;
347 bssid = NULL; 364 bssid = NULL;
348 } 365 }
349 rcu_read_unlock(); 366 rcu_read_unlock();
@@ -355,8 +372,6 @@ int carl9170_set_operating_mode(struct ar9170 *ar)
355 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE; 372 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
356 373
357 if (ar->sniffer_enabled) { 374 if (ar->sniffer_enabled) {
358 rx_ctrl |= AR9170_MAC_RX_CTRL_ACK_IN_SNIFFER;
359 sniffer |= AR9170_MAC_SNIFFER_ENABLE_PROMISC;
360 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE; 375 enc_mode |= AR9170_MAC_ENCRYPTION_RX_SOFTWARE;
361 } 376 }
362 377
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index a0b723078547..4684dd989496 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -164,9 +164,6 @@ void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
164 struct carl9170_rsp *cmd = buf; 164 struct carl9170_rsp *cmd = buf;
165 struct ieee80211_vif *vif; 165 struct ieee80211_vif *vif;
166 166
167 if (carl9170_check_sequence(ar, cmd->hdr.seq))
168 return;
169
170 if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) { 167 if ((cmd->hdr.cmd & CARL9170_RSP_FLAG) != CARL9170_RSP_FLAG) {
171 if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG)) 168 if (!(cmd->hdr.cmd & CARL9170_CMD_ASYNC_FLAG))
172 carl9170_cmd_callback(ar, len, buf); 169 carl9170_cmd_callback(ar, len, buf);
@@ -663,6 +660,35 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
663 return false; 660 return false;
664} 661}
665 662
663static int carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len,
664 struct ieee80211_rx_status *status)
665{
666 struct sk_buff *skb;
667
668 /* (driver) frame trap handler
669 *
670 * Because power-saving mode handing has to be implemented by
671 * the driver/firmware. We have to check each incoming beacon
672 * from the associated AP, if there's new data for us (either
673 * broadcast/multicast or unicast) we have to react quickly.
674 *
675 * So, if you have you want to add additional frame trap
676 * handlers, this would be the perfect place!
677 */
678
679 carl9170_ps_beacon(ar, buf, len);
680
681 carl9170_ba_check(ar, buf, len);
682
683 skb = carl9170_rx_copy_data(buf, len);
684 if (!skb)
685 return -ENOMEM;
686
687 memcpy(IEEE80211_SKB_RXCB(skb), status, sizeof(*status));
688 ieee80211_rx(ar->hw, skb);
689 return 0;
690}
691
666/* 692/*
667 * If the frame alignment is right (or the kernel has 693 * If the frame alignment is right (or the kernel has
668 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there 694 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
@@ -672,14 +698,12 @@ static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms,
672 * mode, and we need to observe the proper ordering, 698 * mode, and we need to observe the proper ordering,
673 * this is non-trivial. 699 * this is non-trivial.
674 */ 700 */
675 701static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
676static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
677{ 702{
678 struct ar9170_rx_head *head; 703 struct ar9170_rx_head *head;
679 struct ar9170_rx_macstatus *mac; 704 struct ar9170_rx_macstatus *mac;
680 struct ar9170_rx_phystatus *phy = NULL; 705 struct ar9170_rx_phystatus *phy = NULL;
681 struct ieee80211_rx_status status; 706 struct ieee80211_rx_status status;
682 struct sk_buff *skb;
683 int mpdu_len; 707 int mpdu_len;
684 u8 mac_status; 708 u8 mac_status;
685 709
@@ -790,19 +814,13 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
790 814
791 if (phy) 815 if (phy)
792 carl9170_rx_phy_status(ar, phy, &status); 816 carl9170_rx_phy_status(ar, phy, &status);
817 else
818 status.flag |= RX_FLAG_NO_SIGNAL_VAL;
793 819
794 carl9170_ps_beacon(ar, buf, mpdu_len); 820 if (carl9170_handle_mpdu(ar, buf, mpdu_len, &status))
795
796 carl9170_ba_check(ar, buf, mpdu_len);
797
798 skb = carl9170_rx_copy_data(buf, mpdu_len);
799 if (!skb)
800 goto drop; 821 goto drop;
801 822
802 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
803 ieee80211_rx(ar->hw, skb);
804 return; 823 return;
805
806drop: 824drop:
807 ar->rx_dropped++; 825 ar->rx_dropped++;
808} 826}
@@ -820,6 +838,9 @@ static void carl9170_rx_untie_cmds(struct ar9170 *ar, const u8 *respbuf,
820 if (unlikely(i > resplen)) 838 if (unlikely(i > resplen))
821 break; 839 break;
822 840
841 if (carl9170_check_sequence(ar, cmd->hdr.seq))
842 break;
843
823 carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4); 844 carl9170_handle_command_response(ar, cmd, cmd->hdr.len + 4);
824 } 845 }
825 846
@@ -851,7 +872,7 @@ static void __carl9170_rx(struct ar9170 *ar, u8 *buf, unsigned int len)
851 if (i == 12) 872 if (i == 12)
852 carl9170_rx_untie_cmds(ar, buf, len); 873 carl9170_rx_untie_cmds(ar, buf, len);
853 else 874 else
854 carl9170_handle_mpdu(ar, buf, len); 875 carl9170_rx_untie_data(ar, buf, len);
855} 876}
856 877
857static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len) 878static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 84377cf580e0..ef4ec0da6e49 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -1485,6 +1485,13 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
1485 } 1485 }
1486 1486
1487 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1487 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1488 /* to static code analyzers and reviewers:
1489 * mac80211 guarantees that a valid "sta"
1490 * reference is present, if a frame is to
1491 * be part of an ampdu. Hence any extra
1492 * sta == NULL checks are redundant in this
1493 * special case.
1494 */
1488 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1495 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1489 if (run) 1496 if (run)
1490 carl9170_tx_ampdu(ar); 1497 carl9170_tx_ampdu(ar);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 888152ce3eca..307bc0ddff99 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -295,6 +295,13 @@ static void carl9170_usb_rx_irq_complete(struct urb *urb)
295 goto resubmit; 295 goto resubmit;
296 } 296 }
297 297
298 /*
299 * While the carl9170 firmware does not use this EP, the
300 * firmware loader in the EEPROM unfortunately does.
301 * Therefore we need to be ready to handle out-of-band
302 * responses and traps in case the firmware crashed and
303 * the loader took over again.
304 */
298 carl9170_handle_command_response(ar, urb->transfer_buffer, 305 carl9170_handle_command_response(ar, urb->transfer_buffer,
299 urb->actual_length); 306 urb->actual_length);
300 307
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 19befb331073..39e8a590d7fc 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -20,8 +20,8 @@
20#include "ath.h" 20#include "ath.h"
21#include "reg.h" 21#include "reg.h"
22 22
23#define REG_READ (common->ops->read) 23#define REG_READ (common->ops->read)
24#define REG_WRITE (common->ops->write) 24#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
25 25
26/** 26/**
27 * ath_hw_set_bssid_mask - filter out bssids we listen 27 * ath_hw_set_bssid_mask - filter out bssids we listen
@@ -119,8 +119,8 @@ void ath_hw_setbssidmask(struct ath_common *common)
119{ 119{
120 void *ah = common->ah; 120 void *ah = common->ah;
121 121
122 REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL); 122 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(common->bssidmask));
123 REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU); 123 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(common->bssidmask + 4));
124} 124}
125EXPORT_SYMBOL(ath_hw_setbssidmask); 125EXPORT_SYMBOL(ath_hw_setbssidmask);
126 126
@@ -139,7 +139,7 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
139 void *ah = common->ah; 139 void *ah = common->ah;
140 140
141 /* freeze */ 141 /* freeze */
142 REG_WRITE(ah, AR_MIBC_FMC, AR_MIBC); 142 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
143 143
144 /* read */ 144 /* read */
145 cycles = REG_READ(ah, AR_CCCNT); 145 cycles = REG_READ(ah, AR_CCCNT);
@@ -148,13 +148,13 @@ void ath_hw_cycle_counters_update(struct ath_common *common)
148 tx = REG_READ(ah, AR_TFCNT); 148 tx = REG_READ(ah, AR_TFCNT);
149 149
150 /* clear */ 150 /* clear */
151 REG_WRITE(ah, 0, AR_CCCNT); 151 REG_WRITE(ah, AR_CCCNT, 0);
152 REG_WRITE(ah, 0, AR_RFCNT); 152 REG_WRITE(ah, AR_RFCNT, 0);
153 REG_WRITE(ah, 0, AR_RCCNT); 153 REG_WRITE(ah, AR_RCCNT, 0);
154 REG_WRITE(ah, 0, AR_TFCNT); 154 REG_WRITE(ah, AR_TFCNT, 0);
155 155
156 /* unfreeze */ 156 /* unfreeze */
157 REG_WRITE(ah, 0, AR_MIBC); 157 REG_WRITE(ah, AR_MIBC, 0);
158 158
159 /* update all cycle counters here */ 159 /* update all cycle counters here */
160 common->cc_ani.cycles += cycles; 160 common->cc_ani.cycles += cycles;
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 51e33b53386e..c1b159ebcffe 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -45,11 +45,11 @@ static struct pci_driver atmel_driver = {
45 .name = "atmel", 45 .name = "atmel",
46 .id_table = card_ids, 46 .id_table = card_ids,
47 .probe = atmel_pci_probe, 47 .probe = atmel_pci_probe,
48 .remove = __devexit_p(atmel_pci_remove), 48 .remove = atmel_pci_remove,
49}; 49};
50 50
51 51
52static int __devinit atmel_pci_probe(struct pci_dev *pdev, 52static int atmel_pci_probe(struct pci_dev *pdev,
53 const struct pci_device_id *pent) 53 const struct pci_device_id *pent)
54{ 54{
55 struct net_device *dev; 55 struct net_device *dev;
@@ -69,7 +69,7 @@ static int __devinit atmel_pci_probe(struct pci_dev *pdev,
69 return 0; 69 return 0;
70} 70}
71 71
72static void __devexit atmel_pci_remove(struct pci_dev *pdev) 72static void atmel_pci_remove(struct pci_dev *pdev)
73{ 73{
74 stop_atmel_card(pci_get_drvdata(pdev)); 74 stop_atmel_card(pci_get_drvdata(pdev));
75} 75}
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 777cd74921d7..38bc5a7997ff 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -409,7 +409,10 @@ static inline
409 struct b43_dmadesc_meta *meta) 409 struct b43_dmadesc_meta *meta)
410{ 410{
411 if (meta->skb) { 411 if (meta->skb) {
412 dev_kfree_skb_any(meta->skb); 412 if (ring->tx)
413 ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
414 else
415 dev_kfree_skb_any(meta->skb);
413 meta->skb = NULL; 416 meta->skb = NULL;
414 } 417 }
415} 418}
@@ -1454,7 +1457,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1454 if (unlikely(err == -ENOKEY)) { 1457 if (unlikely(err == -ENOKEY)) {
1455 /* Drop this packet, as we don't have the encryption key 1458 /* Drop this packet, as we don't have the encryption key
1456 * anymore and must not transmit it unencrypted. */ 1459 * anymore and must not transmit it unencrypted. */
1457 dev_kfree_skb_any(skb); 1460 ieee80211_free_txskb(dev->wl->hw, skb);
1458 err = 0; 1461 err = 0;
1459 goto out; 1462 goto out;
1460 } 1463 }
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c5a99c8c8168..16ab280359bd 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3397,7 +3397,7 @@ static void b43_tx_work(struct work_struct *work)
3397 break; 3397 break;
3398 } 3398 }
3399 if (unlikely(err)) 3399 if (unlikely(err))
3400 dev_kfree_skb(skb); /* Drop it */ 3400 ieee80211_free_txskb(wl->hw, skb);
3401 err = 0; 3401 err = 0;
3402 } 3402 }
3403 3403
@@ -3419,7 +3419,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
3419 3419
3420 if (unlikely(skb->len < 2 + 2 + 6)) { 3420 if (unlikely(skb->len < 2 + 2 + 6)) {
3421 /* Too short, this can't be a valid frame. */ 3421 /* Too short, this can't be a valid frame. */
3422 dev_kfree_skb_any(skb); 3422 ieee80211_free_txskb(hw, skb);
3423 return; 3423 return;
3424 } 3424 }
3425 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 3425 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
@@ -4229,8 +4229,12 @@ redo:
4229 4229
4230 /* Drain all TX queues. */ 4230 /* Drain all TX queues. */
4231 for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) { 4231 for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
4232 while (skb_queue_len(&wl->tx_queue[queue_num])) 4232 while (skb_queue_len(&wl->tx_queue[queue_num])) {
4233 dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num])); 4233 struct sk_buff *skb;
4234
4235 skb = skb_dequeue(&wl->tx_queue[queue_num]);
4236 ieee80211_free_txskb(wl->hw, skb);
4237 }
4234 } 4238 }
4235 4239
4236 b43_mac_suspend(dev); 4240 b43_mac_suspend(dev);
@@ -4652,7 +4656,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4652 switch (dev->dev->bus_type) { 4656 switch (dev->dev->bus_type) {
4653#ifdef CONFIG_B43_BCMA 4657#ifdef CONFIG_B43_BCMA
4654 case B43_BUS_BCMA: 4658 case B43_BUS_BCMA:
4655 bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci, 4659 bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
4656 dev->dev->bdev, true); 4660 dev->dev->bdev, true);
4657 break; 4661 break;
4658#endif 4662#endif
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 714cad649c45..f2ea2ceec8a9 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -60,7 +60,7 @@ static int b43_pcmcia_resume(struct pcmcia_device *dev)
60# define b43_pcmcia_resume NULL 60# define b43_pcmcia_resume NULL
61#endif /* CONFIG_PM */ 61#endif /* CONFIG_PM */
62 62
63static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) 63static int b43_pcmcia_probe(struct pcmcia_device *dev)
64{ 64{
65 struct ssb_bus *ssb; 65 struct ssb_bus *ssb;
66 int err = -ENOMEM; 66 int err = -ENOMEM;
@@ -110,7 +110,7 @@ out_error:
110 return err; 110 return err;
111} 111}
112 112
113static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev) 113static void b43_pcmcia_remove(struct pcmcia_device *dev)
114{ 114{
115 struct ssb_bus *ssb = dev->priv; 115 struct ssb_bus *ssb = dev->priv;
116 116
@@ -125,7 +125,7 @@ static struct pcmcia_driver b43_pcmcia_driver = {
125 .name = "b43-pcmcia", 125 .name = "b43-pcmcia",
126 .id_table = b43_pcmcia_tbl, 126 .id_table = b43_pcmcia_tbl,
127 .probe = b43_pcmcia_probe, 127 .probe = b43_pcmcia_probe,
128 .remove = __devexit_p(b43_pcmcia_remove), 128 .remove = b43_pcmcia_remove,
129 .suspend = b43_pcmcia_suspend, 129 .suspend = b43_pcmcia_suspend,
130 .resume = b43_pcmcia_resume, 130 .resume = b43_pcmcia_resume,
131}; 131};
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 3533ab86bd36..a73ff8c9deb5 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -196,7 +196,7 @@ static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
196 for (i = 0; i < ARRAY_SIZE(q->packets); i++) { 196 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
197 pack = &(q->packets[i]); 197 pack = &(q->packets[i]);
198 if (pack->skb) { 198 if (pack->skb) {
199 dev_kfree_skb_any(pack->skb); 199 ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
200 pack->skb = NULL; 200 pack->skb = NULL;
201 } 201 }
202 } 202 }
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
552 if (unlikely(err == -ENOKEY)) { 552 if (unlikely(err == -ENOKEY)) {
553 /* Drop this packet, as we don't have the encryption key 553 /* Drop this packet, as we don't have the encryption key
554 * anymore and must not transmit it unencrypted. */ 554 * anymore and must not transmit it unencrypted. */
555 dev_kfree_skb_any(skb); 555 ieee80211_free_txskb(dev->wl->hw, skb);
556 err = 0; 556 err = 0;
557 goto out; 557 goto out;
558 } 558 }
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index a54fb2d29089..59a521800694 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -93,7 +93,7 @@ void b43_sdio_free_irq(struct b43_wldev *dev)
93 sdio->irq_handler = NULL; 93 sdio->irq_handler = NULL;
94} 94}
95 95
96static int __devinit b43_sdio_probe(struct sdio_func *func, 96static int b43_sdio_probe(struct sdio_func *func,
97 const struct sdio_device_id *id) 97 const struct sdio_device_id *id)
98{ 98{
99 struct b43_sdio *sdio; 99 struct b43_sdio *sdio;
@@ -171,7 +171,7 @@ out:
171 return error; 171 return error;
172} 172}
173 173
174static void __devexit b43_sdio_remove(struct sdio_func *func) 174static void b43_sdio_remove(struct sdio_func *func)
175{ 175{
176 struct b43_sdio *sdio = sdio_get_drvdata(func); 176 struct b43_sdio *sdio = sdio_get_drvdata(func);
177 177
@@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = {
193 .name = "b43-sdio", 193 .name = "b43-sdio",
194 .id_table = b43_sdio_ids, 194 .id_table = b43_sdio_ids,
195 .probe = b43_sdio_probe, 195 .probe = b43_sdio_probe,
196 .remove = __devexit_p(b43_sdio_remove), 196 .remove = b43_sdio_remove,
197}; 197};
198 198
199int b43_sdio_init(void) 199int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 136510edf3cf..8cb206a89083 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -796,7 +796,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
796 status.mactime += mactime; 796 status.mactime += mactime;
797 if (low_mactime_now <= mactime) 797 if (low_mactime_now <= mactime)
798 status.mactime -= 0x10000; 798 status.mactime -= 0x10000;
799 status.flag |= RX_FLAG_MACTIME_MPDU; 799 status.flag |= RX_FLAG_MACTIME_START;
800 } 800 }
801 801
802 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 802 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index a29da674e69d..482476fdb1f3 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/ssb/ssb.h> 14#include <linux/ssb/ssb.h>
15#include <linux/ssb/ssb_driver_chipcommon.h> 15#include <linux/ssb/ssb_driver_chipcommon.h>
16#include <linux/completion.h>
16 17
17#include <net/mac80211.h> 18#include <net/mac80211.h>
18 19
@@ -733,6 +734,10 @@ struct b43legacy_wldev {
733 734
734 /* Firmware data */ 735 /* Firmware data */
735 struct b43legacy_firmware fw; 736 struct b43legacy_firmware fw;
737 const struct firmware *fwp; /* needed to pass fw pointer */
738
739 /* completion struct for firmware loading */
740 struct completion fw_load_complete;
736 741
737 /* Devicelist in struct b43legacy_wl (all 802.11 cores) */ 742 /* Devicelist in struct b43legacy_wl (all 802.11 cores) */
738 struct list_head list; 743 struct list_head list;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 18e208e3eca1..8c3f70e1a013 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1513,9 +1513,17 @@ static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
1513 "and download the correct firmware (version 3).\n"); 1513 "and download the correct firmware (version 3).\n");
1514} 1514}
1515 1515
1516static void b43legacy_fw_cb(const struct firmware *firmware, void *context)
1517{
1518 struct b43legacy_wldev *dev = context;
1519
1520 dev->fwp = firmware;
1521 complete(&dev->fw_load_complete);
1522}
1523
1516static int do_request_fw(struct b43legacy_wldev *dev, 1524static int do_request_fw(struct b43legacy_wldev *dev,
1517 const char *name, 1525 const char *name,
1518 const struct firmware **fw) 1526 const struct firmware **fw, bool async)
1519{ 1527{
1520 char path[sizeof(modparam_fwpostfix) + 32]; 1528 char path[sizeof(modparam_fwpostfix) + 32];
1521 struct b43legacy_fw_header *hdr; 1529 struct b43legacy_fw_header *hdr;
@@ -1528,7 +1536,24 @@ static int do_request_fw(struct b43legacy_wldev *dev,
1528 snprintf(path, ARRAY_SIZE(path), 1536 snprintf(path, ARRAY_SIZE(path),
1529 "b43legacy%s/%s.fw", 1537 "b43legacy%s/%s.fw",
1530 modparam_fwpostfix, name); 1538 modparam_fwpostfix, name);
1531 err = request_firmware(fw, path, dev->dev->dev); 1539 b43legacyinfo(dev->wl, "Loading firmware %s\n", path);
1540 if (async) {
1541 init_completion(&dev->fw_load_complete);
1542 err = request_firmware_nowait(THIS_MODULE, 1, path,
1543 dev->dev->dev, GFP_KERNEL,
1544 dev, b43legacy_fw_cb);
1545 if (err) {
1546 b43legacyerr(dev->wl, "Unable to load firmware\n");
1547 return err;
1548 }
1549 /* stall here until fw ready */
1550 wait_for_completion(&dev->fw_load_complete);
1551 if (!dev->fwp)
1552 err = -EINVAL;
1553 *fw = dev->fwp;
1554 } else {
1555 err = request_firmware(fw, path, dev->dev->dev);
1556 }
1532 if (err) { 1557 if (err) {
1533 b43legacyerr(dev->wl, "Firmware file \"%s\" not found " 1558 b43legacyerr(dev->wl, "Firmware file \"%s\" not found "
1534 "or load failed.\n", path); 1559 "or load failed.\n", path);
@@ -1580,7 +1605,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
1580 filename = "ucode4"; 1605 filename = "ucode4";
1581 else 1606 else
1582 filename = "ucode5"; 1607 filename = "ucode5";
1583 err = do_request_fw(dev, filename, &fw->ucode); 1608 err = do_request_fw(dev, filename, &fw->ucode, true);
1584 if (err) 1609 if (err)
1585 goto err_load; 1610 goto err_load;
1586 } 1611 }
@@ -1589,7 +1614,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
1589 filename = "pcm4"; 1614 filename = "pcm4";
1590 else 1615 else
1591 filename = "pcm5"; 1616 filename = "pcm5";
1592 err = do_request_fw(dev, filename, &fw->pcm); 1617 err = do_request_fw(dev, filename, &fw->pcm, false);
1593 if (err) 1618 if (err)
1594 goto err_load; 1619 goto err_load;
1595 } 1620 }
@@ -1607,7 +1632,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
1607 default: 1632 default:
1608 goto err_no_initvals; 1633 goto err_no_initvals;
1609 } 1634 }
1610 err = do_request_fw(dev, filename, &fw->initvals); 1635 err = do_request_fw(dev, filename, &fw->initvals, false);
1611 if (err) 1636 if (err)
1612 goto err_load; 1637 goto err_load;
1613 } 1638 }
@@ -1627,7 +1652,7 @@ static void b43legacy_request_firmware(struct work_struct *work)
1627 default: 1652 default:
1628 goto err_no_initvals; 1653 goto err_no_initvals;
1629 } 1654 }
1630 err = do_request_fw(dev, filename, &fw->initvals_band); 1655 err = do_request_fw(dev, filename, &fw->initvals_band, false);
1631 if (err) 1656 if (err)
1632 goto err_load; 1657 goto err_load;
1633 } 1658 }
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index b8ffea6f5c64..849a28c80302 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -557,7 +557,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
557 status.mactime += mactime; 557 status.mactime += mactime;
558 if (low_mactime_now <= mactime) 558 if (low_mactime_now <= mactime)
559 status.mactime -= 0x10000; 559 status.mactime -= 0x10000;
560 status.flag |= RX_FLAG_MACTIME_MPDU; 560 status.flag |= RX_FLAG_MACTIME_START;
561 } 561 }
562 562
563 chanid = (chanstat & B43legacy_RX_CHAN_ID) >> 563 chanid = (chanstat & B43legacy_RX_CHAN_ID) >>
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index c9d811eb6556..1d92d874ebb6 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -55,13 +55,16 @@ config BRCMFMAC_USB
55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to 55 IEEE802.11n embedded FullMAC WLAN driver. Say Y if you want to
56 use the driver for an USB wireless card. 56 use the driver for an USB wireless card.
57 57
58config BRCMISCAN 58config BRCM_TRACING
59 bool "Broadcom I-Scan (OBSOLETE)" 59 bool "Broadcom device tracing"
60 depends on BRCMFMAC 60 depends on BRCMSMAC || BRCMFMAC
61 ---help--- 61 ---help---
62 This option enables the I-Scan method. By default fullmac uses the 62 If you say Y here, the Broadcom wireless drivers will register
63 new E-Scan method which uses less memory in firmware and gives no 63 with ftrace to dump event information into the trace ringbuffer.
64 limitation on the number of scan results. 64 Tracing can be enabled at runtime to aid in debugging wireless
65 issues. This option adds a small amount of overhead when tracing
66 is disabled. If unsure, say Y to allow developers to better help
67 you when wireless problems occur.
65 68
66config BRCMDBG 69config BRCMDBG
67 bool "Broadcom driver debug functions" 70 bool "Broadcom driver debug functions"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 9d5170b6df50..1a6661a9f008 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,8 @@ ccflags-y += -D__CHECK_ENDIAN__
24obj-$(CONFIG_BRCMFMAC) += brcmfmac.o 24obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
25brcmfmac-objs += \ 25brcmfmac-objs += \
26 wl_cfg80211.o \ 26 wl_cfg80211.o \
27 fwil.o \
28 fweh.o \
27 dhd_cdc.o \ 29 dhd_cdc.o \
28 dhd_common.o \ 30 dhd_common.o \
29 dhd_linux.o 31 dhd_linux.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3b2c4c20e7fc..be35a2f99b1c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -42,7 +42,8 @@
42#ifdef CONFIG_BRCMFMAC_SDIO_OOB 42#ifdef CONFIG_BRCMFMAC_SDIO_OOB
43static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id) 43static irqreturn_t brcmf_sdio_irqhandler(int irq, void *dev_id)
44{ 44{
45 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(dev_id); 45 struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
46 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
46 47
47 brcmf_dbg(INTR, "oob intr triggered\n"); 48 brcmf_dbg(INTR, "oob intr triggered\n");
48 49
@@ -66,12 +67,11 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
66 u8 data; 67 u8 data;
67 unsigned long flags; 68 unsigned long flags;
68 69
69 brcmf_dbg(TRACE, "Entering\n"); 70 brcmf_dbg(TRACE, "Entering: irq %d\n", sdiodev->irq);
70 71
71 brcmf_dbg(ERROR, "requesting irq %d\n", sdiodev->irq);
72 ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler, 72 ret = request_irq(sdiodev->irq, brcmf_sdio_irqhandler,
73 sdiodev->irq_flags, "brcmf_oob_intr", 73 sdiodev->irq_flags, "brcmf_oob_intr",
74 &sdiodev->func[1]->card->dev); 74 &sdiodev->func[1]->dev);
75 if (ret != 0) 75 if (ret != 0)
76 return ret; 76 return ret;
77 spin_lock_init(&sdiodev->irq_en_lock); 77 spin_lock_init(&sdiodev->irq_en_lock);
@@ -84,6 +84,8 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
84 return ret; 84 return ret;
85 sdiodev->irq_wake = true; 85 sdiodev->irq_wake = true;
86 86
87 sdio_claim_host(sdiodev->func[1]);
88
87 /* must configure SDIO_CCCR_IENx to enable irq */ 89 /* must configure SDIO_CCCR_IENx to enable irq */
88 data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret); 90 data = brcmf_sdio_regrb(sdiodev, SDIO_CCCR_IENx, &ret);
89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; 91 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
@@ -95,6 +97,8 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
95 data |= SDIO_SEPINT_ACT_HI; 97 data |= SDIO_SEPINT_ACT_HI;
96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); 98 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
97 99
100 sdio_release_host(sdiodev->func[1]);
101
98 return 0; 102 return 0;
99} 103}
100 104
@@ -102,14 +106,16 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
102{ 106{
103 brcmf_dbg(TRACE, "Entering\n"); 107 brcmf_dbg(TRACE, "Entering\n");
104 108
109 sdio_claim_host(sdiodev->func[1]);
105 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); 110 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
106 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); 111 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
112 sdio_release_host(sdiodev->func[1]);
107 113
108 if (sdiodev->irq_wake) { 114 if (sdiodev->irq_wake) {
109 disable_irq_wake(sdiodev->irq); 115 disable_irq_wake(sdiodev->irq);
110 sdiodev->irq_wake = false; 116 sdiodev->irq_wake = false;
111 } 117 }
112 free_irq(sdiodev->irq, &sdiodev->func[1]->card->dev); 118 free_irq(sdiodev->irq, &sdiodev->func[1]->dev);
113 sdiodev->irq_en = false; 119 sdiodev->irq_en = false;
114 120
115 return 0; 121 return 0;
@@ -117,7 +123,8 @@ int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev)
117#else /* CONFIG_BRCMFMAC_SDIO_OOB */ 123#else /* CONFIG_BRCMFMAC_SDIO_OOB */
118static void brcmf_sdio_irqhandler(struct sdio_func *func) 124static void brcmf_sdio_irqhandler(struct sdio_func *func)
119{ 125{
120 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); 126 struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
127 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
121 128
122 brcmf_dbg(INTR, "ib intr triggered\n"); 129 brcmf_dbg(INTR, "ib intr triggered\n");
123 130
@@ -176,7 +183,7 @@ brcmf_sdcard_set_sbaddr_window(struct brcmf_sdio_dev *sdiodev, u32 address)
176 } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); 183 } while (err != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
177 184
178 if (err) { 185 if (err) {
179 brcmf_dbg(ERROR, "failed at addr:0x%0x\n", 186 brcmf_err("failed at addr:0x%0x\n",
180 SBSDIO_FUNC1_SBADDRLOW + i); 187 SBSDIO_FUNC1_SBADDRLOW + i);
181 break; 188 break;
182 } 189 }
@@ -238,7 +245,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
238 } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT); 245 } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
239 246
240 if (ret != 0) 247 if (ret != 0)
241 brcmf_dbg(ERROR, "failed with %d\n", ret); 248 brcmf_err("failed with %d\n", ret);
242 249
243 return ret; 250 return ret;
244} 251}
@@ -249,9 +256,7 @@ u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
249 int retval; 256 int retval;
250 257
251 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 258 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
252 sdio_claim_host(sdiodev->func[1]);
253 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 259 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
254 sdio_release_host(sdiodev->func[1]);
255 brcmf_dbg(INFO, "data:0x%02x\n", data); 260 brcmf_dbg(INFO, "data:0x%02x\n", data);
256 261
257 if (ret) 262 if (ret)
@@ -266,9 +271,7 @@ u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
266 int retval; 271 int retval;
267 272
268 brcmf_dbg(INFO, "addr:0x%08x\n", addr); 273 brcmf_dbg(INFO, "addr:0x%08x\n", addr);
269 sdio_claim_host(sdiodev->func[1]);
270 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false); 274 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, false);
271 sdio_release_host(sdiodev->func[1]);
272 brcmf_dbg(INFO, "data:0x%08x\n", data); 275 brcmf_dbg(INFO, "data:0x%08x\n", data);
273 276
274 if (ret) 277 if (ret)
@@ -283,9 +286,7 @@ void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
283 int retval; 286 int retval;
284 287
285 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data); 288 brcmf_dbg(INFO, "addr:0x%08x, data:0x%02x\n", addr, data);
286 sdio_claim_host(sdiodev->func[1]);
287 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 289 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
288 sdio_release_host(sdiodev->func[1]);
289 290
290 if (ret) 291 if (ret)
291 *ret = retval; 292 *ret = retval;
@@ -297,9 +298,7 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
297 int retval; 298 int retval;
298 299
299 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data); 300 brcmf_dbg(INFO, "addr:0x%08x, data:0x%08x\n", addr, data);
300 sdio_claim_host(sdiodev->func[1]);
301 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true); 301 retval = brcmf_sdio_regrw_helper(sdiodev, addr, &data, true);
302 sdio_release_host(sdiodev->func[1]);
303 302
304 if (ret) 303 if (ret)
305 *ret = retval; 304 *ret = retval;
@@ -340,7 +339,7 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
340 339
341 mypkt = brcmu_pkt_buf_get_skb(nbytes); 340 mypkt = brcmu_pkt_buf_get_skb(nbytes);
342 if (!mypkt) { 341 if (!mypkt) {
343 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", 342 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
344 nbytes); 343 nbytes);
345 return -EIO; 344 return -EIO;
346 } 345 }
@@ -364,8 +363,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
364 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 363 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
365 fn, addr, pkt->len); 364 fn, addr, pkt->len);
366 365
367 sdio_claim_host(sdiodev->func[1]);
368
369 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 366 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
370 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 367 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
371 if (err) 368 if (err)
@@ -376,8 +373,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
376 fn, addr, pkt); 373 fn, addr, pkt);
377 374
378done: 375done:
379 sdio_release_host(sdiodev->func[1]);
380
381 return err; 376 return err;
382} 377}
383 378
@@ -391,8 +386,6 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
391 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", 386 brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
392 fn, addr, pktq->qlen); 387 fn, addr, pktq->qlen);
393 388
394 sdio_claim_host(sdiodev->func[1]);
395
396 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2; 389 width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
397 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr); 390 err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
398 if (err) 391 if (err)
@@ -403,8 +396,6 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
403 pktq); 396 pktq);
404 397
405done: 398done:
406 sdio_release_host(sdiodev->func[1]);
407
408 return err; 399 return err;
409} 400}
410 401
@@ -417,7 +408,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
417 408
418 mypkt = brcmu_pkt_buf_get_skb(nbytes); 409 mypkt = brcmu_pkt_buf_get_skb(nbytes);
419 if (!mypkt) { 410 if (!mypkt) {
420 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", 411 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
421 nbytes); 412 nbytes);
422 return -EIO; 413 return -EIO;
423 } 414 }
@@ -446,8 +437,6 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
446 if (flags & SDIO_REQ_ASYNC) 437 if (flags & SDIO_REQ_ASYNC)
447 return -ENOTSUPP; 438 return -ENOTSUPP;
448 439
449 sdio_claim_host(sdiodev->func[1]);
450
451 if (bar0 != sdiodev->sbwad) { 440 if (bar0 != sdiodev->sbwad) {
452 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0); 441 err = brcmf_sdcard_set_sbaddr_window(sdiodev, bar0);
453 if (err) 442 if (err)
@@ -467,8 +456,6 @@ brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
467 addr, pkt); 456 addr, pkt);
468 457
469done: 458done:
470 sdio_release_host(sdiodev->func[1]);
471
472 return err; 459 return err;
473} 460}
474 461
@@ -484,7 +471,7 @@ int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
484 471
485 mypkt = brcmu_pkt_buf_get_skb(nbytes); 472 mypkt = brcmu_pkt_buf_get_skb(nbytes);
486 if (!mypkt) { 473 if (!mypkt) {
487 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n", 474 brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
488 nbytes); 475 nbytes);
489 return -EIO; 476 return -EIO;
490 } 477 }
@@ -510,10 +497,8 @@ int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
510 brcmf_dbg(TRACE, "Enter\n"); 497 brcmf_dbg(TRACE, "Enter\n");
511 498
512 /* issue abort cmd52 command through F0 */ 499 /* issue abort cmd52 command through F0 */
513 sdio_claim_host(sdiodev->func[1]);
514 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0, 500 brcmf_sdioh_request_byte(sdiodev, SDIOH_WRITE, SDIO_FUNC_0,
515 SDIO_CCCR_ABORT, &t_func); 501 SDIO_CCCR_ABORT, &t_func);
516 sdio_release_host(sdiodev->func[1]);
517 502
518 brcmf_dbg(TRACE, "Exit\n"); 503 brcmf_dbg(TRACE, "Exit\n");
519 return 0; 504 return 0;
@@ -530,13 +515,10 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
530 515
531 regs = SI_ENUM_BASE; 516 regs = SI_ENUM_BASE;
532 517
533 /* Report the BAR, to fix if needed */
534 sdiodev->sbwad = SI_ENUM_BASE;
535
536 /* try to attach to the target device */ 518 /* try to attach to the target device */
537 sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev); 519 sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
538 if (!sdiodev->bus) { 520 if (!sdiodev->bus) {
539 brcmf_dbg(ERROR, "device attach failed\n"); 521 brcmf_err("device attach failed\n");
540 ret = -ENODEV; 522 ret = -ENODEV;
541 goto out; 523 goto out;
542 } 524 }
@@ -551,6 +533,8 @@ EXPORT_SYMBOL(brcmf_sdio_probe);
551 533
552int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev) 534int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev)
553{ 535{
536 sdiodev->bus_if->state = BRCMF_BUS_DOWN;
537
554 if (sdiodev->bus) { 538 if (sdiodev->bus) {
555 brcmf_sdbrcm_disconnect(sdiodev->bus); 539 brcmf_sdbrcm_disconnect(sdiodev->bus);
556 sdiodev->bus = NULL; 540 sdiodev->bus = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index c3247d5b3c22..d33e5598611b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -107,15 +107,13 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
107 /* Enable Function 2 */ 107 /* Enable Function 2 */
108 err_ret = sdio_enable_func(sdfunc); 108 err_ret = sdio_enable_func(sdfunc);
109 if (err_ret) 109 if (err_ret)
110 brcmf_dbg(ERROR, 110 brcmf_err("enable F2 failed:%d\n",
111 "enable F2 failed:%d\n",
112 err_ret); 111 err_ret);
113 } else { 112 } else {
114 /* Disable Function 2 */ 113 /* Disable Function 2 */
115 err_ret = sdio_disable_func(sdfunc); 114 err_ret = sdio_disable_func(sdfunc);
116 if (err_ret) 115 if (err_ret)
117 brcmf_dbg(ERROR, 116 brcmf_err("Disable F2 failed:%d\n",
118 "Disable F2 failed:%d\n",
119 err_ret); 117 err_ret);
120 } 118 }
121 } 119 }
@@ -129,7 +127,7 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
129 sdio_writeb(sdfunc, *byte, regaddr, &err_ret); 127 sdio_writeb(sdfunc, *byte, regaddr, &err_ret);
130 kfree(sdfunc); 128 kfree(sdfunc);
131 } else if (regaddr < 0xF0) { 129 } else if (regaddr < 0xF0) {
132 brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); 130 brcmf_err("F0 Wr:0x%02x: write disallowed\n", regaddr);
133 err_ret = -EPERM; 131 err_ret = -EPERM;
134 } else { 132 } else {
135 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret); 133 sdio_f0_writeb(sdfunc, *byte, regaddr, &err_ret);
@@ -166,7 +164,7 @@ int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint func,
166 } 164 }
167 165
168 if (err_ret) 166 if (err_ret)
169 brcmf_dbg(ERROR, "Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n", 167 brcmf_err("Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
170 rw ? "write" : "read", func, regaddr, *byte, err_ret); 168 rw ? "write" : "read", func, regaddr, *byte, err_ret);
171 169
172 return err_ret; 170 return err_ret;
@@ -179,7 +177,7 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
179 int err_ret = -EIO; 177 int err_ret = -EIO;
180 178
181 if (func == 0) { 179 if (func == 0) {
182 brcmf_dbg(ERROR, "Only CMD52 allowed to F0\n"); 180 brcmf_err("Only CMD52 allowed to F0\n");
183 return -EINVAL; 181 return -EINVAL;
184 } 182 }
185 183
@@ -198,7 +196,7 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
198 sdio_writew(sdiodev->func[func], (*word & 0xFFFF), 196 sdio_writew(sdiodev->func[func], (*word & 0xFFFF),
199 addr, &err_ret); 197 addr, &err_ret);
200 else 198 else
201 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes); 199 brcmf_err("Invalid nbytes: %d\n", nbytes);
202 } else { /* CMD52 Read */ 200 } else { /* CMD52 Read */
203 if (nbytes == 4) 201 if (nbytes == 4)
204 *word = sdio_readl(sdiodev->func[func], addr, &err_ret); 202 *word = sdio_readl(sdiodev->func[func], addr, &err_ret);
@@ -206,11 +204,11 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
206 *word = sdio_readw(sdiodev->func[func], addr, 204 *word = sdio_readw(sdiodev->func[func], addr,
207 &err_ret) & 0xFFFF; 205 &err_ret) & 0xFFFF;
208 else 206 else
209 brcmf_dbg(ERROR, "Invalid nbytes: %d\n", nbytes); 207 brcmf_err("Invalid nbytes: %d\n", nbytes);
210 } 208 }
211 209
212 if (err_ret) 210 if (err_ret)
213 brcmf_dbg(ERROR, "Failed to %s word, Err: 0x%08x\n", 211 brcmf_err("Failed to %s word, Err: 0x%08x\n",
214 rw ? "write" : "read", err_ret); 212 rw ? "write" : "read", err_ret);
215 213
216 return err_ret; 214 return err_ret;
@@ -270,7 +268,7 @@ brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
270 err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func, 268 err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
271 addr, pkt, pkt_len); 269 addr, pkt, pkt_len);
272 if (err_ret) { 270 if (err_ret) {
273 brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", 271 brcmf_err("%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
274 write ? "TX" : "RX", pkt, SGCount, addr, 272 write ? "TX" : "RX", pkt, SGCount, addr,
275 pkt_len, err_ret); 273 pkt_len, err_ret);
276 } else { 274 } else {
@@ -315,7 +313,7 @@ int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
315 status = brcmf_sdioh_request_data(sdiodev, write, fifo, func, 313 status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
316 addr, pkt, pkt_len); 314 addr, pkt, pkt_len);
317 if (status) { 315 if (status) {
318 brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n", 316 brcmf_err("%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
319 write ? "TX" : "RX", pkt, addr, pkt_len, status); 317 write ? "TX" : "RX", pkt, addr, pkt_len, status);
320 } else { 318 } else {
321 brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n", 319 brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
@@ -336,7 +334,7 @@ static int brcmf_sdioh_get_cisaddr(struct brcmf_sdio_dev *sdiodev, u32 regaddr)
336 for (i = 0; i < 3; i++) { 334 for (i = 0; i < 3; i++) {
337 regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret); 335 regdata = brcmf_sdio_regrl(sdiodev, regaddr, &ret);
338 if (ret != 0) 336 if (ret != 0)
339 brcmf_dbg(ERROR, "Can't read!\n"); 337 brcmf_err("Can't read!\n");
340 338
341 *ptr++ = (u8) regdata; 339 *ptr++ = (u8) regdata;
342 regaddr++; 340 regaddr++;
@@ -372,11 +370,9 @@ static int brcmf_sdioh_enablefuncs(struct brcmf_sdio_dev *sdiodev)
372 } 370 }
373 371
374 /* Enable Function 1 */ 372 /* Enable Function 1 */
375 sdio_claim_host(sdiodev->func[1]);
376 err_ret = sdio_enable_func(sdiodev->func[1]); 373 err_ret = sdio_enable_func(sdiodev->func[1]);
377 sdio_release_host(sdiodev->func[1]);
378 if (err_ret) 374 if (err_ret)
379 brcmf_dbg(ERROR, "Failed to enable F1 Err: 0x%08x\n", err_ret); 375 brcmf_err("Failed to enable F1 Err: 0x%08x\n", err_ret);
380 376
381 return false; 377 return false;
382} 378}
@@ -393,24 +389,23 @@ int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev)
393 sdiodev->num_funcs = 2; 389 sdiodev->num_funcs = 2;
394 390
395 sdio_claim_host(sdiodev->func[1]); 391 sdio_claim_host(sdiodev->func[1]);
392
396 err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE); 393 err_ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
397 sdio_release_host(sdiodev->func[1]);
398 if (err_ret) { 394 if (err_ret) {
399 brcmf_dbg(ERROR, "Failed to set F1 blocksize\n"); 395 brcmf_err("Failed to set F1 blocksize\n");
400 goto out; 396 goto out;
401 } 397 }
402 398
403 sdio_claim_host(sdiodev->func[2]);
404 err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE); 399 err_ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
405 sdio_release_host(sdiodev->func[2]);
406 if (err_ret) { 400 if (err_ret) {
407 brcmf_dbg(ERROR, "Failed to set F2 blocksize\n"); 401 brcmf_err("Failed to set F2 blocksize\n");
408 goto out; 402 goto out;
409 } 403 }
410 404
411 brcmf_sdioh_enablefuncs(sdiodev); 405 brcmf_sdioh_enablefuncs(sdiodev);
412 406
413out: 407out:
408 sdio_release_host(sdiodev->func[1]);
414 brcmf_dbg(TRACE, "Done\n"); 409 brcmf_dbg(TRACE, "Done\n");
415 return err_ret; 410 return err_ret;
416} 411}
@@ -437,7 +432,7 @@ static int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
437 struct brcmf_sdio_oobirq *oobirq_entry; 432 struct brcmf_sdio_oobirq *oobirq_entry;
438 433
439 if (list_empty(&oobirq_lh)) { 434 if (list_empty(&oobirq_lh)) {
440 brcmf_dbg(ERROR, "no valid oob irq resource\n"); 435 brcmf_err("no valid oob irq resource\n");
441 return -ENXIO; 436 return -ENXIO;
442 } 437 }
443 438
@@ -459,95 +454,106 @@ static inline int brcmf_sdio_getintrcfg(struct brcmf_sdio_dev *sdiodev)
459#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ 454#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
460 455
461static int brcmf_ops_sdio_probe(struct sdio_func *func, 456static int brcmf_ops_sdio_probe(struct sdio_func *func,
462 const struct sdio_device_id *id) 457 const struct sdio_device_id *id)
463{ 458{
464 int ret = 0; 459 int err;
465 struct brcmf_sdio_dev *sdiodev; 460 struct brcmf_sdio_dev *sdiodev;
466 struct brcmf_bus *bus_if; 461 struct brcmf_bus *bus_if;
467 462
468 brcmf_dbg(TRACE, "Enter\n"); 463 brcmf_dbg(TRACE, "Enter\n");
469 brcmf_dbg(TRACE, "func->class=%x\n", func->class); 464 brcmf_dbg(TRACE, "Class=%x\n", func->class);
470 brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor); 465 brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
471 brcmf_dbg(TRACE, "sdio_device: 0x%04x\n", func->device); 466 brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
472 brcmf_dbg(TRACE, "Function#: 0x%04x\n", func->num); 467 brcmf_dbg(TRACE, "Function#: %d\n", func->num);
473 468
474 if (func->num == 1) { 469 /* Consume func num 1 but dont do anything with it. */
475 if (dev_get_drvdata(&func->card->dev)) { 470 if (func->num == 1)
476 brcmf_dbg(ERROR, "card private drvdata occupied\n"); 471 return 0;
477 return -ENXIO; 472
478 } 473 /* Ignore anything but func 2 */
479 bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL); 474 if (func->num != 2)
480 if (!bus_if) 475 return -ENODEV;
481 return -ENOMEM; 476
482 sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL); 477 bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
483 if (!sdiodev) { 478 if (!bus_if)
484 kfree(bus_if); 479 return -ENOMEM;
485 return -ENOMEM; 480 sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
486 } 481 if (!sdiodev) {
487 sdiodev->func[0] = func; 482 kfree(bus_if);
488 sdiodev->func[1] = func; 483 return -ENOMEM;
489 sdiodev->bus_if = bus_if;
490 bus_if->bus_priv.sdio = sdiodev;
491 bus_if->type = SDIO_BUS;
492 bus_if->align = BRCMF_SDALIGN;
493 dev_set_drvdata(&func->card->dev, sdiodev);
494
495 atomic_set(&sdiodev->suspend, false);
496 init_waitqueue_head(&sdiodev->request_byte_wait);
497 init_waitqueue_head(&sdiodev->request_word_wait);
498 init_waitqueue_head(&sdiodev->request_chain_wait);
499 init_waitqueue_head(&sdiodev->request_buffer_wait);
500 } 484 }
501 485
502 if (func->num == 2) { 486 sdiodev->func[0] = func->card->sdio_func[0];
503 sdiodev = dev_get_drvdata(&func->card->dev); 487 sdiodev->func[1] = func->card->sdio_func[0];
504 if ((!sdiodev) || (sdiodev->func[1]->card != func->card)) 488 sdiodev->func[2] = func;
505 return -ENODEV;
506
507 ret = brcmf_sdio_getintrcfg(sdiodev);
508 if (ret)
509 return ret;
510 sdiodev->func[2] = func;
511 489
512 bus_if = sdiodev->bus_if; 490 sdiodev->bus_if = bus_if;
513 sdiodev->dev = &func->dev; 491 bus_if->bus_priv.sdio = sdiodev;
514 dev_set_drvdata(&func->dev, bus_if); 492 bus_if->align = BRCMF_SDALIGN;
493 dev_set_drvdata(&func->dev, bus_if);
494 dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
495 sdiodev->dev = &sdiodev->func[1]->dev;
515 496
516 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n"); 497 atomic_set(&sdiodev->suspend, false);
517 ret = brcmf_sdio_probe(sdiodev); 498 init_waitqueue_head(&sdiodev->request_byte_wait);
499 init_waitqueue_head(&sdiodev->request_word_wait);
500 init_waitqueue_head(&sdiodev->request_chain_wait);
501 init_waitqueue_head(&sdiodev->request_buffer_wait);
502 err = brcmf_sdio_getintrcfg(sdiodev);
503 if (err)
504 goto fail;
505
506 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
507 err = brcmf_sdio_probe(sdiodev);
508 if (err) {
509 brcmf_err("F2 error, probe failed %d...\n", err);
510 goto fail;
518 } 511 }
512 brcmf_dbg(TRACE, "F2 init completed...\n");
513 return 0;
519 514
520 return ret; 515fail:
516 dev_set_drvdata(&func->dev, NULL);
517 dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
518 kfree(sdiodev);
519 kfree(bus_if);
520 return err;
521} 521}
522 522
523static void brcmf_ops_sdio_remove(struct sdio_func *func) 523static void brcmf_ops_sdio_remove(struct sdio_func *func)
524{ 524{
525 struct brcmf_bus *bus_if; 525 struct brcmf_bus *bus_if;
526 struct brcmf_sdio_dev *sdiodev; 526 struct brcmf_sdio_dev *sdiodev;
527
527 brcmf_dbg(TRACE, "Enter\n"); 528 brcmf_dbg(TRACE, "Enter\n");
528 brcmf_dbg(INFO, "func->class=%x\n", func->class); 529 brcmf_dbg(TRACE, "sdio vendor ID: 0x%04x\n", func->vendor);
529 brcmf_dbg(INFO, "sdio_vendor: 0x%04x\n", func->vendor); 530 brcmf_dbg(TRACE, "sdio device ID: 0x%04x\n", func->device);
530 brcmf_dbg(INFO, "sdio_device: 0x%04x\n", func->device); 531 brcmf_dbg(TRACE, "Function: %d\n", func->num);
531 brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
532 532
533 if (func->num == 2) { 533 if (func->num != 1 && func->num != 2)
534 bus_if = dev_get_drvdata(&func->dev); 534 return;
535
536 bus_if = dev_get_drvdata(&func->dev);
537 if (bus_if) {
535 sdiodev = bus_if->bus_priv.sdio; 538 sdiodev = bus_if->bus_priv.sdio;
536 brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
537 brcmf_sdio_remove(sdiodev); 539 brcmf_sdio_remove(sdiodev);
538 dev_set_drvdata(&func->card->dev, NULL); 540
539 dev_set_drvdata(&func->dev, NULL); 541 dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
542 dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
543
540 kfree(bus_if); 544 kfree(bus_if);
541 kfree(sdiodev); 545 kfree(sdiodev);
542 } 546 }
547
548 brcmf_dbg(TRACE, "Exit\n");
543} 549}
544 550
545#ifdef CONFIG_PM_SLEEP 551#ifdef CONFIG_PM_SLEEP
546static int brcmf_sdio_suspend(struct device *dev) 552static int brcmf_sdio_suspend(struct device *dev)
547{ 553{
548 mmc_pm_flag_t sdio_flags; 554 mmc_pm_flag_t sdio_flags;
549 struct sdio_func *func = dev_to_sdio_func(dev); 555 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
550 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); 556 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
551 int ret = 0; 557 int ret = 0;
552 558
553 brcmf_dbg(TRACE, "\n"); 559 brcmf_dbg(TRACE, "\n");
@@ -556,13 +562,13 @@ static int brcmf_sdio_suspend(struct device *dev)
556 562
557 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]); 563 sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
558 if (!(sdio_flags & MMC_PM_KEEP_POWER)) { 564 if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
559 brcmf_dbg(ERROR, "Host can't keep power while suspended\n"); 565 brcmf_err("Host can't keep power while suspended\n");
560 return -EINVAL; 566 return -EINVAL;
561 } 567 }
562 568
563 ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER); 569 ret = sdio_set_host_pm_flags(sdiodev->func[1], MMC_PM_KEEP_POWER);
564 if (ret) { 570 if (ret) {
565 brcmf_dbg(ERROR, "Failed to set pm_flags\n"); 571 brcmf_err("Failed to set pm_flags\n");
566 return ret; 572 return ret;
567 } 573 }
568 574
@@ -573,8 +579,8 @@ static int brcmf_sdio_suspend(struct device *dev)
573 579
574static int brcmf_sdio_resume(struct device *dev) 580static int brcmf_sdio_resume(struct device *dev)
575{ 581{
576 struct sdio_func *func = dev_to_sdio_func(dev); 582 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
577 struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev); 583 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
578 584
579 brcmf_sdio_wdtmr_enable(sdiodev, true); 585 brcmf_sdio_wdtmr_enable(sdiodev, true);
580 atomic_set(&sdiodev->suspend, false); 586 atomic_set(&sdiodev->suspend, false);
@@ -627,7 +633,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
627 ret = sdio_register_driver(&brcmf_sdmmc_driver); 633 ret = sdio_register_driver(&brcmf_sdmmc_driver);
628 634
629 if (ret) 635 if (ret)
630 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); 636 brcmf_err("sdio_register_driver failed: %d\n", ret);
631 637
632 return ret; 638 return ret;
633} 639}
@@ -657,7 +663,7 @@ void brcmf_sdio_init(void)
657 ret = platform_driver_register(&brcmf_sdio_pd); 663 ret = platform_driver_register(&brcmf_sdio_pd);
658 664
659 if (ret) 665 if (ret)
660 brcmf_dbg(ERROR, "platform_driver_register failed: %d\n", ret); 666 brcmf_err("platform_driver_register failed: %d\n", ret);
661} 667}
662#else 668#else
663void brcmf_sdio_exit(void) 669void brcmf_sdio_exit(void)
@@ -676,6 +682,6 @@ void brcmf_sdio_init(void)
676 ret = sdio_register_driver(&brcmf_sdmmc_driver); 682 ret = sdio_register_driver(&brcmf_sdmmc_driver);
677 683
678 if (ret) 684 if (ret)
679 brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret); 685 brcmf_err("sdio_register_driver failed: %d\n", ret);
680} 686}
681#endif /* CONFIG_BRCMFMAC_SDIO_OOB */ 687#endif /* CONFIG_BRCMFMAC_SDIO_OOB */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 17e7ae73e008..fd672bf53867 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -23,6 +23,8 @@
23 23
24#define BRCMF_VERSION_STR "4.218.248.5" 24#define BRCMF_VERSION_STR "4.218.248.5"
25 25
26#include "fweh.h"
27
26/******************************************************************************* 28/*******************************************************************************
27 * IO codes that are interpreted by dongle firmware 29 * IO codes that are interpreted by dongle firmware
28 ******************************************************************************/ 30 ******************************************************************************/
@@ -38,8 +40,11 @@
38#define BRCMF_C_GET_SSID 25 40#define BRCMF_C_GET_SSID 25
39#define BRCMF_C_SET_SSID 26 41#define BRCMF_C_SET_SSID 26
40#define BRCMF_C_GET_CHANNEL 29 42#define BRCMF_C_GET_CHANNEL 29
43#define BRCMF_C_SET_CHANNEL 30
41#define BRCMF_C_GET_SRL 31 44#define BRCMF_C_GET_SRL 31
45#define BRCMF_C_SET_SRL 32
42#define BRCMF_C_GET_LRL 33 46#define BRCMF_C_GET_LRL 33
47#define BRCMF_C_SET_LRL 34
43#define BRCMF_C_GET_RADIO 37 48#define BRCMF_C_GET_RADIO 37
44#define BRCMF_C_SET_RADIO 38 49#define BRCMF_C_SET_RADIO 38
45#define BRCMF_C_GET_PHYTYPE 39 50#define BRCMF_C_GET_PHYTYPE 39
@@ -58,6 +63,7 @@
58#define BRCMF_C_SET_COUNTRY 84 63#define BRCMF_C_SET_COUNTRY 84
59#define BRCMF_C_GET_PM 85 64#define BRCMF_C_GET_PM 85
60#define BRCMF_C_SET_PM 86 65#define BRCMF_C_SET_PM 86
66#define BRCMF_C_GET_CURR_RATESET 114
61#define BRCMF_C_GET_AP 117 67#define BRCMF_C_GET_AP 117
62#define BRCMF_C_SET_AP 118 68#define BRCMF_C_SET_AP 118
63#define BRCMF_C_GET_RSSI 127 69#define BRCMF_C_GET_RSSI 127
@@ -65,6 +71,7 @@
65#define BRCMF_C_SET_WSEC 134 71#define BRCMF_C_SET_WSEC 134
66#define BRCMF_C_GET_PHY_NOISE 135 72#define BRCMF_C_GET_PHY_NOISE 135
67#define BRCMF_C_GET_BSS_INFO 136 73#define BRCMF_C_GET_BSS_INFO 136
74#define BRCMF_C_GET_PHYLIST 180
68#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185 75#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
69#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187 76#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
70#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201 77#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
@@ -100,29 +107,8 @@
100#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff 107#define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff
101#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 108#define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16
102 109
103#define BRCMF_SCAN_ACTION_START 1
104#define BRCMF_SCAN_ACTION_CONTINUE 2
105#define WL_SCAN_ACTION_ABORT 3
106
107#define BRCMF_ISCAN_REQ_VERSION 1
108
109/* brcmf_iscan_results status values */
110#define BRCMF_SCAN_RESULTS_SUCCESS 0
111#define BRCMF_SCAN_RESULTS_PARTIAL 1
112#define BRCMF_SCAN_RESULTS_PENDING 2
113#define BRCMF_SCAN_RESULTS_ABORTED 3
114#define BRCMF_SCAN_RESULTS_NO_MEM 4
115
116/* Indicates this key is using soft encrypt */
117#define WL_SOFT_KEY (1 << 0)
118/* primary (ie tx) key */ 110/* primary (ie tx) key */
119#define BRCMF_PRIMARY_KEY (1 << 1) 111#define BRCMF_PRIMARY_KEY (1 << 1)
120/* Reserved for backward compat */
121#define WL_KF_RES_4 (1 << 4)
122/* Reserved for backward compat */
123#define WL_KF_RES_5 (1 << 5)
124/* Indicates a group key for a IBSS PEER */
125#define WL_IBSS_PEER_GROUP_KEY (1 << 6)
126 112
127/* For supporting multiple interfaces */ 113/* For supporting multiple interfaces */
128#define BRCMF_MAX_IFS 16 114#define BRCMF_MAX_IFS 16
@@ -130,10 +116,6 @@
130#define DOT11_BSSTYPE_ANY 2 116#define DOT11_BSSTYPE_ANY 2
131#define DOT11_MAX_DEFAULT_KEYS 4 117#define DOT11_MAX_DEFAULT_KEYS 4
132 118
133#define BRCMF_EVENT_MSG_LINK 0x01
134#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
135#define BRCMF_EVENT_MSG_GROUP 0x04
136
137#define BRCMF_ESCAN_REQ_VERSION 1 119#define BRCMF_ESCAN_REQ_VERSION 1
138 120
139#define WLC_BSS_RSSI_ON_CHANNEL 0x0002 121#define WLC_BSS_RSSI_ON_CHANNEL 0x0002
@@ -141,108 +123,6 @@
141#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */ 123#define BRCMF_MAXRATES_IN_SET 16 /* max # of rates in rateset */
142#define BRCMF_STA_ASSOC 0x10 /* Associated */ 124#define BRCMF_STA_ASSOC 0x10 /* Associated */
143 125
144struct brcmf_event_msg {
145 __be16 version;
146 __be16 flags;
147 __be32 event_type;
148 __be32 status;
149 __be32 reason;
150 __be32 auth_type;
151 __be32 datalen;
152 u8 addr[ETH_ALEN];
153 char ifname[IFNAMSIZ];
154 u8 ifidx;
155 u8 bsscfgidx;
156} __packed;
157
158struct brcm_ethhdr {
159 u16 subtype;
160 u16 length;
161 u8 version;
162 u8 oui[3];
163 u16 usr_subtype;
164} __packed;
165
166struct brcmf_event {
167 struct ethhdr eth;
168 struct brcm_ethhdr hdr;
169 struct brcmf_event_msg msg;
170} __packed;
171
172/* event codes sent by the dongle to this driver */
173#define BRCMF_E_SET_SSID 0
174#define BRCMF_E_JOIN 1
175#define BRCMF_E_START 2
176#define BRCMF_E_AUTH 3
177#define BRCMF_E_AUTH_IND 4
178#define BRCMF_E_DEAUTH 5
179#define BRCMF_E_DEAUTH_IND 6
180#define BRCMF_E_ASSOC 7
181#define BRCMF_E_ASSOC_IND 8
182#define BRCMF_E_REASSOC 9
183#define BRCMF_E_REASSOC_IND 10
184#define BRCMF_E_DISASSOC 11
185#define BRCMF_E_DISASSOC_IND 12
186#define BRCMF_E_QUIET_START 13
187#define BRCMF_E_QUIET_END 14
188#define BRCMF_E_BEACON_RX 15
189#define BRCMF_E_LINK 16
190#define BRCMF_E_MIC_ERROR 17
191#define BRCMF_E_NDIS_LINK 18
192#define BRCMF_E_ROAM 19
193#define BRCMF_E_TXFAIL 20
194#define BRCMF_E_PMKID_CACHE 21
195#define BRCMF_E_RETROGRADE_TSF 22
196#define BRCMF_E_PRUNE 23
197#define BRCMF_E_AUTOAUTH 24
198#define BRCMF_E_EAPOL_MSG 25
199#define BRCMF_E_SCAN_COMPLETE 26
200#define BRCMF_E_ADDTS_IND 27
201#define BRCMF_E_DELTS_IND 28
202#define BRCMF_E_BCNSENT_IND 29
203#define BRCMF_E_BCNRX_MSG 30
204#define BRCMF_E_BCNLOST_MSG 31
205#define BRCMF_E_ROAM_PREP 32
206#define BRCMF_E_PFN_NET_FOUND 33
207#define BRCMF_E_PFN_NET_LOST 34
208#define BRCMF_E_RESET_COMPLETE 35
209#define BRCMF_E_JOIN_START 36
210#define BRCMF_E_ROAM_START 37
211#define BRCMF_E_ASSOC_START 38
212#define BRCMF_E_IBSS_ASSOC 39
213#define BRCMF_E_RADIO 40
214#define BRCMF_E_PSM_WATCHDOG 41
215#define BRCMF_E_PROBREQ_MSG 44
216#define BRCMF_E_SCAN_CONFIRM_IND 45
217#define BRCMF_E_PSK_SUP 46
218#define BRCMF_E_COUNTRY_CODE_CHANGED 47
219#define BRCMF_E_EXCEEDED_MEDIUM_TIME 48
220#define BRCMF_E_ICV_ERROR 49
221#define BRCMF_E_UNICAST_DECODE_ERROR 50
222#define BRCMF_E_MULTICAST_DECODE_ERROR 51
223#define BRCMF_E_TRACE 52
224#define BRCMF_E_IF 54
225#define BRCMF_E_RSSI 56
226#define BRCMF_E_PFN_SCAN_COMPLETE 57
227#define BRCMF_E_EXTLOG_MSG 58
228#define BRCMF_E_ACTION_FRAME 59
229#define BRCMF_E_ACTION_FRAME_COMPLETE 60
230#define BRCMF_E_PRE_ASSOC_IND 61
231#define BRCMF_E_PRE_REASSOC_IND 62
232#define BRCMF_E_CHANNEL_ADOPTED 63
233#define BRCMF_E_AP_STARTED 64
234#define BRCMF_E_DFS_AP_STOP 65
235#define BRCMF_E_DFS_AP_RESUME 66
236#define BRCMF_E_RESERVED1 67
237#define BRCMF_E_RESERVED2 68
238#define BRCMF_E_ESCAN_RESULT 69
239#define BRCMF_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70
240#define BRCMF_E_DCS_REQUEST 73
241
242#define BRCMF_E_FIFO_CREDIT_MAP 74
243
244#define BRCMF_E_LAST 75
245
246#define BRCMF_E_STATUS_SUCCESS 0 126#define BRCMF_E_STATUS_SUCCESS 0
247#define BRCMF_E_STATUS_FAIL 1 127#define BRCMF_E_STATUS_FAIL 1
248#define BRCMF_E_STATUS_TIMEOUT 2 128#define BRCMF_E_STATUS_TIMEOUT 2
@@ -318,6 +198,12 @@ struct brcmf_event {
318#define BRCMF_E_LINK_ASSOC_REC 3 198#define BRCMF_E_LINK_ASSOC_REC 3
319#define BRCMF_E_LINK_BSSCFG_DIS 4 199#define BRCMF_E_LINK_BSSCFG_DIS 4
320 200
201/* Small, medium and maximum buffer size for dcmd
202 */
203#define BRCMF_DCMD_SMLEN 256
204#define BRCMF_DCMD_MEDLEN 1536
205#define BRCMF_DCMD_MAXLEN 8192
206
321/* Pattern matching filter. Specifies an offset within received packets to 207/* Pattern matching filter. Specifies an offset within received packets to
322 * start matching, the pattern to match, the size of the pattern, and a bitmask 208 * start matching, the pattern to match, the size of the pattern, and a bitmask
323 * that indicates which bits within the pattern should be matched. 209 * that indicates which bits within the pattern should be matched.
@@ -397,7 +283,7 @@ struct brcm_rateset_le {
397 /* # rates in this set */ 283 /* # rates in this set */
398 __le32 count; 284 __le32 count;
399 /* rates in 500kbps units w/hi bit set if basic */ 285 /* rates in 500kbps units w/hi bit set if basic */
400 u8 rates[WL_NUMRATES]; 286 u8 rates[BRCMF_MAXRATES_IN_SET];
401}; 287};
402 288
403struct brcmf_ssid { 289struct brcmf_ssid {
@@ -446,14 +332,6 @@ struct brcmf_scan_params_le {
446 __le16 channel_list[1]; /* list of chanspecs */ 332 __le16 channel_list[1]; /* list of chanspecs */
447}; 333};
448 334
449/* incremental scan struct */
450struct brcmf_iscan_params_le {
451 __le32 version;
452 __le16 action;
453 __le16 scan_duration;
454 struct brcmf_scan_params_le params_le;
455};
456
457struct brcmf_scan_results { 335struct brcmf_scan_results {
458 u32 buflen; 336 u32 buflen;
459 u32 version; 337 u32 version;
@@ -461,12 +339,6 @@ struct brcmf_scan_results {
461 struct brcmf_bss_info_le bss_info_le[]; 339 struct brcmf_bss_info_le bss_info_le[];
462}; 340};
463 341
464struct brcmf_scan_results_le {
465 __le32 buflen;
466 __le32 version;
467 __le32 count;
468};
469
470struct brcmf_escan_params_le { 342struct brcmf_escan_params_le {
471 __le32 version; 343 __le32 version;
472 __le16 action; 344 __le16 action;
@@ -502,23 +374,6 @@ struct brcmf_join_params {
502 struct brcmf_assoc_params_le params_le; 374 struct brcmf_assoc_params_le params_le;
503}; 375};
504 376
505/* incremental scan results struct */
506struct brcmf_iscan_results {
507 union {
508 u32 status;
509 __le32 status_le;
510 };
511 union {
512 struct brcmf_scan_results results;
513 struct brcmf_scan_results_le results_le;
514 };
515};
516
517/* size of brcmf_iscan_results not including variable length array */
518#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
519 (sizeof(struct brcmf_scan_results) + \
520 offsetof(struct brcmf_iscan_results, results))
521
522struct brcmf_wsec_key { 377struct brcmf_wsec_key {
523 u32 index; /* key index */ 378 u32 index; /* key index */
524 u32 len; /* key length */ 379 u32 len; /* key length */
@@ -615,7 +470,6 @@ struct brcmf_pub {
615 struct brcmf_bus *bus_if; 470 struct brcmf_bus *bus_if;
616 struct brcmf_proto *prot; 471 struct brcmf_proto *prot;
617 struct brcmf_cfg80211_info *config; 472 struct brcmf_cfg80211_info *config;
618 struct device *dev; /* fullmac dongle device pointer */
619 473
620 /* Internal brcmf items */ 474 /* Internal brcmf items */
621 uint hdrlen; /* Total BRCMF header length (proto + bus) */ 475 uint hdrlen; /* Total BRCMF header length (proto + bus) */
@@ -623,7 +477,6 @@ struct brcmf_pub {
623 u8 wme_dp; /* wme discard priority */ 477 u8 wme_dp; /* wme discard priority */
624 478
625 /* Dongle media info */ 479 /* Dongle media info */
626 bool iswl; /* Dongle-resident driver is wl */
627 unsigned long drv_version; /* Version of dongle-resident driver */ 480 unsigned long drv_version; /* Version of dongle-resident driver */
628 u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */ 481 u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
629 482
@@ -651,26 +504,26 @@ struct brcmf_pub {
651 int in_suspend; /* flag set to 1 when early suspend called */ 504 int in_suspend; /* flag set to 1 when early suspend called */
652 int dtim_skip; /* dtim skip , default 0 means wake each dtim */ 505 int dtim_skip; /* dtim skip , default 0 means wake each dtim */
653 506
654 /* Pkt filter defination */
655 char *pktfilter[100];
656 int pktfilter_count;
657
658 u8 country_code[BRCM_CNTRY_BUF_SZ];
659 char eventmask[BRCMF_EVENTING_MASK_LEN];
660
661 struct brcmf_if *iflist[BRCMF_MAX_IFS]; 507 struct brcmf_if *iflist[BRCMF_MAX_IFS];
662 508
663 struct mutex proto_block; 509 struct mutex proto_block;
510 unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
664 511
665 struct work_struct setmacaddr_work;
666 struct work_struct multicast_work;
667 u8 macvalue[ETH_ALEN]; 512 u8 macvalue[ETH_ALEN];
668 atomic_t pend_8021x_cnt; 513 atomic_t pend_8021x_cnt;
514 wait_queue_head_t pend_8021x_wait;
515
516 struct brcmf_fweh_info fweh;
669#ifdef DEBUG 517#ifdef DEBUG
670 struct dentry *dbgfs_dir; 518 struct dentry *dbgfs_dir;
671#endif 519#endif
672}; 520};
673 521
522struct bcmevent_name {
523 uint event;
524 const char *name;
525};
526
674struct brcmf_if_event { 527struct brcmf_if_event {
675 u8 ifidx; 528 u8 ifidx;
676 u8 action; 529 u8 action;
@@ -678,47 +531,54 @@ struct brcmf_if_event {
678 u8 bssidx; 531 u8 bssidx;
679}; 532};
680 533
681struct bcmevent_name { 534/* forward declaration */
682 uint event; 535struct brcmf_cfg80211_vif;
683 const char *name; 536
537/**
538 * struct brcmf_if - interface control information.
539 *
540 * @drvr: points to device related information.
541 * @vif: points to cfg80211 specific interface information.
542 * @ndev: associated network device.
543 * @stats: interface specific network statistics.
544 * @idx: interface index in device firmware.
545 * @bssidx: index of bss associated with this interface.
546 * @mac_addr: assigned mac address.
547 */
548struct brcmf_if {
549 struct brcmf_pub *drvr;
550 struct brcmf_cfg80211_vif *vif;
551 struct net_device *ndev;
552 struct net_device_stats stats;
553 struct work_struct setmacaddr_work;
554 struct work_struct multicast_work;
555 int idx;
556 s32 bssidx;
557 u8 mac_addr[ETH_ALEN];
684}; 558};
685 559
686extern const struct bcmevent_name bcmevent_names[]; 560static inline s32 brcmf_ndev_bssidx(struct net_device *ndev)
561{
562 struct brcmf_if *ifp = netdev_priv(ndev);
563 return ifp->bssidx;
564}
687 565
688extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, 566extern const struct bcmevent_name bcmevent_names[];
689 char *buf, uint len);
690extern uint brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
691 char *buf, uint buflen, s32 bssidx);
692 567
693extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 568extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
694 569
695extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
696extern int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd);
697
698/* Return pointer to interface name */ 570/* Return pointer to interface name */
699extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx); 571extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
700 572
701/* Query dongle */ 573/* Query dongle */
702extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, 574extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
703 uint cmd, void *buf, uint len); 575 uint cmd, void *buf, uint len);
576extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
577 void *buf, uint len);
704 578
705#ifdef DEBUG 579extern int brcmf_net_attach(struct brcmf_if *ifp);
706extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size); 580extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx,
707#endif /* DEBUG */ 581 s32 bssidx, char *name, u8 *mac_addr);
708
709extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
710extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
711 void *pktdata, struct brcmf_event_msg *,
712 void **data_ptr);
713
714extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx); 582extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
715 583
716extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
717extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
718 int enable, int master_mode);
719
720#define BRCMF_DCMD_SMLEN 256 /* "small" cmd buffer required */
721#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
722#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
723
724#endif /* _BRCMF_H_ */ 584#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 9b8ee19ea55d..dd38b78a9726 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -43,37 +43,90 @@ struct brcmf_bus_dcmd {
43 struct list_head list; 43 struct list_head list;
44}; 44};
45 45
46/* interface structure between common and bus layer */ 46/**
47 * struct brcmf_bus_ops - bus callback operations.
48 *
49 * @init: prepare for communication with dongle.
50 * @stop: clear pending frames, disable data flow.
51 * @txdata: send a data frame to the dongle (callee disposes skb).
52 * @txctl: transmit a control request message to dongle.
53 * @rxctl: receive a control response message from dongle.
54 *
55 * This structure provides an abstract interface towards the
56 * bus specific driver. For control messages to common driver
57 * will assure there is only one active transaction.
58 */
59struct brcmf_bus_ops {
60 int (*init)(struct device *dev);
61 void (*stop)(struct device *dev);
62 int (*txdata)(struct device *dev, struct sk_buff *skb);
63 int (*txctl)(struct device *dev, unsigned char *msg, uint len);
64 int (*rxctl)(struct device *dev, unsigned char *msg, uint len);
65};
66
67/**
68 * struct brcmf_bus - interface structure between common and bus layer
69 *
70 * @bus_priv: pointer to private bus device.
71 * @dev: device pointer of bus device.
72 * @drvr: public driver information.
73 * @state: operational state of the bus interface.
74 * @maxctl: maximum size for rxctl request message.
75 * @drvr_up: indicates driver up/down status.
76 * @tx_realloc: number of tx packets realloced for headroom.
77 * @dstats: dongle-based statistical data.
78 * @align: alignment requirement for the bus.
79 * @dcmd_list: bus/device specific dongle initialization commands.
80 */
47struct brcmf_bus { 81struct brcmf_bus {
48 u8 type; /* bus type */
49 union { 82 union {
50 struct brcmf_sdio_dev *sdio; 83 struct brcmf_sdio_dev *sdio;
51 struct brcmf_usbdev *usb; 84 struct brcmf_usbdev *usb;
52 } bus_priv; 85 } bus_priv;
53 struct brcmf_pub *drvr; /* pointer to driver pub structure brcmf_pub */ 86 struct device *dev;
87 struct brcmf_pub *drvr;
54 enum brcmf_bus_state state; 88 enum brcmf_bus_state state;
55 uint maxctl; /* Max size rxctl request from proto to bus */ 89 uint maxctl;
56 bool drvr_up; /* Status flag of driver up/down */ 90 bool drvr_up;
57 unsigned long tx_realloc; /* Tx packets realloced for headroom */ 91 unsigned long tx_realloc;
58 struct dngl_stats dstats; /* Stats for dongle-based data */ 92 struct dngl_stats dstats;
59 u8 align; /* bus alignment requirement */ 93 u8 align;
60 struct list_head dcmd_list; 94 struct list_head dcmd_list;
61 95
62 /* interface functions pointers */ 96 struct brcmf_bus_ops *ops;
63 /* Stop bus module: clear pending frames, disable data flow */
64 void (*brcmf_bus_stop)(struct device *);
65 /* Initialize bus module: prepare for communication w/dongle */
66 int (*brcmf_bus_init)(struct device *);
67 /* Send a data frame to the dongle. Callee disposes of txp. */
68 int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
69 /* Send/receive a control message to/from the dongle.
70 * Expects caller to enforce a single outstanding transaction.
71 */
72 int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
73 int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
74}; 97};
75 98
76/* 99/*
100 * callback wrappers
101 */
102static inline int brcmf_bus_init(struct brcmf_bus *bus)
103{
104 return bus->ops->init(bus->dev);
105}
106
107static inline void brcmf_bus_stop(struct brcmf_bus *bus)
108{
109 bus->ops->stop(bus->dev);
110}
111
112static inline int brcmf_bus_txdata(struct brcmf_bus *bus, struct sk_buff *skb)
113{
114 return bus->ops->txdata(bus->dev, skb);
115}
116
117static inline
118int brcmf_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
119{
120 return bus->ops->txctl(bus->dev, msg, len);
121}
122
123static inline
124int brcmf_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint len)
125{
126 return bus->ops->rxctl(bus->dev, msg, len);
127}
128
129/*
77 * interface functions from common layer 130 * interface functions from common layer
78 */ 131 */
79 132
@@ -85,7 +138,7 @@ extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
85 struct sk_buff *pkt, int prec); 138 struct sk_buff *pkt, int prec);
86 139
87/* Receive frame for delivery to OS. Callee disposes of rxp. */ 140/* Receive frame for delivery to OS. Callee disposes of rxp. */
88extern void brcmf_rx_frame(struct device *dev, int ifidx, 141extern void brcmf_rx_frame(struct device *dev, u8 ifidx,
89 struct sk_buff_head *rxlist); 142 struct sk_buff_head *rxlist);
90static inline void brcmf_rx_packet(struct device *dev, int ifidx, 143static inline void brcmf_rx_packet(struct device *dev, int ifidx,
91 struct sk_buff *pkt) 144 struct sk_buff *pkt)
@@ -111,9 +164,6 @@ extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
111 164
112extern int brcmf_bus_start(struct device *dev); 165extern int brcmf_bus_start(struct device *dev);
113 166
114extern int brcmf_add_if(struct device *dev, int ifidx,
115 char *name, u8 *mac_addr);
116
117#ifdef CONFIG_BRCMFMAC_SDIO 167#ifdef CONFIG_BRCMFMAC_SDIO
118extern void brcmf_sdio_exit(void); 168extern void brcmf_sdio_exit(void);
119extern void brcmf_sdio_init(void); 169extern void brcmf_sdio_init(void);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a5c15cac5e7d..83923553f1ac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -23,8 +23,6 @@
23 23
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#include <linux/sched.h>
27#include <defs.h>
28 26
29#include <brcmu_utils.h> 27#include <brcmu_utils.h>
30#include <brcmu_wifi.h> 28#include <brcmu_wifi.h>
@@ -119,9 +117,7 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
119 len = CDC_MAX_MSG_SIZE; 117 len = CDC_MAX_MSG_SIZE;
120 118
121 /* Send request */ 119 /* Send request */
122 return drvr->bus_if->brcmf_bus_txctl(drvr->dev, 120 return brcmf_bus_txctl(drvr->bus_if, (unsigned char *)&prot->msg, len);
123 (unsigned char *)&prot->msg,
124 len);
125} 121}
126 122
127static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len) 123static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -130,11 +126,10 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
130 struct brcmf_proto *prot = drvr->prot; 126 struct brcmf_proto *prot = drvr->prot;
131 127
132 brcmf_dbg(TRACE, "Enter\n"); 128 brcmf_dbg(TRACE, "Enter\n");
133 129 len += sizeof(struct brcmf_proto_cdc_dcmd);
134 do { 130 do {
135 ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev, 131 ret = brcmf_bus_rxctl(drvr->bus_if, (unsigned char *)&prot->msg,
136 (unsigned char *)&prot->msg, 132 len);
137 len + sizeof(struct brcmf_proto_cdc_dcmd));
138 if (ret < 0) 133 if (ret < 0)
139 break; 134 break;
140 } while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id); 135 } while (CDC_DCMD_ID(le32_to_cpu(prot->msg.flags)) != id);
@@ -181,7 +176,7 @@ brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
181 176
182 ret = brcmf_proto_cdc_msg(drvr); 177 ret = brcmf_proto_cdc_msg(drvr);
183 if (ret < 0) { 178 if (ret < 0) {
184 brcmf_dbg(ERROR, "brcmf_proto_cdc_msg failed w/status %d\n", 179 brcmf_err("brcmf_proto_cdc_msg failed w/status %d\n",
185 ret); 180 ret);
186 goto done; 181 goto done;
187 } 182 }
@@ -198,7 +193,7 @@ retry:
198 if ((id < prot->reqid) && (++retries < RETRIES)) 193 if ((id < prot->reqid) && (++retries < RETRIES))
199 goto retry; 194 goto retry;
200 if (id != prot->reqid) { 195 if (id != prot->reqid) {
201 brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n", 196 brcmf_err("%s: unexpected request id %d (expected %d)\n",
202 brcmf_ifname(drvr, ifidx), id, prot->reqid); 197 brcmf_ifname(drvr, ifidx), id, prot->reqid);
203 ret = -EINVAL; 198 ret = -EINVAL;
204 goto done; 199 goto done;
@@ -260,7 +255,7 @@ int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
260 id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT; 255 id = (flags & CDC_DCMD_ID_MASK) >> CDC_DCMD_ID_SHIFT;
261 256
262 if (id != prot->reqid) { 257 if (id != prot->reqid) {
263 brcmf_dbg(ERROR, "%s: unexpected request id %d (expected %d)\n", 258 brcmf_err("%s: unexpected request id %d (expected %d)\n",
264 brcmf_ifname(drvr, ifidx), id, prot->reqid); 259 brcmf_ifname(drvr, ifidx), id, prot->reqid);
265 ret = -EINVAL; 260 ret = -EINVAL;
266 goto done; 261 goto done;
@@ -277,76 +272,6 @@ done:
277 return ret; 272 return ret;
278} 273}
279 274
280int
281brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
282 int len)
283{
284 struct brcmf_proto *prot = drvr->prot;
285 int ret = -1;
286
287 if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
288 brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
289 return ret;
290 }
291 mutex_lock(&drvr->proto_block);
292
293 brcmf_dbg(TRACE, "Enter\n");
294
295 if (len > BRCMF_DCMD_MAXLEN)
296 goto done;
297
298 if (prot->pending == true) {
299 brcmf_dbg(TRACE, "CDC packet is pending!!!! cmd=0x%x (%lu) lastcmd=0x%x (%lu)\n",
300 dcmd->cmd, (unsigned long)dcmd->cmd, prot->lastcmd,
301 (unsigned long)prot->lastcmd);
302 if (dcmd->cmd == BRCMF_C_SET_VAR ||
303 dcmd->cmd == BRCMF_C_GET_VAR)
304 brcmf_dbg(TRACE, "iovar cmd=%s\n", (char *)dcmd->buf);
305
306 goto done;
307 }
308
309 prot->pending = true;
310 prot->lastcmd = dcmd->cmd;
311 if (dcmd->set)
312 ret = brcmf_proto_cdc_set_dcmd(drvr, ifidx, dcmd->cmd,
313 dcmd->buf, len);
314 else {
315 ret = brcmf_proto_cdc_query_dcmd(drvr, ifidx, dcmd->cmd,
316 dcmd->buf, len);
317 if (ret > 0)
318 dcmd->used = ret -
319 sizeof(struct brcmf_proto_cdc_dcmd);
320 }
321
322 if (ret >= 0)
323 ret = 0;
324 else {
325 struct brcmf_proto_cdc_dcmd *msg = &prot->msg;
326 /* len == needed when set/query fails from dongle */
327 dcmd->needed = le32_to_cpu(msg->len);
328 }
329
330 /* Intercept the wme_dp dongle cmd here */
331 if (!ret && dcmd->cmd == BRCMF_C_SET_VAR &&
332 !strcmp(dcmd->buf, "wme_dp")) {
333 int slen;
334 __le32 val = 0;
335
336 slen = strlen("wme_dp") + 1;
337 if (len >= (int)(slen + sizeof(int)))
338 memcpy(&val, (char *)dcmd->buf + slen, sizeof(int));
339 drvr->wme_dp = (u8) le32_to_cpu(val);
340 }
341
342 prot->pending = false;
343
344done:
345 mutex_unlock(&drvr->proto_block);
346
347 return ret;
348}
349
350static bool pkt_sum_needed(struct sk_buff *skb) 275static bool pkt_sum_needed(struct sk_buff *skb)
351{ 276{
352 return skb->ip_summed == CHECKSUM_PARTIAL; 277 return skb->ip_summed == CHECKSUM_PARTIAL;
@@ -392,7 +317,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
392 /* Pop BDC header used to convey priority for buses that don't */ 317 /* Pop BDC header used to convey priority for buses that don't */
393 318
394 if (pktbuf->len < BDC_HEADER_LEN) { 319 if (pktbuf->len < BDC_HEADER_LEN) {
395 brcmf_dbg(ERROR, "rx data too short (%d < %d)\n", 320 brcmf_err("rx data too short (%d < %d)\n",
396 pktbuf->len, BDC_HEADER_LEN); 321 pktbuf->len, BDC_HEADER_LEN);
397 return -EBADE; 322 return -EBADE;
398 } 323 }
@@ -401,13 +326,13 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
401 326
402 *ifidx = BDC_GET_IF_IDX(h); 327 *ifidx = BDC_GET_IF_IDX(h);
403 if (*ifidx >= BRCMF_MAX_IFS) { 328 if (*ifidx >= BRCMF_MAX_IFS) {
404 brcmf_dbg(ERROR, "rx data ifnum out of range (%d)\n", *ifidx); 329 brcmf_err("rx data ifnum out of range (%d)\n", *ifidx);
405 return -EBADE; 330 return -EBADE;
406 } 331 }
407 332
408 if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) != 333 if (((h->flags & BDC_FLAG_VER_MASK) >> BDC_FLAG_VER_SHIFT) !=
409 BDC_PROTO_VER) { 334 BDC_PROTO_VER) {
410 brcmf_dbg(ERROR, "%s: non-BDC packet received, flags 0x%x\n", 335 brcmf_err("%s: non-BDC packet received, flags 0x%x\n",
411 brcmf_ifname(drvr, *ifidx), h->flags); 336 brcmf_ifname(drvr, *ifidx), h->flags);
412 return -EBADE; 337 return -EBADE;
413 } 338 }
@@ -436,7 +361,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
436 361
437 /* ensure that the msg buf directly follows the cdc msg struct */ 362 /* ensure that the msg buf directly follows the cdc msg struct */
438 if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) { 363 if ((unsigned long)(&cdc->msg + 1) != (unsigned long)cdc->buf) {
439 brcmf_dbg(ERROR, "struct brcmf_proto is not correctly defined\n"); 364 brcmf_err("struct brcmf_proto is not correctly defined\n");
440 goto fail; 365 goto fail;
441 } 366 }
442 367
@@ -458,35 +383,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
458 drvr->prot = NULL; 383 drvr->prot = NULL;
459} 384}
460 385
461int brcmf_proto_init(struct brcmf_pub *drvr)
462{
463 int ret = 0;
464 char buf[128];
465
466 brcmf_dbg(TRACE, "Enter\n");
467
468 mutex_lock(&drvr->proto_block);
469
470 /* Get the device MAC address */
471 strcpy(buf, "cur_etheraddr");
472 ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
473 buf, sizeof(buf));
474 if (ret < 0) {
475 mutex_unlock(&drvr->proto_block);
476 return ret;
477 }
478 memcpy(drvr->mac, buf, ETH_ALEN);
479
480 mutex_unlock(&drvr->proto_block);
481
482 ret = brcmf_c_preinit_dcmds(drvr);
483
484 /* Always assumes wl for now */
485 drvr->iswl = true;
486
487 return ret;
488}
489
490void brcmf_proto_stop(struct brcmf_pub *drvr) 386void brcmf_proto_stop(struct brcmf_pub *drvr)
491{ 387{
492 /* Nothing to do for CDC */ 388 /* Nothing to do for CDC */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 15c5db5752d1..f8b52e5b941a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -18,28 +18,21 @@
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/sched.h>
22#include <linux/netdevice.h> 21#include <linux/netdevice.h>
23#include <asm/unaligned.h>
24#include <defs.h>
25#include <brcmu_wifi.h> 22#include <brcmu_wifi.h>
26#include <brcmu_utils.h> 23#include <brcmu_utils.h>
27#include "dhd.h" 24#include "dhd.h"
28#include "dhd_bus.h" 25#include "dhd_bus.h"
29#include "dhd_proto.h" 26#include "dhd_proto.h"
30#include "dhd_dbg.h" 27#include "dhd_dbg.h"
28#include "fwil.h"
31 29
32#define BRCM_OUI "\x00\x10\x18" 30#define PKTFILTER_BUF_SIZE 128
33#define DOT11_OUI_LEN 3
34#define BCMILCP_BCM_SUBTYPE_EVENT 1
35#define PKTFILTER_BUF_SIZE 2048
36#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ 31#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
37 32#define BRCMF_DEFAULT_BCN_TIMEOUT 3
38#define MSGTRACE_VERSION 1 33#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
39 34#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
40#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u) 35#define BRCMF_DEFAULT_PACKET_FILTER "100 0 0 0 0x01 0x00"
41#define BRCMF_PKT_FILTER_PATTERN_FIXED_LEN \
42 offsetof(struct brcmf_pkt_filter_pattern_le, mask_and_pattern)
43 36
44#ifdef DEBUG 37#ifdef DEBUG
45static const char brcmf_version[] = 38static const char brcmf_version[] =
@@ -50,89 +43,6 @@ static const char brcmf_version[] =
50 "Dongle Host Driver, version " BRCMF_VERSION_STR; 43 "Dongle Host Driver, version " BRCMF_VERSION_STR;
51#endif 44#endif
52 45
53/* Message trace header */
54struct msgtrace_hdr {
55 u8 version;
56 u8 spare;
57 __be16 len; /* Len of the trace */
58 __be32 seqnum; /* Sequence number of message. Useful
59 * if the messsage has been lost
60 * because of DMA error or a bus reset
61 * (ex: SDIO Func2)
62 */
63 __be32 discarded_bytes; /* Number of discarded bytes because of
64 trace overflow */
65 __be32 discarded_printf; /* Number of discarded printf
66 because of trace overflow */
67} __packed;
68
69
70uint
71brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
72{
73 uint len;
74
75 len = strlen(name) + 1;
76
77 if ((len + datalen) > buflen)
78 return 0;
79
80 strncpy(buf, name, buflen);
81
82 /* append data onto the end of the name string */
83 if (data && datalen) {
84 memcpy(&buf[len], data, datalen);
85 len += datalen;
86 }
87
88 return len;
89}
90
91uint
92brcmf_c_mkiovar_bsscfg(char *name, char *data, uint datalen,
93 char *buf, uint buflen, s32 bssidx)
94{
95 const s8 *prefix = "bsscfg:";
96 s8 *p;
97 u32 prefixlen;
98 u32 namelen;
99 u32 iolen;
100 __le32 bssidx_le;
101
102 if (bssidx == 0)
103 return brcmf_c_mkiovar(name, data, datalen, buf, buflen);
104
105 prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
106 namelen = (u32) strlen(name) + 1; /* lengh of iovar name + null */
107 iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
108
109 if (buflen < 0 || iolen > (u32)buflen) {
110 brcmf_dbg(ERROR, "buffer is too short\n");
111 return 0;
112 }
113
114 p = buf;
115
116 /* copy prefix, no null */
117 memcpy(p, prefix, prefixlen);
118 p += prefixlen;
119
120 /* copy iovar name including null */
121 memcpy(p, name, namelen);
122 p += namelen;
123
124 /* bss config index as first data */
125 bssidx_le = cpu_to_le32(bssidx);
126 memcpy(p, &bssidx_le, sizeof(bssidx_le));
127 p += sizeof(bssidx_le);
128
129 /* parameter buffer follows */
130 if (datalen)
131 memcpy(p, data, datalen);
132
133 return iolen;
134
135}
136 46
137bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, 47bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
138 struct sk_buff *pkt, int prec) 48 struct sk_buff *pkt, int prec)
@@ -170,7 +80,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
170 p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) : 80 p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
171 brcmu_pktq_pdeq_tail(q, eprec); 81 brcmu_pktq_pdeq_tail(q, eprec);
172 if (p == NULL) 82 if (p == NULL)
173 brcmf_dbg(ERROR, "brcmu_pktq_penq() failed, oldest %d\n", 83 brcmf_err("brcmu_pktq_penq() failed, oldest %d\n",
174 discard_oldest); 84 discard_oldest);
175 85
176 brcmu_pkt_buf_free_skb(p); 86 brcmu_pkt_buf_free_skb(p);
@@ -179,415 +89,22 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
179 /* Enqueue */ 89 /* Enqueue */
180 p = brcmu_pktq_penq(q, prec, pkt); 90 p = brcmu_pktq_penq(q, prec, pkt);
181 if (p == NULL) 91 if (p == NULL)
182 brcmf_dbg(ERROR, "brcmu_pktq_penq() failed\n"); 92 brcmf_err("brcmu_pktq_penq() failed\n");
183 93
184 return p != NULL; 94 return p != NULL;
185} 95}
186 96
187#ifdef DEBUG
188static void
189brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
190{
191 uint i, status, reason;
192 bool group = false, flush_txq = false, link = false;
193 char *auth_str, *event_name;
194 unsigned char *buf;
195 char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
196 static struct {
197 uint event;
198 char *event_name;
199 } event_names[] = {
200 {
201 BRCMF_E_SET_SSID, "SET_SSID"}, {
202 BRCMF_E_JOIN, "JOIN"}, {
203 BRCMF_E_START, "START"}, {
204 BRCMF_E_AUTH, "AUTH"}, {
205 BRCMF_E_AUTH_IND, "AUTH_IND"}, {
206 BRCMF_E_DEAUTH, "DEAUTH"}, {
207 BRCMF_E_DEAUTH_IND, "DEAUTH_IND"}, {
208 BRCMF_E_ASSOC, "ASSOC"}, {
209 BRCMF_E_ASSOC_IND, "ASSOC_IND"}, {
210 BRCMF_E_REASSOC, "REASSOC"}, {
211 BRCMF_E_REASSOC_IND, "REASSOC_IND"}, {
212 BRCMF_E_DISASSOC, "DISASSOC"}, {
213 BRCMF_E_DISASSOC_IND, "DISASSOC_IND"}, {
214 BRCMF_E_QUIET_START, "START_QUIET"}, {
215 BRCMF_E_QUIET_END, "END_QUIET"}, {
216 BRCMF_E_BEACON_RX, "BEACON_RX"}, {
217 BRCMF_E_LINK, "LINK"}, {
218 BRCMF_E_MIC_ERROR, "MIC_ERROR"}, {
219 BRCMF_E_NDIS_LINK, "NDIS_LINK"}, {
220 BRCMF_E_ROAM, "ROAM"}, {
221 BRCMF_E_TXFAIL, "TXFAIL"}, {
222 BRCMF_E_PMKID_CACHE, "PMKID_CACHE"}, {
223 BRCMF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, {
224 BRCMF_E_PRUNE, "PRUNE"}, {
225 BRCMF_E_AUTOAUTH, "AUTOAUTH"}, {
226 BRCMF_E_EAPOL_MSG, "EAPOL_MSG"}, {
227 BRCMF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
228 BRCMF_E_ADDTS_IND, "ADDTS_IND"}, {
229 BRCMF_E_DELTS_IND, "DELTS_IND"}, {
230 BRCMF_E_BCNSENT_IND, "BCNSENT_IND"}, {
231 BRCMF_E_BCNRX_MSG, "BCNRX_MSG"}, {
232 BRCMF_E_BCNLOST_MSG, "BCNLOST_MSG"}, {
233 BRCMF_E_ROAM_PREP, "ROAM_PREP"}, {
234 BRCMF_E_PFN_NET_FOUND, "PNO_NET_FOUND"}, {
235 BRCMF_E_PFN_NET_LOST, "PNO_NET_LOST"}, {
236 BRCMF_E_RESET_COMPLETE, "RESET_COMPLETE"}, {
237 BRCMF_E_JOIN_START, "JOIN_START"}, {
238 BRCMF_E_ROAM_START, "ROAM_START"}, {
239 BRCMF_E_ASSOC_START, "ASSOC_START"}, {
240 BRCMF_E_IBSS_ASSOC, "IBSS_ASSOC"}, {
241 BRCMF_E_RADIO, "RADIO"}, {
242 BRCMF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, {
243 BRCMF_E_PROBREQ_MSG, "PROBREQ_MSG"}, {
244 BRCMF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, {
245 BRCMF_E_PSK_SUP, "PSK_SUP"}, {
246 BRCMF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, {
247 BRCMF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, {
248 BRCMF_E_ICV_ERROR, "ICV_ERROR"}, {
249 BRCMF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, {
250 BRCMF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, {
251 BRCMF_E_TRACE, "TRACE"}, {
252 BRCMF_E_ACTION_FRAME, "ACTION FRAME"}, {
253 BRCMF_E_ACTION_FRAME_COMPLETE, "ACTION FRAME TX COMPLETE"}, {
254 BRCMF_E_IF, "IF"}, {
255 BRCMF_E_RSSI, "RSSI"}, {
256 BRCMF_E_PFN_SCAN_COMPLETE, "SCAN_COMPLETE"}, {
257 BRCMF_E_ESCAN_RESULT, "ESCAN_RESULT"}
258 };
259 uint event_type, flags, auth_type, datalen;
260 static u32 seqnum_prev;
261 struct msgtrace_hdr hdr;
262 u32 nblost;
263 char *s, *p;
264
265 event_type = be32_to_cpu(event->event_type);
266 flags = be16_to_cpu(event->flags);
267 status = be32_to_cpu(event->status);
268 reason = be32_to_cpu(event->reason);
269 auth_type = be32_to_cpu(event->auth_type);
270 datalen = be32_to_cpu(event->datalen);
271 /* debug dump of event messages */
272 sprintf(eabuf, "%pM", event->addr);
273
274 event_name = "UNKNOWN";
275 for (i = 0; i < ARRAY_SIZE(event_names); i++) {
276 if (event_names[i].event == event_type)
277 event_name = event_names[i].event_name;
278 }
279
280 brcmf_dbg(EVENT, "EVENT: %s, event ID = %d\n", event_name, event_type);
281 brcmf_dbg(EVENT, "flags 0x%04x, status %d, reason %d, auth_type %d MAC %s\n",
282 flags, status, reason, auth_type, eabuf);
283
284 if (flags & BRCMF_EVENT_MSG_LINK)
285 link = true;
286 if (flags & BRCMF_EVENT_MSG_GROUP)
287 group = true;
288 if (flags & BRCMF_EVENT_MSG_FLUSHTXQ)
289 flush_txq = true;
290
291 switch (event_type) {
292 case BRCMF_E_START:
293 case BRCMF_E_DEAUTH:
294 case BRCMF_E_DISASSOC:
295 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
296 break;
297
298 case BRCMF_E_ASSOC_IND:
299 case BRCMF_E_REASSOC_IND:
300 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
301 break;
302
303 case BRCMF_E_ASSOC:
304 case BRCMF_E_REASSOC:
305 if (status == BRCMF_E_STATUS_SUCCESS)
306 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, SUCCESS\n",
307 event_name, eabuf);
308 else if (status == BRCMF_E_STATUS_TIMEOUT)
309 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, TIMEOUT\n",
310 event_name, eabuf);
311 else if (status == BRCMF_E_STATUS_FAIL)
312 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, FAILURE, reason %d\n",
313 event_name, eabuf, (int)reason);
314 else
315 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, unexpected status %d\n",
316 event_name, eabuf, (int)status);
317 break;
318
319 case BRCMF_E_DEAUTH_IND:
320 case BRCMF_E_DISASSOC_IND:
321 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, reason %d\n",
322 event_name, eabuf, (int)reason);
323 break;
324
325 case BRCMF_E_AUTH:
326 case BRCMF_E_AUTH_IND:
327 if (auth_type == WLAN_AUTH_OPEN)
328 auth_str = "Open System";
329 else if (auth_type == WLAN_AUTH_SHARED_KEY)
330 auth_str = "Shared Key";
331 else {
332 sprintf(err_msg, "AUTH unknown: %d", (int)auth_type);
333 auth_str = err_msg;
334 }
335 if (event_type == BRCMF_E_AUTH_IND)
336 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s\n",
337 event_name, eabuf, auth_str);
338 else if (status == BRCMF_E_STATUS_SUCCESS)
339 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, SUCCESS\n",
340 event_name, eabuf, auth_str);
341 else if (status == BRCMF_E_STATUS_TIMEOUT)
342 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
343 event_name, eabuf, auth_str);
344 else if (status == BRCMF_E_STATUS_FAIL) {
345 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, %s, FAILURE, reason %d\n",
346 event_name, eabuf, auth_str, (int)reason);
347 }
348
349 break;
350
351 case BRCMF_E_JOIN:
352 case BRCMF_E_ROAM:
353 case BRCMF_E_SET_SSID:
354 if (status == BRCMF_E_STATUS_SUCCESS)
355 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n",
356 event_name, eabuf);
357 else if (status == BRCMF_E_STATUS_FAIL)
358 brcmf_dbg(EVENT, "MACEVENT: %s, failed\n", event_name);
359 else if (status == BRCMF_E_STATUS_NO_NETWORKS)
360 brcmf_dbg(EVENT, "MACEVENT: %s, no networks found\n",
361 event_name);
362 else
363 brcmf_dbg(EVENT, "MACEVENT: %s, unexpected status %d\n",
364 event_name, (int)status);
365 break;
366
367 case BRCMF_E_BEACON_RX:
368 if (status == BRCMF_E_STATUS_SUCCESS)
369 brcmf_dbg(EVENT, "MACEVENT: %s, SUCCESS\n", event_name);
370 else if (status == BRCMF_E_STATUS_FAIL)
371 brcmf_dbg(EVENT, "MACEVENT: %s, FAIL\n", event_name);
372 else
373 brcmf_dbg(EVENT, "MACEVENT: %s, status %d\n",
374 event_name, status);
375 break;
376
377 case BRCMF_E_LINK:
378 brcmf_dbg(EVENT, "MACEVENT: %s %s\n",
379 event_name, link ? "UP" : "DOWN");
380 break;
381
382 case BRCMF_E_MIC_ERROR:
383 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
384 event_name, eabuf, group, flush_txq);
385 break;
386
387 case BRCMF_E_ICV_ERROR:
388 case BRCMF_E_UNICAST_DECODE_ERROR:
389 case BRCMF_E_MULTICAST_DECODE_ERROR:
390 brcmf_dbg(EVENT, "MACEVENT: %s, MAC %s\n", event_name, eabuf);
391 break;
392
393 case BRCMF_E_TXFAIL:
394 brcmf_dbg(EVENT, "MACEVENT: %s, RA %s\n", event_name, eabuf);
395 break;
396
397 case BRCMF_E_SCAN_COMPLETE:
398 case BRCMF_E_PMKID_CACHE:
399 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
400 break;
401
402 case BRCMF_E_ESCAN_RESULT:
403 brcmf_dbg(EVENT, "MACEVENT: %s\n", event_name);
404 datalen = 0;
405 break;
406
407 case BRCMF_E_PFN_NET_FOUND:
408 case BRCMF_E_PFN_NET_LOST:
409 case BRCMF_E_PFN_SCAN_COMPLETE:
410 brcmf_dbg(EVENT, "PNOEVENT: %s\n", event_name);
411 break;
412
413 case BRCMF_E_PSK_SUP:
414 case BRCMF_E_PRUNE:
415 brcmf_dbg(EVENT, "MACEVENT: %s, status %d, reason %d\n",
416 event_name, (int)status, (int)reason);
417 break;
418
419 case BRCMF_E_TRACE:
420 buf = (unsigned char *) event_data;
421 memcpy(&hdr, buf, sizeof(struct msgtrace_hdr));
422
423 if (hdr.version != MSGTRACE_VERSION) {
424 brcmf_dbg(ERROR,
425 "MACEVENT: %s [unsupported version --> brcmf"
426 " version:%d dongle version:%d]\n",
427 event_name, MSGTRACE_VERSION, hdr.version);
428 /* Reset datalen to avoid display below */
429 datalen = 0;
430 break;
431 }
432
433 /* There are 2 bytes available at the end of data */
434 *(buf + sizeof(struct msgtrace_hdr)
435 + be16_to_cpu(hdr.len)) = '\0';
436
437 if (be32_to_cpu(hdr.discarded_bytes)
438 || be32_to_cpu(hdr.discarded_printf))
439 brcmf_dbg(ERROR,
440 "WLC_E_TRACE: [Discarded traces in dongle -->"
441 " discarded_bytes %d discarded_printf %d]\n",
442 be32_to_cpu(hdr.discarded_bytes),
443 be32_to_cpu(hdr.discarded_printf));
444
445 nblost = be32_to_cpu(hdr.seqnum) - seqnum_prev - 1;
446 if (nblost > 0)
447 brcmf_dbg(ERROR, "WLC_E_TRACE: [Event lost --> seqnum "
448 " %d nblost %d\n", be32_to_cpu(hdr.seqnum),
449 nblost);
450 seqnum_prev = be32_to_cpu(hdr.seqnum);
451
452 /* Display the trace buffer. Advance from \n to \n to
453 * avoid display big
454 * printf (issue with Linux printk )
455 */
456 p = (char *)&buf[sizeof(struct msgtrace_hdr)];
457 while ((s = strstr(p, "\n")) != NULL) {
458 *s = '\0';
459 pr_debug("%s\n", p);
460 p = s + 1;
461 }
462 pr_debug("%s\n", p);
463
464 /* Reset datalen to avoid display below */
465 datalen = 0;
466 break;
467
468 case BRCMF_E_RSSI:
469 brcmf_dbg(EVENT, "MACEVENT: %s %d\n",
470 event_name, be32_to_cpu(*((__be32 *)event_data)));
471 break;
472
473 default:
474 brcmf_dbg(EVENT,
475 "MACEVENT: %s %d, MAC %s, status %d, reason %d, "
476 "auth %d\n", event_name, event_type, eabuf,
477 (int)status, (int)reason, (int)auth_type);
478 break;
479 }
480
481 /* show any appended data */
482 brcmf_dbg_hex_dump(datalen, event_data, datalen, "Received data");
483}
484#endif /* DEBUG */
485
486int
487brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
488 struct brcmf_event_msg *event, void **data_ptr)
489{
490 /* check whether packet is a BRCM event pkt */
491 struct brcmf_event *pvt_data = (struct brcmf_event *) pktdata;
492 struct brcmf_if_event *ifevent;
493 char *event_data;
494 u32 type, status;
495 u16 flags;
496 int evlen;
497
498 if (memcmp(BRCM_OUI, &pvt_data->hdr.oui[0], DOT11_OUI_LEN)) {
499 brcmf_dbg(ERROR, "mismatched OUI, bailing\n");
500 return -EBADE;
501 }
502
503 /* BRCM event pkt may be unaligned - use xxx_ua to load user_subtype. */
504 if (get_unaligned_be16(&pvt_data->hdr.usr_subtype) !=
505 BCMILCP_BCM_SUBTYPE_EVENT) {
506 brcmf_dbg(ERROR, "mismatched subtype, bailing\n");
507 return -EBADE;
508 }
509
510 *data_ptr = &pvt_data[1];
511 event_data = *data_ptr;
512
513 /* memcpy since BRCM event pkt may be unaligned. */
514 memcpy(event, &pvt_data->msg, sizeof(struct brcmf_event_msg));
515
516 type = get_unaligned_be32(&event->event_type);
517 flags = get_unaligned_be16(&event->flags);
518 status = get_unaligned_be32(&event->status);
519 evlen = get_unaligned_be32(&event->datalen) +
520 sizeof(struct brcmf_event);
521
522 switch (type) {
523 case BRCMF_E_IF:
524 ifevent = (struct brcmf_if_event *) event_data;
525 brcmf_dbg(TRACE, "if event\n");
526
527 if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
528 if (ifevent->action == BRCMF_E_IF_ADD)
529 brcmf_add_if(drvr->dev, ifevent->ifidx,
530 event->ifname,
531 pvt_data->eth.h_dest);
532 else
533 brcmf_del_if(drvr, ifevent->ifidx);
534 } else {
535 brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
536 ifevent->ifidx, event->ifname);
537 }
538
539 /* send up the if event: btamp user needs it */
540 *ifidx = brcmf_ifname2idx(drvr, event->ifname);
541 break;
542
543 /* These are what external supplicant/authenticator wants */
544 case BRCMF_E_LINK:
545 case BRCMF_E_ASSOC_IND:
546 case BRCMF_E_REASSOC_IND:
547 case BRCMF_E_DISASSOC_IND:
548 case BRCMF_E_MIC_ERROR:
549 default:
550 /* Fall through: this should get _everything_ */
551
552 *ifidx = brcmf_ifname2idx(drvr, event->ifname);
553 brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
554 type, flags, status);
555
556 /* put it back to BRCMF_E_NDIS_LINK */
557 if (type == BRCMF_E_NDIS_LINK) {
558 u32 temp1;
559 __be32 temp2;
560
561 temp1 = get_unaligned_be32(&event->event_type);
562 brcmf_dbg(TRACE, "Converted to WLC_E_LINK type %d\n",
563 temp1);
564
565 temp2 = cpu_to_be32(BRCMF_E_NDIS_LINK);
566 memcpy((void *)(&pvt_data->msg.event_type), &temp2,
567 sizeof(pvt_data->msg.event_type));
568 }
569 break;
570 }
571
572#ifdef DEBUG
573 if (BRCMF_EVENT_ON())
574 brcmf_c_show_host_event(event, event_data);
575#endif /* DEBUG */
576
577 return 0;
578}
579
580/* Convert user's input in hex pattern to byte-size mask */ 97/* Convert user's input in hex pattern to byte-size mask */
581static int brcmf_c_pattern_atoh(char *src, char *dst) 98static int brcmf_c_pattern_atoh(char *src, char *dst)
582{ 99{
583 int i; 100 int i;
584 if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) { 101 if (strncmp(src, "0x", 2) != 0 && strncmp(src, "0X", 2) != 0) {
585 brcmf_dbg(ERROR, "Mask invalid format. Needs to start with 0x\n"); 102 brcmf_err("Mask invalid format. Needs to start with 0x\n");
586 return -EINVAL; 103 return -EINVAL;
587 } 104 }
588 src = src + 2; /* Skip past 0x */ 105 src = src + 2; /* Skip past 0x */
589 if (strlen(src) % 2 != 0) { 106 if (strlen(src) % 2 != 0) {
590 brcmf_dbg(ERROR, "Mask invalid format. Length must be even.\n"); 107 brcmf_err("Mask invalid format. Length must be even.\n");
591 return -EINVAL; 108 return -EINVAL;
592 } 109 }
593 for (i = 0; *src != '\0'; i++) { 110 for (i = 0; *src != '\0'; i++) {
@@ -603,90 +120,57 @@ static int brcmf_c_pattern_atoh(char *src, char *dst)
603 return i; 120 return i;
604} 121}
605 122
606void 123static void
607brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg, int enable, 124brcmf_c_pktfilter_offload_enable(struct brcmf_if *ifp, char *arg, int enable,
608 int master_mode) 125 int master_mode)
609{ 126{
610 unsigned long res; 127 unsigned long res;
611 char *argv[8]; 128 char *argv;
612 int i = 0;
613 const char *str;
614 int buf_len;
615 int str_len;
616 char *arg_save = NULL, *arg_org = NULL; 129 char *arg_save = NULL, *arg_org = NULL;
617 int rc; 130 s32 err;
618 char buf[128];
619 struct brcmf_pkt_filter_enable_le enable_parm; 131 struct brcmf_pkt_filter_enable_le enable_parm;
620 struct brcmf_pkt_filter_enable_le *pkt_filterp;
621 __le32 mmode_le;
622 132
623 arg_save = kmalloc(strlen(arg) + 1, GFP_ATOMIC); 133 arg_save = kstrdup(arg, GFP_ATOMIC);
624 if (!arg_save) 134 if (!arg_save)
625 goto fail; 135 goto fail;
626 136
627 arg_org = arg_save; 137 arg_org = arg_save;
628 memcpy(arg_save, arg, strlen(arg) + 1);
629 138
630 argv[i] = strsep(&arg_save, " "); 139 argv = strsep(&arg_save, " ");
631 140
632 i = 0; 141 if (argv == NULL) {
633 if (NULL == argv[i]) { 142 brcmf_err("No args provided\n");
634 brcmf_dbg(ERROR, "No args provided\n");
635 goto fail; 143 goto fail;
636 } 144 }
637 145
638 str = "pkt_filter_enable";
639 str_len = strlen(str);
640 strncpy(buf, str, str_len);
641 buf[str_len] = '\0';
642 buf_len = str_len + 1;
643
644 pkt_filterp = (struct brcmf_pkt_filter_enable_le *) (buf + str_len + 1);
645
646 /* Parse packet filter id. */ 146 /* Parse packet filter id. */
647 enable_parm.id = 0; 147 enable_parm.id = 0;
648 if (!kstrtoul(argv[i], 0, &res)) 148 if (!kstrtoul(argv, 0, &res))
649 enable_parm.id = cpu_to_le32((u32)res); 149 enable_parm.id = cpu_to_le32((u32)res);
650 150
651 /* Parse enable/disable value. */ 151 /* Enable/disable the specified filter. */
652 enable_parm.enable = cpu_to_le32(enable); 152 enable_parm.enable = cpu_to_le32(enable);
653 153
654 buf_len += sizeof(enable_parm); 154 err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_enable", &enable_parm,
655 memcpy((char *)pkt_filterp, &enable_parm, sizeof(enable_parm)); 155 sizeof(enable_parm));
156 if (err)
157 brcmf_err("Set pkt_filter_enable error (%d)\n", err);
656 158
657 /* Enable/disable the specified filter. */ 159 /* Control the master mode */
658 rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len); 160 err = brcmf_fil_iovar_int_set(ifp, "pkt_filter_mode", master_mode);
659 rc = rc >= 0 ? 0 : rc; 161 if (err)
660 if (rc) 162 brcmf_err("Set pkt_filter_mode error (%d)\n", err);
661 brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
662 arg, rc);
663 else
664 brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
665
666 /* Contorl the master mode */
667 mmode_le = cpu_to_le32(master_mode);
668 brcmf_c_mkiovar("pkt_filter_mode", (char *)&mmode_le, 4, buf,
669 sizeof(buf));
670 rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf,
671 sizeof(buf));
672 rc = rc >= 0 ? 0 : rc;
673 if (rc)
674 brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n",
675 arg, rc);
676 163
677fail: 164fail:
678 kfree(arg_org); 165 kfree(arg_org);
679} 166}
680 167
681void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg) 168static void brcmf_c_pktfilter_offload_set(struct brcmf_if *ifp, char *arg)
682{ 169{
683 const char *str; 170 struct brcmf_pkt_filter_le *pkt_filter;
684 struct brcmf_pkt_filter_le pkt_filter;
685 struct brcmf_pkt_filter_le *pkt_filterp;
686 unsigned long res; 171 unsigned long res;
687 int buf_len; 172 int buf_len;
688 int str_len; 173 s32 err;
689 int rc;
690 u32 mask_size; 174 u32 mask_size;
691 u32 pattern_size; 175 u32 pattern_size;
692 char *argv[8], *buf = NULL; 176 char *argv[8], *buf = NULL;
@@ -704,104 +188,64 @@ void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg)
704 goto fail; 188 goto fail;
705 189
706 argv[i] = strsep(&arg_save, " "); 190 argv[i] = strsep(&arg_save, " ");
707 while (argv[i++]) 191 while (argv[i]) {
192 i++;
193 if (i >= 8) {
194 brcmf_err("Too many parameters\n");
195 goto fail;
196 }
708 argv[i] = strsep(&arg_save, " "); 197 argv[i] = strsep(&arg_save, " ");
198 }
709 199
710 i = 0; 200 if (i != 6) {
711 if (NULL == argv[i]) { 201 brcmf_err("Not enough args provided %d\n", i);
712 brcmf_dbg(ERROR, "No args provided\n");
713 goto fail; 202 goto fail;
714 } 203 }
715 204
716 str = "pkt_filter_add"; 205 pkt_filter = (struct brcmf_pkt_filter_le *)buf;
717 strcpy(buf, str);
718 str_len = strlen(str);
719 buf_len = str_len + 1;
720
721 pkt_filterp = (struct brcmf_pkt_filter_le *) (buf + str_len + 1);
722 206
723 /* Parse packet filter id. */ 207 /* Parse packet filter id. */
724 pkt_filter.id = 0; 208 pkt_filter->id = 0;
725 if (!kstrtoul(argv[i], 0, &res)) 209 if (!kstrtoul(argv[0], 0, &res))
726 pkt_filter.id = cpu_to_le32((u32)res); 210 pkt_filter->id = cpu_to_le32((u32)res);
727
728 if (NULL == argv[++i]) {
729 brcmf_dbg(ERROR, "Polarity not provided\n");
730 goto fail;
731 }
732 211
733 /* Parse filter polarity. */ 212 /* Parse filter polarity. */
734 pkt_filter.negate_match = 0; 213 pkt_filter->negate_match = 0;
735 if (!kstrtoul(argv[i], 0, &res)) 214 if (!kstrtoul(argv[1], 0, &res))
736 pkt_filter.negate_match = cpu_to_le32((u32)res); 215 pkt_filter->negate_match = cpu_to_le32((u32)res);
737
738 if (NULL == argv[++i]) {
739 brcmf_dbg(ERROR, "Filter type not provided\n");
740 goto fail;
741 }
742 216
743 /* Parse filter type. */ 217 /* Parse filter type. */
744 pkt_filter.type = 0; 218 pkt_filter->type = 0;
745 if (!kstrtoul(argv[i], 0, &res)) 219 if (!kstrtoul(argv[2], 0, &res))
746 pkt_filter.type = cpu_to_le32((u32)res); 220 pkt_filter->type = cpu_to_le32((u32)res);
747
748 if (NULL == argv[++i]) {
749 brcmf_dbg(ERROR, "Offset not provided\n");
750 goto fail;
751 }
752 221
753 /* Parse pattern filter offset. */ 222 /* Parse pattern filter offset. */
754 pkt_filter.u.pattern.offset = 0; 223 pkt_filter->u.pattern.offset = 0;
755 if (!kstrtoul(argv[i], 0, &res)) 224 if (!kstrtoul(argv[3], 0, &res))
756 pkt_filter.u.pattern.offset = cpu_to_le32((u32)res); 225 pkt_filter->u.pattern.offset = cpu_to_le32((u32)res);
757
758 if (NULL == argv[++i]) {
759 brcmf_dbg(ERROR, "Bitmask not provided\n");
760 goto fail;
761 }
762 226
763 /* Parse pattern filter mask. */ 227 /* Parse pattern filter mask. */
764 mask_size = 228 mask_size = brcmf_c_pattern_atoh(argv[4],
765 brcmf_c_pattern_atoh 229 (char *)pkt_filter->u.pattern.mask_and_pattern);
766 (argv[i], (char *)pkt_filterp->u.pattern.mask_and_pattern);
767
768 if (NULL == argv[++i]) {
769 brcmf_dbg(ERROR, "Pattern not provided\n");
770 goto fail;
771 }
772 230
773 /* Parse pattern filter pattern. */ 231 /* Parse pattern filter pattern. */
774 pattern_size = 232 pattern_size = brcmf_c_pattern_atoh(argv[5],
775 brcmf_c_pattern_atoh(argv[i], 233 (char *)&pkt_filter->u.pattern.mask_and_pattern[mask_size]);
776 (char *)&pkt_filterp->u.pattern.
777 mask_and_pattern[mask_size]);
778 234
779 if (mask_size != pattern_size) { 235 if (mask_size != pattern_size) {
780 brcmf_dbg(ERROR, "Mask and pattern not the same size\n"); 236 brcmf_err("Mask and pattern not the same size\n");
781 goto fail; 237 goto fail;
782 } 238 }
783 239
784 pkt_filter.u.pattern.size_bytes = cpu_to_le32(mask_size); 240 pkt_filter->u.pattern.size_bytes = cpu_to_le32(mask_size);
785 buf_len += BRCMF_PKT_FILTER_FIXED_LEN; 241 buf_len = offsetof(struct brcmf_pkt_filter_le,
786 buf_len += (BRCMF_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size); 242 u.pattern.mask_and_pattern);
787 243 buf_len += mask_size + pattern_size;
788 /* Keep-alive attributes are set in local
789 * variable (keep_alive_pkt), and
790 ** then memcpy'ed into buffer (keep_alive_pktp) since there is no
791 ** guarantee that the buffer is properly aligned.
792 */
793 memcpy((char *)pkt_filterp,
794 &pkt_filter,
795 BRCMF_PKT_FILTER_FIXED_LEN + BRCMF_PKT_FILTER_PATTERN_FIXED_LEN);
796
797 rc = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, buf, buf_len);
798 rc = rc >= 0 ? 0 : rc;
799 244
800 if (rc) 245 err = brcmf_fil_iovar_data_set(ifp, "pkt_filter_add", pkt_filter,
801 brcmf_dbg(TRACE, "failed to add pktfilter %s, retcode = %d\n", 246 buf_len);
802 arg, rc); 247 if (err)
803 else 248 brcmf_err("Set pkt_filter_add error (%d)\n", err);
804 brcmf_dbg(TRACE, "successfully added pktfilter %s\n", arg);
805 249
806fail: 250fail:
807 kfree(arg_org); 251 kfree(arg_org);
@@ -809,130 +253,125 @@ fail:
809 kfree(buf); 253 kfree(buf);
810} 254}
811 255
812static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode) 256int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
813{ 257{
814 char iovbuf[32]; 258 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
815 int retcode; 259 u8 buf[BRCMF_DCMD_SMLEN];
816 __le32 arp_mode_le; 260 char *ptr;
817 261 s32 err;
818 arp_mode_le = cpu_to_le32(arp_mode);
819 brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf,
820 sizeof(iovbuf));
821 retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
822 iovbuf, sizeof(iovbuf));
823 retcode = retcode >= 0 ? 0 : retcode;
824 if (retcode)
825 brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, retcode = %d\n",
826 arp_mode, retcode);
827 else
828 brcmf_dbg(TRACE, "successfully set ARP offload mode to 0x%x\n",
829 arp_mode);
830}
831
832static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable)
833{
834 char iovbuf[32];
835 int retcode;
836 __le32 arp_enable_le;
837
838 arp_enable_le = cpu_to_le32(arp_enable);
839
840 brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4,
841 iovbuf, sizeof(iovbuf));
842 retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
843 iovbuf, sizeof(iovbuf));
844 retcode = retcode >= 0 ? 0 : retcode;
845 if (retcode)
846 brcmf_dbg(TRACE, "failed to enable ARP offload to %d, retcode = %d\n",
847 arp_enable, retcode);
848 else
849 brcmf_dbg(TRACE, "successfully enabled ARP offload to %d\n",
850 arp_enable);
851}
852
853int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
854{
855 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
856 "event_msgs" + '\0' + bitvec */
857 char buf[128], *ptr;
858 __le32 roaming_le = cpu_to_le32(1);
859 __le32 bcn_timeout_le = cpu_to_le32(3);
860 __le32 scan_assoc_time_le = cpu_to_le32(40);
861 __le32 scan_unassoc_time_le = cpu_to_le32(40);
862 int i;
863 struct brcmf_bus_dcmd *cmdlst; 262 struct brcmf_bus_dcmd *cmdlst;
864 struct list_head *cur, *q; 263 struct list_head *cur, *q;
865 264
866 mutex_lock(&drvr->proto_block); 265 /* retreive mac address */
867 266 err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
868 /* Set Country code */ 267 sizeof(ifp->mac_addr));
869 if (drvr->country_code[0] != 0) { 268 if (err < 0) {
870 if (brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_COUNTRY, 269 brcmf_err("Retreiving cur_etheraddr failed, %d\n",
871 drvr->country_code, 270 err);
872 sizeof(drvr->country_code)) < 0) 271 goto done;
873 brcmf_dbg(ERROR, "country code setting failed\n");
874 } 272 }
273 memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
875 274
876 /* query for 'ver' to get version info from firmware */ 275 /* query for 'ver' to get version info from firmware */
877 memset(buf, 0, sizeof(buf)); 276 memset(buf, 0, sizeof(buf));
878 ptr = buf; 277 strcpy(buf, "ver");
879 brcmf_c_mkiovar("ver", NULL, 0, buf, sizeof(buf)); 278 err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
880 brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, buf, sizeof(buf)); 279 if (err < 0) {
280 brcmf_err("Retreiving version information failed, %d\n",
281 err);
282 goto done;
283 }
284 ptr = (char *)buf;
881 strsep(&ptr, "\n"); 285 strsep(&ptr, "\n");
882 /* Print fw version info */ 286 /* Print fw version info */
883 brcmf_dbg(ERROR, "Firmware version = %s\n", buf); 287 brcmf_err("Firmware version = %s\n", buf);
884 288
885 /* Setup timeout if Beacons are lost and roam is off to report 289 /*
886 link down */ 290 * Setup timeout if Beacons are lost and roam is off to report
887 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf, 291 * link down
888 sizeof(iovbuf)); 292 */
889 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, 293 err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
890 sizeof(iovbuf)); 294 BRCMF_DEFAULT_BCN_TIMEOUT);
295 if (err) {
296 brcmf_err("bcn_timeout error (%d)\n", err);
297 goto done;
298 }
891 299
892 /* Enable/Disable build-in roaming to allowed ext supplicant to take 300 /* Enable/Disable build-in roaming to allowed ext supplicant to take
893 of romaing */ 301 * of romaing
894 brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4, 302 */
895 iovbuf, sizeof(iovbuf)); 303 err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
896 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, 304 if (err) {
897 sizeof(iovbuf)); 305 brcmf_err("roam_off error (%d)\n", err);
898 306 goto done;
899 /* Setup event_msgs */ 307 }
900 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN, 308
901 iovbuf, sizeof(iovbuf)); 309 /* Setup event_msgs, enable E_IF */
902 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, 310 err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
903 sizeof(iovbuf)); 311 BRCMF_EVENTING_MASK_LEN);
904 312 if (err) {
905 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME, 313 brcmf_err("Get event_msgs error (%d)\n", err);
906 (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le)); 314 goto done;
907 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME, 315 }
908 (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le)); 316 setbit(eventmask, BRCMF_E_IF);
909 317 err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
910 /* Set and enable ARP offload feature */ 318 BRCMF_EVENTING_MASK_LEN);
911 brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE); 319 if (err) {
912 brcmf_c_arp_offload_enable(drvr, true); 320 brcmf_err("Set event_msgs error (%d)\n", err);
913 321 goto done;
914 /* Set up pkt filter */
915 for (i = 0; i < drvr->pktfilter_count; i++) {
916 brcmf_c_pktfilter_offload_set(drvr, drvr->pktfilter[i]);
917 brcmf_c_pktfilter_offload_enable(drvr, drvr->pktfilter[i],
918 0, true);
919 } 322 }
920 323
324 /* Setup default scan channel time */
325 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
326 BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
327 if (err) {
328 brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
329 err);
330 goto done;
331 }
332
333 /* Setup default scan unassoc time */
334 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
335 BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
336 if (err) {
337 brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
338 err);
339 goto done;
340 }
341
342 /* Try to set and enable ARP offload feature, this may fail */
343 err = brcmf_fil_iovar_int_set(ifp, "arp_ol", BRCMF_ARPOL_MODE);
344 if (err) {
345 brcmf_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n",
346 BRCMF_ARPOL_MODE, err);
347 err = 0;
348 } else {
349 err = brcmf_fil_iovar_int_set(ifp, "arpoe", 1);
350 if (err) {
351 brcmf_dbg(TRACE, "failed to enable ARP offload err = %d\n",
352 err);
353 err = 0;
354 } else
355 brcmf_dbg(TRACE, "successfully enabled ARP offload to 0x%x\n",
356 BRCMF_ARPOL_MODE);
357 }
358
359 /* Setup packet filter */
360 brcmf_c_pktfilter_offload_set(ifp, BRCMF_DEFAULT_PACKET_FILTER);
361 brcmf_c_pktfilter_offload_enable(ifp, BRCMF_DEFAULT_PACKET_FILTER,
362 0, true);
363
921 /* set bus specific command if there is any */ 364 /* set bus specific command if there is any */
922 list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) { 365 list_for_each_safe(cur, q, &ifp->drvr->bus_if->dcmd_list) {
923 cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list); 366 cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
924 if (cmdlst->name && cmdlst->param && cmdlst->param_len) { 367 if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
925 brcmf_c_mkiovar(cmdlst->name, cmdlst->param, 368 brcmf_fil_iovar_data_set(ifp, cmdlst->name,
926 cmdlst->param_len, iovbuf, 369 cmdlst->param,
927 sizeof(iovbuf)); 370 cmdlst->param_len);
928 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
929 iovbuf, sizeof(iovbuf));
930 } 371 }
931 list_del(cur); 372 list_del(cur);
932 kfree(cmdlst); 373 kfree(cmdlst);
933 } 374 }
934 375done:
935 mutex_unlock(&drvr->proto_block); 376 return err;
936
937 return 0;
938} 377}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 7f89540b56da..57671eddf79d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -14,12 +14,9 @@
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16#include <linux/debugfs.h> 16#include <linux/debugfs.h>
17#include <linux/if_ether.h> 17#include <linux/netdevice.h>
18#include <linux/if.h>
19#include <linux/ieee80211.h>
20#include <linux/module.h> 18#include <linux/module.h>
21 19
22#include <defs.h>
23#include <brcmu_wifi.h> 20#include <brcmu_wifi.h>
24#include <brcmu_utils.h> 21#include <brcmu_utils.h>
25#include "dhd.h" 22#include "dhd.h"
@@ -46,10 +43,12 @@ void brcmf_debugfs_exit(void)
46 43
47int brcmf_debugfs_attach(struct brcmf_pub *drvr) 44int brcmf_debugfs_attach(struct brcmf_pub *drvr)
48{ 45{
46 struct device *dev = drvr->bus_if->dev;
47
49 if (!root_folder) 48 if (!root_folder)
50 return -ENODEV; 49 return -ENODEV;
51 50
52 drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder); 51 drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
53 return PTR_RET(drvr->dbgfs_dir); 52 return PTR_RET(drvr->dbgfs_dir);
54} 53}
55 54
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index fb508c2256dd..f2ab01cd7966 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -18,7 +18,6 @@
18#define _BRCMF_DBG_H_ 18#define _BRCMF_DBG_H_
19 19
20/* message levels */ 20/* message levels */
21#define BRCMF_ERROR_VAL 0x0001
22#define BRCMF_TRACE_VAL 0x0002 21#define BRCMF_TRACE_VAL 0x0002
23#define BRCMF_INFO_VAL 0x0004 22#define BRCMF_INFO_VAL 0x0004
24#define BRCMF_DATA_VAL 0x0008 23#define BRCMF_DATA_VAL 0x0008
@@ -27,27 +26,34 @@
27#define BRCMF_HDRS_VAL 0x0040 26#define BRCMF_HDRS_VAL 0x0040
28#define BRCMF_BYTES_VAL 0x0080 27#define BRCMF_BYTES_VAL 0x0080
29#define BRCMF_INTR_VAL 0x0100 28#define BRCMF_INTR_VAL 0x0100
30#define BRCMF_GLOM_VAL 0x0400 29#define BRCMF_GLOM_VAL 0x0200
31#define BRCMF_EVENT_VAL 0x0800 30#define BRCMF_EVENT_VAL 0x0400
32#define BRCMF_BTA_VAL 0x1000 31#define BRCMF_BTA_VAL 0x0800
33#define BRCMF_ISCAN_VAL 0x2000 32#define BRCMF_FIL_VAL 0x1000
33#define BRCMF_USB_VAL 0x2000
34#define BRCMF_SCAN_VAL 0x4000
35#define BRCMF_CONN_VAL 0x8000
36
37/* Macro for error messages. net_ratelimit() is used when driver
38 * debugging is not selected. When debugging the driver error
39 * messages are as important as other tracing or even more so.
40 */
41#ifdef CONFIG_BRCMDBG
42#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
43#else
44#define brcmf_err(fmt, ...) \
45 do { \
46 if (net_ratelimit()) \
47 pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
48 } while (0)
49#endif
34 50
35#if defined(DEBUG) 51#if defined(DEBUG)
36 52
37#define brcmf_dbg(level, fmt, ...) \ 53#define brcmf_dbg(level, fmt, ...) \
38do { \ 54do { \
39 if (BRCMF_ERROR_VAL == BRCMF_##level##_VAL) { \ 55 if (brcmf_msg_level & BRCMF_##level##_VAL) \
40 if (brcmf_msg_level & BRCMF_##level##_VAL) { \ 56 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
41 if (net_ratelimit()) \
42 pr_debug("%s: " fmt, \
43 __func__, ##__VA_ARGS__); \
44 } \
45 } else { \
46 if (brcmf_msg_level & BRCMF_##level##_VAL) { \
47 pr_debug("%s: " fmt, \
48 __func__, ##__VA_ARGS__); \
49 } \
50 } \
51} while (0) 57} while (0)
52 58
53#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL) 59#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
@@ -56,6 +62,7 @@ do { \
56#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL) 62#define BRCMF_BYTES_ON() (brcmf_msg_level & BRCMF_BYTES_VAL)
57#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL) 63#define BRCMF_GLOM_ON() (brcmf_msg_level & BRCMF_GLOM_VAL)
58#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL) 64#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
65#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
59 66
60#else /* (defined DEBUG) || (defined DEBUG) */ 67#else /* (defined DEBUG) || (defined DEBUG) */
61 68
@@ -67,6 +74,7 @@ do { \
67#define BRCMF_BYTES_ON() 0 74#define BRCMF_BYTES_ON() 0
68#define BRCMF_GLOM_ON() 0 75#define BRCMF_GLOM_ON() 0
69#define BRCMF_EVENT_ON() 0 76#define BRCMF_EVENT_ON() 0
77#define BRCMF_FIL_ON() 0
70 78
71#endif /* defined(DEBUG) */ 79#endif /* defined(DEBUG) */
72 80
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index d7c76ce9d8cb..74a616b4de8e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -16,27 +16,11 @@
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 18
19#include <linux/init.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/kthread.h>
22#include <linux/slab.h>
23#include <linux/skbuff.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
26#include <linux/mmc/sdio_func.h>
27#include <linux/random.h>
28#include <linux/spinlock.h>
29#include <linux/ethtool.h>
30#include <linux/fcntl.h>
31#include <linux/fs.h>
32#include <linux/uaccess.h>
33#include <linux/hardirq.h>
34#include <linux/mutex.h>
35#include <linux/wait.h>
36#include <linux/module.h> 21#include <linux/module.h>
37#include <net/cfg80211.h> 22#include <net/cfg80211.h>
38#include <net/rtnetlink.h> 23#include <net/rtnetlink.h>
39#include <defs.h>
40#include <brcmu_utils.h> 24#include <brcmu_utils.h>
41#include <brcmu_wifi.h> 25#include <brcmu_wifi.h>
42 26
@@ -45,55 +29,29 @@
45#include "dhd_proto.h" 29#include "dhd_proto.h"
46#include "dhd_dbg.h" 30#include "dhd_dbg.h"
47#include "wl_cfg80211.h" 31#include "wl_cfg80211.h"
32#include "fwil.h"
48 33
49MODULE_AUTHOR("Broadcom Corporation"); 34MODULE_AUTHOR("Broadcom Corporation");
50MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver."); 35MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
51MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac cards"); 36MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
52MODULE_LICENSE("Dual BSD/GPL"); 37MODULE_LICENSE("Dual BSD/GPL");
53 38
54 39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
55/* Interface control information */
56struct brcmf_if {
57 struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
58 /* OS/stack specifics */
59 struct net_device *ndev;
60 struct net_device_stats stats;
61 int idx; /* iface idx in dongle */
62 u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
63};
64 40
65/* Error bits */ 41/* Error bits */
66int brcmf_msg_level = BRCMF_ERROR_VAL; 42int brcmf_msg_level;
67module_param(brcmf_msg_level, int, 0); 43module_param(brcmf_msg_level, int, 0);
68 44
69int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
70{
71 int i = BRCMF_MAX_IFS;
72 struct brcmf_if *ifp;
73
74 if (name == NULL || *name == '\0')
75 return 0;
76
77 while (--i > 0) {
78 ifp = drvr->iflist[i];
79 if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
80 break;
81 }
82
83 brcmf_dbg(TRACE, "return idx %d for \"%s\"\n", i, name);
84
85 return i; /* default - the primary interface */
86}
87 45
88char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx) 46char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
89{ 47{
90 if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) { 48 if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
91 brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx); 49 brcmf_err("ifidx %d out of range\n", ifidx);
92 return "<if_bad>"; 50 return "<if_bad>";
93 } 51 }
94 52
95 if (drvr->iflist[ifidx] == NULL) { 53 if (drvr->iflist[ifidx] == NULL) {
96 brcmf_dbg(ERROR, "null i/f %d\n", ifidx); 54 brcmf_err("null i/f %d\n", ifidx);
97 return "<if_null>"; 55 return "<if_null>";
98 } 56 }
99 57
@@ -105,38 +63,33 @@ char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
105 63
106static void _brcmf_set_multicast_list(struct work_struct *work) 64static void _brcmf_set_multicast_list(struct work_struct *work)
107{ 65{
66 struct brcmf_if *ifp;
108 struct net_device *ndev; 67 struct net_device *ndev;
109 struct netdev_hw_addr *ha; 68 struct netdev_hw_addr *ha;
110 u32 dcmd_value, cnt; 69 u32 cmd_value, cnt;
111 __le32 cnt_le; 70 __le32 cnt_le;
112 __le32 dcmd_le_value;
113
114 struct brcmf_dcmd dcmd;
115 char *buf, *bufp; 71 char *buf, *bufp;
116 uint buflen; 72 u32 buflen;
117 int ret; 73 s32 err;
118 74
119 struct brcmf_pub *drvr = container_of(work, struct brcmf_pub, 75 brcmf_dbg(TRACE, "enter\n");
120 multicast_work);
121 76
122 ndev = drvr->iflist[0]->ndev; 77 ifp = container_of(work, struct brcmf_if, multicast_work);
123 cnt = netdev_mc_count(ndev); 78 ndev = ifp->ndev;
124 79
125 /* Determine initial value of allmulti flag */ 80 /* Determine initial value of allmulti flag */
126 dcmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false; 81 cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false;
127 82
128 /* Send down the multicast list first. */ 83 /* Send down the multicast list first. */
129 84 cnt = netdev_mc_count(ndev);
130 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETH_ALEN); 85 buflen = sizeof(cnt) + (cnt * ETH_ALEN);
131 bufp = buf = kmalloc(buflen, GFP_ATOMIC); 86 buf = kmalloc(buflen, GFP_ATOMIC);
132 if (!bufp) 87 if (!buf)
133 return; 88 return;
134 89 bufp = buf;
135 strcpy(bufp, "mcast_list");
136 bufp += strlen("mcast_list") + 1;
137 90
138 cnt_le = cpu_to_le32(cnt); 91 cnt_le = cpu_to_le32(cnt);
139 memcpy(bufp, &cnt_le, sizeof(cnt)); 92 memcpy(bufp, &cnt_le, sizeof(cnt_le));
140 bufp += sizeof(cnt_le); 93 bufp += sizeof(cnt_le);
141 94
142 netdev_for_each_mc_addr(ha, ndev) { 95 netdev_for_each_mc_addr(ha, ndev) {
@@ -147,129 +100,66 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
147 cnt--; 100 cnt--;
148 } 101 }
149 102
150 memset(&dcmd, 0, sizeof(dcmd)); 103 err = brcmf_fil_iovar_data_set(ifp, "mcast_list", buf, buflen);
151 dcmd.cmd = BRCMF_C_SET_VAR; 104 if (err < 0) {
152 dcmd.buf = buf; 105 brcmf_err("Setting mcast_list failed, %d\n", err);
153 dcmd.len = buflen; 106 cmd_value = cnt ? true : cmd_value;
154 dcmd.set = true;
155
156 ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
157 if (ret < 0) {
158 brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
159 brcmf_ifname(drvr, 0), cnt);
160 dcmd_value = cnt ? true : dcmd_value;
161 } 107 }
162 108
163 kfree(buf); 109 kfree(buf);
164 110
165 /* Now send the allmulti setting. This is based on the setting in the 111 /*
112 * Now send the allmulti setting. This is based on the setting in the
166 * net_device flags, but might be modified above to be turned on if we 113 * net_device flags, but might be modified above to be turned on if we
167 * were trying to set some addresses and dongle rejected it... 114 * were trying to set some addresses and dongle rejected it...
168 */ 115 */
116 err = brcmf_fil_iovar_int_set(ifp, "allmulti", cmd_value);
117 if (err < 0)
118 brcmf_err("Setting allmulti failed, %d\n", err);
169 119
170 buflen = sizeof("allmulti") + sizeof(dcmd_value); 120 /*Finally, pick up the PROMISC flag */
171 buf = kmalloc(buflen, GFP_ATOMIC); 121 cmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
172 if (!buf) 122 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PROMISC, cmd_value);
173 return; 123 if (err < 0)
174 124 brcmf_err("Setting BRCMF_C_SET_PROMISC failed, %d\n",
175 dcmd_le_value = cpu_to_le32(dcmd_value); 125 err);
176
177 if (!brcmf_c_mkiovar
178 ("allmulti", (void *)&dcmd_le_value,
179 sizeof(dcmd_le_value), buf, buflen)) {
180 brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
181 brcmf_ifname(drvr, 0),
182 (int)sizeof(dcmd_value), buflen);
183 kfree(buf);
184 return;
185 }
186
187 memset(&dcmd, 0, sizeof(dcmd));
188 dcmd.cmd = BRCMF_C_SET_VAR;
189 dcmd.buf = buf;
190 dcmd.len = buflen;
191 dcmd.set = true;
192
193 ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
194 if (ret < 0) {
195 brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
196 brcmf_ifname(drvr, 0),
197 le32_to_cpu(dcmd_le_value));
198 }
199
200 kfree(buf);
201
202 /* Finally, pick up the PROMISC flag as well, like the NIC
203 driver does */
204
205 dcmd_value = (ndev->flags & IFF_PROMISC) ? true : false;
206 dcmd_le_value = cpu_to_le32(dcmd_value);
207
208 memset(&dcmd, 0, sizeof(dcmd));
209 dcmd.cmd = BRCMF_C_SET_PROMISC;
210 dcmd.buf = &dcmd_le_value;
211 dcmd.len = sizeof(dcmd_le_value);
212 dcmd.set = true;
213
214 ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
215 if (ret < 0) {
216 brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
217 brcmf_ifname(drvr, 0),
218 le32_to_cpu(dcmd_le_value));
219 }
220} 126}
221 127
222static void 128static void
223_brcmf_set_mac_address(struct work_struct *work) 129_brcmf_set_mac_address(struct work_struct *work)
224{ 130{
225 char buf[32]; 131 struct brcmf_if *ifp;
226 struct brcmf_dcmd dcmd; 132 s32 err;
227 int ret;
228
229 struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
230 setmacaddr_work);
231 133
232 brcmf_dbg(TRACE, "enter\n"); 134 brcmf_dbg(TRACE, "enter\n");
233 if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
234 ETH_ALEN, buf, 32)) {
235 brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
236 brcmf_ifname(drvr, 0));
237 return;
238 }
239 memset(&dcmd, 0, sizeof(dcmd));
240 dcmd.cmd = BRCMF_C_SET_VAR;
241 dcmd.buf = buf;
242 dcmd.len = 32;
243 dcmd.set = true;
244
245 ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
246 if (ret < 0)
247 brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
248 brcmf_ifname(drvr, 0));
249 else
250 memcpy(drvr->iflist[0]->ndev->dev_addr,
251 drvr->macvalue, ETH_ALEN);
252 135
253 return; 136 ifp = container_of(work, struct brcmf_if, setmacaddr_work);
137 err = brcmf_fil_iovar_data_set(ifp, "cur_etheraddr", ifp->mac_addr,
138 ETH_ALEN);
139 if (err < 0) {
140 brcmf_err("Setting cur_etheraddr failed, %d\n", err);
141 } else {
142 brcmf_dbg(TRACE, "MAC address updated to %pM\n",
143 ifp->mac_addr);
144 memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
145 }
254} 146}
255 147
256static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) 148static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
257{ 149{
258 struct brcmf_if *ifp = netdev_priv(ndev); 150 struct brcmf_if *ifp = netdev_priv(ndev);
259 struct brcmf_pub *drvr = ifp->drvr;
260 struct sockaddr *sa = (struct sockaddr *)addr; 151 struct sockaddr *sa = (struct sockaddr *)addr;
261 152
262 memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN); 153 memcpy(&ifp->mac_addr, sa->sa_data, ETH_ALEN);
263 schedule_work(&drvr->setmacaddr_work); 154 schedule_work(&ifp->setmacaddr_work);
264 return 0; 155 return 0;
265} 156}
266 157
267static void brcmf_netdev_set_multicast_list(struct net_device *ndev) 158static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
268{ 159{
269 struct brcmf_if *ifp = netdev_priv(ndev); 160 struct brcmf_if *ifp = netdev_priv(ndev);
270 struct brcmf_pub *drvr = ifp->drvr;
271 161
272 schedule_work(&drvr->multicast_work); 162 schedule_work(&ifp->multicast_work);
273} 163}
274 164
275static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) 165static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
@@ -282,8 +172,8 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
282 172
283 /* Reject if down */ 173 /* Reject if down */
284 if (!drvr->bus_if->drvr_up || 174 if (!drvr->bus_if->drvr_up ||
285 (drvr->bus_if->state == BRCMF_BUS_DOWN)) { 175 (drvr->bus_if->state != BRCMF_BUS_DATA)) {
286 brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n", 176 brcmf_err("xmit rejected drvup=%d state=%d\n",
287 drvr->bus_if->drvr_up, 177 drvr->bus_if->drvr_up,
288 drvr->bus_if->state); 178 drvr->bus_if->state);
289 netif_stop_queue(ndev); 179 netif_stop_queue(ndev);
@@ -291,7 +181,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
291 } 181 }
292 182
293 if (!drvr->iflist[ifp->idx]) { 183 if (!drvr->iflist[ifp->idx]) {
294 brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx); 184 brcmf_err("bad ifidx %d\n", ifp->idx);
295 netif_stop_queue(ndev); 185 netif_stop_queue(ndev);
296 return -ENODEV; 186 return -ENODEV;
297 } 187 }
@@ -307,7 +197,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
307 dev_kfree_skb(skb); 197 dev_kfree_skb(skb);
308 skb = skb2; 198 skb = skb2;
309 if (skb == NULL) { 199 if (skb == NULL) {
310 brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n", 200 brcmf_err("%s: skb_realloc_headroom failed\n",
311 brcmf_ifname(drvr, ifp->idx)); 201 brcmf_ifname(drvr, ifp->idx));
312 ret = -ENOMEM; 202 ret = -ENOMEM;
313 goto done; 203 goto done;
@@ -329,7 +219,7 @@ static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
329 brcmf_proto_hdrpush(drvr, ifp->idx, skb); 219 brcmf_proto_hdrpush(drvr, ifp->idx, skb);
330 220
331 /* Use bus module to send data frame */ 221 /* Use bus module to send data frame */
332 ret = drvr->bus_if->brcmf_bus_txdata(drvr->dev, skb); 222 ret = brcmf_bus_txdata(drvr->bus_if, skb);
333 223
334done: 224done:
335 if (ret) 225 if (ret)
@@ -360,32 +250,13 @@ void brcmf_txflowblock(struct device *dev, bool state)
360 } 250 }
361} 251}
362 252
363static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx, 253void brcmf_rx_frame(struct device *dev, u8 ifidx,
364 void *pktdata, struct brcmf_event_msg *event,
365 void **data)
366{
367 int bcmerror = 0;
368
369 bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
370 if (bcmerror != 0)
371 return bcmerror;
372
373 if (drvr->iflist[*ifidx]->ndev)
374 brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
375 event, *data);
376
377 return bcmerror;
378}
379
380void brcmf_rx_frame(struct device *dev, int ifidx,
381 struct sk_buff_head *skb_list) 254 struct sk_buff_head *skb_list)
382{ 255{
383 unsigned char *eth; 256 unsigned char *eth;
384 uint len; 257 uint len;
385 void *data;
386 struct sk_buff *skb, *pnext; 258 struct sk_buff *skb, *pnext;
387 struct brcmf_if *ifp; 259 struct brcmf_if *ifp;
388 struct brcmf_event_msg event;
389 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 260 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
390 struct brcmf_pub *drvr = bus_if->drvr; 261 struct brcmf_pub *drvr = bus_if->drvr;
391 262
@@ -432,10 +303,7 @@ void brcmf_rx_frame(struct device *dev, int ifidx,
432 skb_pull(skb, ETH_HLEN); 303 skb_pull(skb, ETH_HLEN);
433 304
434 /* Process special event packets and then discard them */ 305 /* Process special event packets and then discard them */
435 if (ntohs(skb->protocol) == ETH_P_LINK_CTL) 306 brcmf_fweh_process_skb(drvr, skb, &ifidx);
436 brcmf_host_event(drvr, &ifidx,
437 skb_mac_header(skb),
438 &event, &data);
439 307
440 if (drvr->iflist[ifidx]) { 308 if (drvr->iflist[ifidx]) {
441 ifp = drvr->iflist[ifidx]; 309 ifp = drvr->iflist[ifidx];
@@ -471,9 +339,11 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
471 eh = (struct ethhdr *)(txp->data); 339 eh = (struct ethhdr *)(txp->data);
472 type = ntohs(eh->h_proto); 340 type = ntohs(eh->h_proto);
473 341
474 if (type == ETH_P_PAE) 342 if (type == ETH_P_PAE) {
475 atomic_dec(&drvr->pend_8021x_cnt); 343 atomic_dec(&drvr->pend_8021x_cnt);
476 344 if (waitqueue_active(&drvr->pend_8021x_wait))
345 wake_up(&drvr->pend_8021x_wait);
346 }
477} 347}
478 348
479static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) 349static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -497,83 +367,26 @@ static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
497 return &ifp->stats; 367 return &ifp->stats;
498} 368}
499 369
500/* Retrieve current toe component enables, which are kept 370/*
501 as a bitmap in toe_ol iovar */ 371 * Set current toe component enables in toe_ol iovar,
502static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol) 372 * and set toe global enable iovar
503{ 373 */
504 struct brcmf_dcmd dcmd; 374static int brcmf_toe_set(struct brcmf_if *ifp, u32 toe_ol)
505 __le32 toe_le;
506 char buf[32];
507 int ret;
508
509 memset(&dcmd, 0, sizeof(dcmd));
510
511 dcmd.cmd = BRCMF_C_GET_VAR;
512 dcmd.buf = buf;
513 dcmd.len = (uint) sizeof(buf);
514 dcmd.set = false;
515
516 strcpy(buf, "toe_ol");
517 ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
518 if (ret < 0) {
519 /* Check for older dongle image that doesn't support toe_ol */
520 if (ret == -EIO) {
521 brcmf_dbg(ERROR, "%s: toe not supported by device\n",
522 brcmf_ifname(drvr, ifidx));
523 return -EOPNOTSUPP;
524 }
525
526 brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
527 brcmf_ifname(drvr, ifidx), ret);
528 return ret;
529 }
530
531 memcpy(&toe_le, buf, sizeof(u32));
532 *toe_ol = le32_to_cpu(toe_le);
533 return 0;
534}
535
536/* Set current toe component enables in toe_ol iovar,
537 and set toe global enable iovar */
538static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
539{ 375{
540 struct brcmf_dcmd dcmd; 376 s32 err;
541 char buf[32];
542 int ret;
543 __le32 toe_le = cpu_to_le32(toe_ol);
544
545 memset(&dcmd, 0, sizeof(dcmd));
546
547 dcmd.cmd = BRCMF_C_SET_VAR;
548 dcmd.buf = buf;
549 dcmd.len = (uint) sizeof(buf);
550 dcmd.set = true;
551
552 /* Set toe_ol as requested */
553 strcpy(buf, "toe_ol");
554 memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
555 377
556 ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); 378 err = brcmf_fil_iovar_int_set(ifp, "toe_ol", toe_ol);
557 if (ret < 0) { 379 if (err < 0) {
558 brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n", 380 brcmf_err("Setting toe_ol failed, %d\n", err);
559 brcmf_ifname(drvr, ifidx), ret); 381 return err;
560 return ret;
561 } 382 }
562 383
563 /* Enable toe globally only if any components are enabled. */ 384 err = brcmf_fil_iovar_int_set(ifp, "toe", (toe_ol != 0));
564 toe_le = cpu_to_le32(toe_ol != 0); 385 if (err < 0)
565 386 brcmf_err("Setting toe failed, %d\n", err);
566 strcpy(buf, "toe");
567 memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
568 387
569 ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len); 388 return err;
570 if (ret < 0) {
571 brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
572 brcmf_ifname(drvr, ifidx), ret);
573 return ret;
574 }
575 389
576 return 0;
577} 390}
578 391
579static void brcmf_ethtool_get_drvinfo(struct net_device *ndev, 392static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
@@ -584,15 +397,16 @@ static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
584 397
585 sprintf(info->driver, KBUILD_MODNAME); 398 sprintf(info->driver, KBUILD_MODNAME);
586 sprintf(info->version, "%lu", drvr->drv_version); 399 sprintf(info->version, "%lu", drvr->drv_version);
587 sprintf(info->bus_info, "%s", dev_name(drvr->dev)); 400 sprintf(info->bus_info, "%s", dev_name(drvr->bus_if->dev));
588} 401}
589 402
590static const struct ethtool_ops brcmf_ethtool_ops = { 403static const struct ethtool_ops brcmf_ethtool_ops = {
591 .get_drvinfo = brcmf_ethtool_get_drvinfo, 404 .get_drvinfo = brcmf_ethtool_get_drvinfo,
592}; 405};
593 406
594static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr) 407static int brcmf_ethtool(struct brcmf_if *ifp, void __user *uaddr)
595{ 408{
409 struct brcmf_pub *drvr = ifp->drvr;
596 struct ethtool_drvinfo info; 410 struct ethtool_drvinfo info;
597 char drvname[sizeof(info.driver)]; 411 char drvname[sizeof(info.driver)];
598 u32 cmd; 412 u32 cmd;
@@ -626,15 +440,12 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
626 440
627 /* otherwise, require dongle to be up */ 441 /* otherwise, require dongle to be up */
628 else if (!drvr->bus_if->drvr_up) { 442 else if (!drvr->bus_if->drvr_up) {
629 brcmf_dbg(ERROR, "dongle is not up\n"); 443 brcmf_err("dongle is not up\n");
630 return -ENODEV; 444 return -ENODEV;
631 } 445 }
632
633 /* finally, report dongle driver type */ 446 /* finally, report dongle driver type */
634 else if (drvr->iswl)
635 sprintf(info.driver, "wl");
636 else 447 else
637 sprintf(info.driver, "xx"); 448 sprintf(info.driver, "wl");
638 449
639 sprintf(info.version, "%lu", drvr->drv_version); 450 sprintf(info.version, "%lu", drvr->drv_version);
640 if (copy_to_user(uaddr, &info, sizeof(info))) 451 if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -646,7 +457,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
646 /* Get toe offload components from dongle */ 457 /* Get toe offload components from dongle */
647 case ETHTOOL_GRXCSUM: 458 case ETHTOOL_GRXCSUM:
648 case ETHTOOL_GTXCSUM: 459 case ETHTOOL_GTXCSUM:
649 ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); 460 ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
650 if (ret < 0) 461 if (ret < 0)
651 return ret; 462 return ret;
652 463
@@ -667,7 +478,7 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
667 return -EFAULT; 478 return -EFAULT;
668 479
669 /* Read the current settings, update and write back */ 480 /* Read the current settings, update and write back */
670 ret = brcmf_toe_get(drvr, 0, &toe_cmpnt); 481 ret = brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_cmpnt);
671 if (ret < 0) 482 if (ret < 0)
672 return ret; 483 return ret;
673 484
@@ -679,18 +490,16 @@ static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
679 else 490 else
680 toe_cmpnt &= ~csum_dir; 491 toe_cmpnt &= ~csum_dir;
681 492
682 ret = brcmf_toe_set(drvr, 0, toe_cmpnt); 493 ret = brcmf_toe_set(ifp, toe_cmpnt);
683 if (ret < 0) 494 if (ret < 0)
684 return ret; 495 return ret;
685 496
686 /* If setting TX checksum mode, tell Linux the new mode */ 497 /* If setting TX checksum mode, tell Linux the new mode */
687 if (cmd == ETHTOOL_STXCSUM) { 498 if (cmd == ETHTOOL_STXCSUM) {
688 if (edata.data) 499 if (edata.data)
689 drvr->iflist[0]->ndev->features |= 500 ifp->ndev->features |= NETIF_F_IP_CSUM;
690 NETIF_F_IP_CSUM;
691 else 501 else
692 drvr->iflist[0]->ndev->features &= 502 ifp->ndev->features &= ~NETIF_F_IP_CSUM;
693 ~NETIF_F_IP_CSUM;
694 } 503 }
695 504
696 break; 505 break;
@@ -714,80 +523,23 @@ static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
714 return -1; 523 return -1;
715 524
716 if (cmd == SIOCETHTOOL) 525 if (cmd == SIOCETHTOOL)
717 return brcmf_ethtool(drvr, ifr->ifr_data); 526 return brcmf_ethtool(ifp, ifr->ifr_data);
718 527
719 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
720} 529}
721 530
722/* called only from within this driver. Sends a command to the dongle. */
723s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
724{
725 struct brcmf_dcmd dcmd;
726 s32 err = 0;
727 int buflen = 0;
728 bool is_set_key_cmd;
729 struct brcmf_if *ifp = netdev_priv(ndev);
730 struct brcmf_pub *drvr = ifp->drvr;
731
732 memset(&dcmd, 0, sizeof(dcmd));
733 dcmd.cmd = cmd;
734 dcmd.buf = arg;
735 dcmd.len = len;
736
737 if (dcmd.buf != NULL)
738 buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
739
740 /* send to dongle (must be up, and wl) */
741 if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
742 brcmf_dbg(ERROR, "DONGLE_DOWN\n");
743 err = -EIO;
744 goto done;
745 }
746
747 if (!drvr->iswl) {
748 err = -EIO;
749 goto done;
750 }
751
752 /*
753 * Intercept BRCMF_C_SET_KEY CMD - serialize M4 send and
754 * set key CMD to prevent M4 encryption.
755 */
756 is_set_key_cmd = ((dcmd.cmd == BRCMF_C_SET_KEY) ||
757 ((dcmd.cmd == BRCMF_C_SET_VAR) &&
758 !(strncmp("wsec_key", dcmd.buf, 9))) ||
759 ((dcmd.cmd == BRCMF_C_SET_VAR) &&
760 !(strncmp("bsscfg:wsec_key", dcmd.buf, 15))));
761 if (is_set_key_cmd)
762 brcmf_netdev_wait_pend8021x(ndev);
763
764 err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
765
766done:
767 if (err > 0)
768 err = 0;
769
770 return err;
771}
772
773int brcmf_netlink_dcmd(struct net_device *ndev, struct brcmf_dcmd *dcmd)
774{
775 brcmf_dbg(TRACE, "enter: cmd %x buf %p len %d\n",
776 dcmd->cmd, dcmd->buf, dcmd->len);
777
778 return brcmf_exec_dcmd(ndev, dcmd->cmd, dcmd->buf, dcmd->len);
779}
780
781static int brcmf_netdev_stop(struct net_device *ndev) 531static int brcmf_netdev_stop(struct net_device *ndev)
782{ 532{
783 struct brcmf_if *ifp = netdev_priv(ndev); 533 struct brcmf_if *ifp = netdev_priv(ndev);
784 struct brcmf_pub *drvr = ifp->drvr; 534 struct brcmf_pub *drvr = ifp->drvr;
785 535
786 brcmf_dbg(TRACE, "Enter\n"); 536 brcmf_dbg(TRACE, "Enter\n");
787 brcmf_cfg80211_down(drvr->config); 537
788 if (drvr->bus_if->drvr_up == 0) 538 if (drvr->bus_if->drvr_up == 0)
789 return 0; 539 return 0;
790 540
541 brcmf_cfg80211_down(ndev);
542
791 /* Set state and stop OS transmissions */ 543 /* Set state and stop OS transmissions */
792 drvr->bus_if->drvr_up = false; 544 drvr->bus_if->drvr_up = false;
793 netif_stop_queue(ndev); 545 netif_stop_queue(ndev);
@@ -802,39 +554,36 @@ static int brcmf_netdev_open(struct net_device *ndev)
802 struct brcmf_bus *bus_if = drvr->bus_if; 554 struct brcmf_bus *bus_if = drvr->bus_if;
803 u32 toe_ol; 555 u32 toe_ol;
804 s32 ret = 0; 556 s32 ret = 0;
805 uint up = 0;
806 557
807 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); 558 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
808 559
809 if (ifp->idx == 0) { /* do it only for primary eth0 */ 560 /* If bus is not ready, can't continue */
810 /* If bus is not ready, can't continue */ 561 if (bus_if->state != BRCMF_BUS_DATA) {
811 if (bus_if->state != BRCMF_BUS_DATA) { 562 brcmf_err("failed bus is not ready\n");
812 brcmf_dbg(ERROR, "failed bus is not ready\n"); 563 return -EAGAIN;
813 return -EAGAIN; 564 }
814 }
815 565
816 atomic_set(&drvr->pend_8021x_cnt, 0); 566 atomic_set(&drvr->pend_8021x_cnt, 0);
817 567
818 memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN); 568 memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
819 569
820 /* Get current TOE mode from dongle */ 570 /* Get current TOE mode from dongle */
821 if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0 571 if (brcmf_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0
822 && (toe_ol & TOE_TX_CSUM_OL) != 0) 572 && (toe_ol & TOE_TX_CSUM_OL) != 0)
823 drvr->iflist[ifp->idx]->ndev->features |= 573 drvr->iflist[ifp->idx]->ndev->features |=
824 NETIF_F_IP_CSUM; 574 NETIF_F_IP_CSUM;
825 else 575 else
826 drvr->iflist[ifp->idx]->ndev->features &= 576 drvr->iflist[ifp->idx]->ndev->features &=
827 ~NETIF_F_IP_CSUM; 577 ~NETIF_F_IP_CSUM;
828 }
829 578
830 /* make sure RF is ready for work */ 579 /* make sure RF is ready for work */
831 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up)); 580 brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
832 581
833 /* Allow transmit calls */ 582 /* Allow transmit calls */
834 netif_start_queue(ndev); 583 netif_start_queue(ndev);
835 drvr->bus_if->drvr_up = true; 584 drvr->bus_if->drvr_up = true;
836 if (brcmf_cfg80211_up(drvr->config)) { 585 if (brcmf_cfg80211_up(ndev)) {
837 brcmf_dbg(ERROR, "failed to bring up cfg80211\n"); 586 brcmf_err("failed to bring up cfg80211\n");
838 return -1; 587 return -1;
839 } 588 }
840 589
@@ -851,51 +600,41 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
851 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list 600 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
852}; 601};
853 602
854static int brcmf_net_attach(struct brcmf_if *ifp) 603static const struct net_device_ops brcmf_netdev_ops_virt = {
604 .ndo_open = brcmf_cfg80211_up,
605 .ndo_stop = brcmf_cfg80211_down,
606 .ndo_get_stats = brcmf_netdev_get_stats,
607 .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
608 .ndo_start_xmit = brcmf_netdev_start_xmit,
609 .ndo_set_mac_address = brcmf_netdev_set_mac_address,
610 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
611};
612
613int brcmf_net_attach(struct brcmf_if *ifp)
855{ 614{
856 struct brcmf_pub *drvr = ifp->drvr; 615 struct brcmf_pub *drvr = ifp->drvr;
857 struct net_device *ndev; 616 struct net_device *ndev;
858 u8 temp_addr[ETH_ALEN];
859
860 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
861 617
862 ndev = drvr->iflist[ifp->idx]->ndev; 618 brcmf_dbg(TRACE, "ifidx %d mac %pM\n", ifp->idx, ifp->mac_addr);
863 ndev->netdev_ops = &brcmf_netdev_ops_pri; 619 ndev = ifp->ndev;
864 620
865 /* 621 /* set appropriate operations */
866 * determine mac address to use 622 if (!ifp->idx)
867 */ 623 ndev->netdev_ops = &brcmf_netdev_ops_pri;
868 if (is_valid_ether_addr(ifp->mac_addr))
869 memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
870 else 624 else
871 memcpy(temp_addr, drvr->mac, ETH_ALEN); 625 ndev->netdev_ops = &brcmf_netdev_ops_virt;
872
873 if (ifp->idx == 1) {
874 brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
875 /* ACCESSPOINT INTERFACE CASE */
876 temp_addr[0] |= 0X02; /* set bit 2 ,
877 - Locally Administered address */
878 626
879 }
880 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen; 627 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
881 ndev->ethtool_ops = &brcmf_ethtool_ops; 628 ndev->ethtool_ops = &brcmf_ethtool_ops;
882 629
883 drvr->rxsz = ndev->mtu + ndev->hard_header_len + 630 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
884 drvr->hdrlen; 631 drvr->hdrlen;
885 632
886 memcpy(ndev->dev_addr, temp_addr, ETH_ALEN); 633 /* set the mac address */
887 634 memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
888 /* attach to cfg80211 for primary interface */
889 if (!ifp->idx) {
890 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
891 if (drvr->config == NULL) {
892 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
893 goto fail;
894 }
895 }
896 635
897 if (register_netdev(ndev) != 0) { 636 if (register_netdev(ndev) != 0) {
898 brcmf_dbg(ERROR, "couldn't register the net device\n"); 637 brcmf_err("couldn't register the net device\n");
899 goto fail; 638 goto fail;
900 } 639 }
901 640
@@ -908,13 +647,12 @@ fail:
908 return -EBADE; 647 return -EBADE;
909} 648}
910 649
911int 650struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, int ifidx, s32 bssidx,
912brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) 651 char *name, u8 *addr_mask)
913{ 652{
914 struct brcmf_if *ifp; 653 struct brcmf_if *ifp;
915 struct net_device *ndev; 654 struct net_device *ndev;
916 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 655 int i;
917 struct brcmf_pub *drvr = bus_if->drvr;
918 656
919 brcmf_dbg(TRACE, "idx %d\n", ifidx); 657 brcmf_dbg(TRACE, "idx %d\n", ifidx);
920 658
@@ -924,19 +662,24 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
924 * in case we missed the BRCMF_E_IF_DEL event. 662 * in case we missed the BRCMF_E_IF_DEL event.
925 */ 663 */
926 if (ifp) { 664 if (ifp) {
927 brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n", 665 brcmf_err("ERROR: netdev:%s already exists\n",
928 ifp->ndev->name); 666 ifp->ndev->name);
929 netif_stop_queue(ifp->ndev); 667 if (ifidx) {
930 unregister_netdev(ifp->ndev); 668 netif_stop_queue(ifp->ndev);
931 free_netdev(ifp->ndev); 669 unregister_netdev(ifp->ndev);
932 drvr->iflist[ifidx] = NULL; 670 free_netdev(ifp->ndev);
671 drvr->iflist[ifidx] = NULL;
672 } else {
673 brcmf_err("ignore IF event\n");
674 return ERR_PTR(-EINVAL);
675 }
933 } 676 }
934 677
935 /* Allocate netdev, including space for private structure */ 678 /* Allocate netdev, including space for private structure */
936 ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup); 679 ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
937 if (!ndev) { 680 if (!ndev) {
938 brcmf_dbg(ERROR, "OOM - alloc_netdev\n"); 681 brcmf_err("OOM - alloc_netdev\n");
939 return -ENOMEM; 682 return ERR_PTR(-ENOMEM);
940 } 683 }
941 684
942 ifp = netdev_priv(ndev); 685 ifp = netdev_priv(ndev);
@@ -944,20 +687,19 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
944 ifp->drvr = drvr; 687 ifp->drvr = drvr;
945 drvr->iflist[ifidx] = ifp; 688 drvr->iflist[ifidx] = ifp;
946 ifp->idx = ifidx; 689 ifp->idx = ifidx;
947 if (mac_addr != NULL) 690 ifp->bssidx = bssidx;
948 memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
949 691
950 if (brcmf_net_attach(ifp)) { 692 INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address);
951 brcmf_dbg(ERROR, "brcmf_net_attach failed"); 693 INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
952 free_netdev(ifp->ndev);
953 drvr->iflist[ifidx] = NULL;
954 return -EOPNOTSUPP;
955 }
956 694
957 brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n", 695 if (addr_mask != NULL)
958 current->pid, ifp->ndev->name); 696 for (i = 0; i < ETH_ALEN; i++)
697 ifp->mac_addr[i] = drvr->mac[i] ^ addr_mask[i];
959 698
960 return 0; 699 brcmf_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n",
700 current->pid, ifp->ndev->name, ifp->mac_addr);
701
702 return ifp;
961} 703}
962 704
963void brcmf_del_if(struct brcmf_pub *drvr, int ifidx) 705void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
@@ -968,7 +710,7 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
968 710
969 ifp = drvr->iflist[ifidx]; 711 ifp = drvr->iflist[ifidx];
970 if (!ifp) { 712 if (!ifp) {
971 brcmf_dbg(ERROR, "Null interface\n"); 713 brcmf_err("Null interface\n");
972 return; 714 return;
973 } 715 }
974 if (ifp->ndev) { 716 if (ifp->ndev) {
@@ -982,6 +724,9 @@ void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
982 netif_stop_queue(ifp->ndev); 724 netif_stop_queue(ifp->ndev);
983 } 725 }
984 726
727 cancel_work_sync(&ifp->setmacaddr_work);
728 cancel_work_sync(&ifp->multicast_work);
729
985 unregister_netdev(ifp->ndev); 730 unregister_netdev(ifp->ndev);
986 drvr->iflist[ifidx] = NULL; 731 drvr->iflist[ifidx] = NULL;
987 if (ifidx == 0) 732 if (ifidx == 0)
@@ -1008,7 +753,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1008 drvr->hdrlen = bus_hdrlen; 753 drvr->hdrlen = bus_hdrlen;
1009 drvr->bus_if = dev_get_drvdata(dev); 754 drvr->bus_if = dev_get_drvdata(dev);
1010 drvr->bus_if->drvr = drvr; 755 drvr->bus_if->drvr = drvr;
1011 drvr->dev = dev;
1012 756
1013 /* create device debugfs folder */ 757 /* create device debugfs folder */
1014 brcmf_debugfs_attach(drvr); 758 brcmf_debugfs_attach(drvr);
@@ -1016,15 +760,17 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
1016 /* Attach and link in the protocol */ 760 /* Attach and link in the protocol */
1017 ret = brcmf_proto_attach(drvr); 761 ret = brcmf_proto_attach(drvr);
1018 if (ret != 0) { 762 if (ret != 0) {
1019 brcmf_dbg(ERROR, "brcmf_prot_attach failed\n"); 763 brcmf_err("brcmf_prot_attach failed\n");
1020 goto fail; 764 goto fail;
1021 } 765 }
1022 766
1023 INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address); 767 /* attach firmware event handler */
1024 INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list); 768 brcmf_fweh_attach(drvr);
1025 769
1026 INIT_LIST_HEAD(&drvr->bus_if->dcmd_list); 770 INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
1027 771
772 init_waitqueue_head(&drvr->pend_8021x_wait);
773
1028 return ret; 774 return ret;
1029 775
1030fail: 776fail:
@@ -1036,63 +782,53 @@ fail:
1036int brcmf_bus_start(struct device *dev) 782int brcmf_bus_start(struct device *dev)
1037{ 783{
1038 int ret = -1; 784 int ret = -1;
1039 /* Room for "event_msgs" + '\0' + bitvec */
1040 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
1041 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 785 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1042 struct brcmf_pub *drvr = bus_if->drvr; 786 struct brcmf_pub *drvr = bus_if->drvr;
787 struct brcmf_if *ifp;
1043 788
1044 brcmf_dbg(TRACE, "\n"); 789 brcmf_dbg(TRACE, "\n");
1045 790
1046 /* Bring up the bus */ 791 /* Bring up the bus */
1047 ret = bus_if->brcmf_bus_init(dev); 792 ret = brcmf_bus_init(bus_if);
1048 if (ret != 0) { 793 if (ret != 0) {
1049 brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret); 794 brcmf_err("brcmf_sdbrcm_bus_init failed %d\n", ret);
1050 return ret; 795 return ret;
1051 } 796 }
1052 797
1053 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN, 798 /* add primary networking interface */
1054 iovbuf, sizeof(iovbuf)); 799 ifp = brcmf_add_if(drvr, 0, 0, "wlan%d", NULL);
1055 brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR, iovbuf, 800 if (IS_ERR(ifp))
1056 sizeof(iovbuf)); 801 return PTR_ERR(ifp);
1057 memcpy(drvr->eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN); 802
1058 803 /* signal bus ready */
1059 setbit(drvr->eventmask, BRCMF_E_SET_SSID); 804 bus_if->state = BRCMF_BUS_DATA;
1060 setbit(drvr->eventmask, BRCMF_E_PRUNE); 805
1061 setbit(drvr->eventmask, BRCMF_E_AUTH); 806 /* Bus is ready, do any initialization */
1062 setbit(drvr->eventmask, BRCMF_E_REASSOC); 807 ret = brcmf_c_preinit_dcmds(ifp);
1063 setbit(drvr->eventmask, BRCMF_E_REASSOC_IND);
1064 setbit(drvr->eventmask, BRCMF_E_DEAUTH_IND);
1065 setbit(drvr->eventmask, BRCMF_E_DISASSOC_IND);
1066 setbit(drvr->eventmask, BRCMF_E_DISASSOC);
1067 setbit(drvr->eventmask, BRCMF_E_JOIN);
1068 setbit(drvr->eventmask, BRCMF_E_ASSOC_IND);
1069 setbit(drvr->eventmask, BRCMF_E_PSK_SUP);
1070 setbit(drvr->eventmask, BRCMF_E_LINK);
1071 setbit(drvr->eventmask, BRCMF_E_NDIS_LINK);
1072 setbit(drvr->eventmask, BRCMF_E_MIC_ERROR);
1073 setbit(drvr->eventmask, BRCMF_E_PMKID_CACHE);
1074 setbit(drvr->eventmask, BRCMF_E_TXFAIL);
1075 setbit(drvr->eventmask, BRCMF_E_JOIN_START);
1076 setbit(drvr->eventmask, BRCMF_E_SCAN_COMPLETE);
1077
1078/* enable dongle roaming event */
1079
1080 drvr->pktfilter_count = 1;
1081 /* Setup filter to allow only unicast */
1082 drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
1083
1084 /* Bus is ready, do any protocol initialization */
1085 ret = brcmf_proto_init(drvr);
1086 if (ret < 0) 808 if (ret < 0)
1087 return ret; 809 goto fail;
1088 810
1089 /* add primary networking interface */ 811 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
1090 ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac); 812 if (drvr->config == NULL) {
813 ret = -ENOMEM;
814 goto fail;
815 }
816
817 ret = brcmf_fweh_activate_events(ifp);
1091 if (ret < 0) 818 if (ret < 0)
819 goto fail;
820
821 ret = brcmf_net_attach(ifp);
822fail:
823 if (ret < 0) {
824 brcmf_err("failed: %d\n", ret);
825 if (drvr->config)
826 brcmf_cfg80211_detach(drvr->config);
827 free_netdev(drvr->iflist[0]->ndev);
828 drvr->iflist[0] = NULL;
1092 return ret; 829 return ret;
830 }
1093 831
1094 /* signal bus ready */
1095 bus_if->state = BRCMF_BUS_DATA;
1096 return 0; 832 return 0;
1097} 833}
1098 834
@@ -1105,7 +841,7 @@ static void brcmf_bus_detach(struct brcmf_pub *drvr)
1105 brcmf_proto_stop(drvr); 841 brcmf_proto_stop(drvr);
1106 842
1107 /* Stop the bus module */ 843 /* Stop the bus module */
1108 drvr->bus_if->brcmf_bus_stop(drvr->dev); 844 brcmf_bus_stop(drvr->bus_if);
1109 } 845 }
1110} 846}
1111 847
@@ -1117,6 +853,11 @@ void brcmf_detach(struct device *dev)
1117 853
1118 brcmf_dbg(TRACE, "Enter\n"); 854 brcmf_dbg(TRACE, "Enter\n");
1119 855
856 if (drvr == NULL)
857 return;
858
859 /* stop firmware event handling */
860 brcmf_fweh_detach(drvr);
1120 861
1121 /* make sure primary interface removed last */ 862 /* make sure primary interface removed last */
1122 for (i = BRCMF_MAX_IFS-1; i > -1; i--) 863 for (i = BRCMF_MAX_IFS-1; i > -1; i--)
@@ -1126,8 +867,6 @@ void brcmf_detach(struct device *dev)
1126 brcmf_bus_detach(drvr); 867 brcmf_bus_detach(drvr);
1127 868
1128 if (drvr->prot) { 869 if (drvr->prot) {
1129 cancel_work_sync(&drvr->setmacaddr_work);
1130 cancel_work_sync(&drvr->multicast_work);
1131 brcmf_proto_detach(drvr); 870 brcmf_proto_detach(drvr);
1132 } 871 }
1133 872
@@ -1141,63 +880,20 @@ static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
1141 return atomic_read(&drvr->pend_8021x_cnt); 880 return atomic_read(&drvr->pend_8021x_cnt);
1142} 881}
1143 882
1144#define MAX_WAIT_FOR_8021X_TX 10
1145
1146int brcmf_netdev_wait_pend8021x(struct net_device *ndev) 883int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
1147{ 884{
1148 struct brcmf_if *ifp = netdev_priv(ndev); 885 struct brcmf_if *ifp = netdev_priv(ndev);
1149 struct brcmf_pub *drvr = ifp->drvr; 886 struct brcmf_pub *drvr = ifp->drvr;
1150 int timeout = 10 * HZ / 1000; 887 int err;
1151 int ntimes = MAX_WAIT_FOR_8021X_TX;
1152 int pend = brcmf_get_pend_8021x_cnt(drvr);
1153
1154 while (ntimes && pend) {
1155 if (pend) {
1156 set_current_state(TASK_INTERRUPTIBLE);
1157 schedule_timeout(timeout);
1158 set_current_state(TASK_RUNNING);
1159 ntimes--;
1160 }
1161 pend = brcmf_get_pend_8021x_cnt(drvr);
1162 }
1163 return pend;
1164}
1165 888
1166#ifdef DEBUG 889 err = wait_event_timeout(drvr->pend_8021x_wait,
1167int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size) 890 !brcmf_get_pend_8021x_cnt(drvr),
1168{ 891 msecs_to_jiffies(MAX_WAIT_FOR_8021X_TX));
1169 int ret = 0;
1170 struct file *fp;
1171 mm_segment_t old_fs;
1172 loff_t pos = 0;
1173
1174 /* change to KERNEL_DS address limit */
1175 old_fs = get_fs();
1176 set_fs(KERNEL_DS);
1177
1178 /* open file to write */
1179 fp = filp_open("/tmp/mem_dump", O_WRONLY | O_CREAT, 0640);
1180 if (!fp) {
1181 brcmf_dbg(ERROR, "open file error\n");
1182 ret = -1;
1183 goto exit;
1184 }
1185 892
1186 /* Write buf to file */ 893 WARN_ON(!err);
1187 fp->f_op->write(fp, (char __user *)buf, size, &pos);
1188 894
1189exit: 895 return !err;
1190 /* free buf before return */
1191 kfree(buf);
1192 /* close file before return */
1193 if (fp)
1194 filp_close(fp, NULL);
1195 /* restore previous address limit */
1196 set_fs(old_fs);
1197
1198 return ret;
1199} 896}
1200#endif /* DEBUG */
1201 897
1202static void brcmf_driver_init(struct work_struct *work) 898static void brcmf_driver_init(struct work_struct *work)
1203{ 899{
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 6bc4425a8b0f..48fa70302192 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -27,11 +27,6 @@ extern int brcmf_proto_attach(struct brcmf_pub *drvr);
27/* Unlink, frees allocated protocol memory (including brcmf_proto) */ 27/* Unlink, frees allocated protocol memory (including brcmf_proto) */
28extern void brcmf_proto_detach(struct brcmf_pub *drvr); 28extern void brcmf_proto_detach(struct brcmf_pub *drvr);
29 29
30/* Initialize protocol: sync w/dongle state.
31 * Sets dongle media info (iswl, drv_version, mac address).
32 */
33extern int brcmf_proto_init(struct brcmf_pub *drvr);
34
35/* Stop protocol: sync w/dongle state. */ 30/* Stop protocol: sync w/dongle state. */
36extern void brcmf_proto_stop(struct brcmf_pub *drvr); 31extern void brcmf_proto_stop(struct brcmf_pub *drvr);
37 32
@@ -41,13 +36,7 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
41extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, 36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
42 struct sk_buff *txp); 37 struct sk_buff *txp);
43 38
44/* Use protocol to issue command to dongle */ 39/* Sets dongle media info (drv_version, mac address). */
45extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, 40extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
46 struct brcmf_dcmd *dcmd, int len);
47
48extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
49
50extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
51 uint cmd, void *buf, uint len);
52 41
53#endif /* _BRCMF_PROTO_H_ */ 42#endif /* _BRCMF_PROTO_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3564686add9a..cf857f1edf8c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -533,9 +533,11 @@ struct brcmf_sdio {
533 u8 *rxbuf; /* Buffer for receiving control packets */ 533 u8 *rxbuf; /* Buffer for receiving control packets */
534 uint rxblen; /* Allocated length of rxbuf */ 534 uint rxblen; /* Allocated length of rxbuf */
535 u8 *rxctl; /* Aligned pointer into rxbuf */ 535 u8 *rxctl; /* Aligned pointer into rxbuf */
536 u8 *rxctl_orig; /* pointer for freeing rxctl */
536 u8 *databuf; /* Buffer for receiving big glom packet */ 537 u8 *databuf; /* Buffer for receiving big glom packet */
537 u8 *dataptr; /* Aligned pointer into databuf */ 538 u8 *dataptr; /* Aligned pointer into databuf */
538 uint rxlen; /* Length of valid data in buffer */ 539 uint rxlen; /* Length of valid data in buffer */
540 spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */
539 541
540 u8 sdpcm_ver; /* Bus protocol reported by dongle */ 542 u8 sdpcm_ver; /* Bus protocol reported by dongle */
541 543
@@ -582,8 +584,6 @@ struct brcmf_sdio {
582 struct list_head dpc_tsklst; 584 struct list_head dpc_tsklst;
583 spinlock_t dpc_tl_lock; 585 spinlock_t dpc_tl_lock;
584 586
585 struct semaphore sdsem;
586
587 const struct firmware *firmware; 587 const struct firmware *firmware;
588 u32 fw_ptr; 588 u32 fw_ptr;
589 589
@@ -614,6 +614,12 @@ static const uint max_roundup = 512;
614 614
615#define ALIGNMENT 4 615#define ALIGNMENT 4
616 616
617enum brcmf_sdio_frmtype {
618 BRCMF_SDIO_FT_NORMAL,
619 BRCMF_SDIO_FT_SUPER,
620 BRCMF_SDIO_FT_SUB,
621};
622
617static void pkt_align(struct sk_buff *p, int len, int align) 623static void pkt_align(struct sk_buff *p, int len, int align)
618{ 624{
619 uint datalign; 625 uint datalign;
@@ -683,7 +689,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
683 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 689 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
684 clkreq, &err); 690 clkreq, &err);
685 if (err) { 691 if (err) {
686 brcmf_dbg(ERROR, "HT Avail request error: %d\n", err); 692 brcmf_err("HT Avail request error: %d\n", err);
687 return -EBADE; 693 return -EBADE;
688 } 694 }
689 695
@@ -691,7 +697,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
691 clkctl = brcmf_sdio_regrb(bus->sdiodev, 697 clkctl = brcmf_sdio_regrb(bus->sdiodev,
692 SBSDIO_FUNC1_CHIPCLKCSR, &err); 698 SBSDIO_FUNC1_CHIPCLKCSR, &err);
693 if (err) { 699 if (err) {
694 brcmf_dbg(ERROR, "HT Avail read error: %d\n", err); 700 brcmf_err("HT Avail read error: %d\n", err);
695 return -EBADE; 701 return -EBADE;
696 } 702 }
697 703
@@ -701,7 +707,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
701 devctl = brcmf_sdio_regrb(bus->sdiodev, 707 devctl = brcmf_sdio_regrb(bus->sdiodev,
702 SBSDIO_DEVICE_CTL, &err); 708 SBSDIO_DEVICE_CTL, &err);
703 if (err) { 709 if (err) {
704 brcmf_dbg(ERROR, "Devctl error setting CA: %d\n", 710 brcmf_err("Devctl error setting CA: %d\n",
705 err); 711 err);
706 return -EBADE; 712 return -EBADE;
707 } 713 }
@@ -735,11 +741,11 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
735 usleep_range(5000, 10000); 741 usleep_range(5000, 10000);
736 } 742 }
737 if (err) { 743 if (err) {
738 brcmf_dbg(ERROR, "HT Avail request error: %d\n", err); 744 brcmf_err("HT Avail request error: %d\n", err);
739 return -EBADE; 745 return -EBADE;
740 } 746 }
741 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) { 747 if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
742 brcmf_dbg(ERROR, "HT Avail timeout (%d): clkctl 0x%02x\n", 748 brcmf_err("HT Avail timeout (%d): clkctl 0x%02x\n",
743 PMU_MAX_TRANSITION_DLY, clkctl); 749 PMU_MAX_TRANSITION_DLY, clkctl);
744 return -EBADE; 750 return -EBADE;
745 } 751 }
@@ -751,7 +757,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
751#if defined(DEBUG) 757#if defined(DEBUG)
752 if (!bus->alp_only) { 758 if (!bus->alp_only) {
753 if (SBSDIO_ALPONLY(clkctl)) 759 if (SBSDIO_ALPONLY(clkctl))
754 brcmf_dbg(ERROR, "HT Clock should be on\n"); 760 brcmf_err("HT Clock should be on\n");
755 } 761 }
756#endif /* defined (DEBUG) */ 762#endif /* defined (DEBUG) */
757 763
@@ -773,7 +779,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
773 clkreq, &err); 779 clkreq, &err);
774 brcmf_dbg(INFO, "CLKCTL: turned OFF\n"); 780 brcmf_dbg(INFO, "CLKCTL: turned OFF\n");
775 if (err) { 781 if (err) {
776 brcmf_dbg(ERROR, "Failed access turning clock off: %d\n", 782 brcmf_err("Failed access turning clock off: %d\n",
777 err); 783 err);
778 return -EBADE; 784 return -EBADE;
779 } 785 }
@@ -830,7 +836,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
830 else if (bus->clkstate == CLK_AVAIL) 836 else if (bus->clkstate == CLK_AVAIL)
831 brcmf_sdbrcm_htclk(bus, false, false); 837 brcmf_sdbrcm_htclk(bus, false, false);
832 else 838 else
833 brcmf_dbg(ERROR, "request for %d -> %d\n", 839 brcmf_err("request for %d -> %d\n",
834 bus->clkstate, target); 840 bus->clkstate, target);
835 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 841 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
836 break; 842 break;
@@ -874,7 +880,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
874 brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n", 880 brcmf_dbg(INFO, "Dongle reports NAK handled, expect rtx of %d\n",
875 bus->rx_seq); 881 bus->rx_seq);
876 if (!bus->rxskip) 882 if (!bus->rxskip)
877 brcmf_dbg(ERROR, "unexpected NAKHANDLED!\n"); 883 brcmf_err("unexpected NAKHANDLED!\n");
878 884
879 bus->rxskip = false; 885 bus->rxskip = false;
880 intstatus |= I_HMB_FRAME_IND; 886 intstatus |= I_HMB_FRAME_IND;
@@ -888,7 +894,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
888 (hmb_data & HMB_DATA_VERSION_MASK) >> 894 (hmb_data & HMB_DATA_VERSION_MASK) >>
889 HMB_DATA_VERSION_SHIFT; 895 HMB_DATA_VERSION_SHIFT;
890 if (bus->sdpcm_ver != SDPCM_PROT_VERSION) 896 if (bus->sdpcm_ver != SDPCM_PROT_VERSION)
891 brcmf_dbg(ERROR, "Version mismatch, dongle reports %d, " 897 brcmf_err("Version mismatch, dongle reports %d, "
892 "expecting %d\n", 898 "expecting %d\n",
893 bus->sdpcm_ver, SDPCM_PROT_VERSION); 899 bus->sdpcm_ver, SDPCM_PROT_VERSION);
894 else 900 else
@@ -921,7 +927,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
921 HMB_DATA_FC | 927 HMB_DATA_FC |
922 HMB_DATA_FWREADY | 928 HMB_DATA_FWREADY |
923 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK)) 929 HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK))
924 brcmf_dbg(ERROR, "Unknown mailbox data content: 0x%02x\n", 930 brcmf_err("Unknown mailbox data content: 0x%02x\n",
925 hmb_data); 931 hmb_data);
926 932
927 return intstatus; 933 return intstatus;
@@ -934,7 +940,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
934 u8 hi, lo; 940 u8 hi, lo;
935 int err; 941 int err;
936 942
937 brcmf_dbg(ERROR, "%sterminate frame%s\n", 943 brcmf_err("%sterminate frame%s\n",
938 abort ? "abort command, " : "", 944 abort ? "abort command, " : "",
939 rtx ? ", send NAK" : ""); 945 rtx ? ", send NAK" : "");
940 946
@@ -957,14 +963,14 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
957 break; 963 break;
958 964
959 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { 965 if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) {
960 brcmf_dbg(ERROR, "count growing: last 0x%04x now 0x%04x\n", 966 brcmf_err("count growing: last 0x%04x now 0x%04x\n",
961 lastrbc, (hi << 8) + lo); 967 lastrbc, (hi << 8) + lo);
962 } 968 }
963 lastrbc = (hi << 8) + lo; 969 lastrbc = (hi << 8) + lo;
964 } 970 }
965 971
966 if (!retries) 972 if (!retries)
967 brcmf_dbg(ERROR, "count never zeroed: last 0x%04x\n", lastrbc); 973 brcmf_err("count never zeroed: last 0x%04x\n", lastrbc);
968 else 974 else
969 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); 975 brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
970 976
@@ -1031,8 +1037,9 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
1031 } 1037 }
1032} 1038}
1033 1039
1034static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header, 1040static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1035 struct brcmf_sdio_read *rd) 1041 struct brcmf_sdio_read *rd,
1042 enum brcmf_sdio_frmtype type)
1036{ 1043{
1037 u16 len, checksum; 1044 u16 len, checksum;
1038 u8 rx_seq, fc, tx_seq_max; 1045 u8 rx_seq, fc, tx_seq_max;
@@ -1047,17 +1054,26 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1047 /* All zero means no more to read */ 1054 /* All zero means no more to read */
1048 if (!(len | checksum)) { 1055 if (!(len | checksum)) {
1049 bus->rxpending = false; 1056 bus->rxpending = false;
1050 return false; 1057 return -ENODATA;
1051 } 1058 }
1052 if ((u16)(~(len ^ checksum))) { 1059 if ((u16)(~(len ^ checksum))) {
1053 brcmf_dbg(ERROR, "HW header checksum error\n"); 1060 brcmf_err("HW header checksum error\n");
1054 bus->sdcnt.rx_badhdr++; 1061 bus->sdcnt.rx_badhdr++;
1055 brcmf_sdbrcm_rxfail(bus, false, false); 1062 brcmf_sdbrcm_rxfail(bus, false, false);
1056 return false; 1063 return -EIO;
1057 } 1064 }
1058 if (len < SDPCM_HDRLEN) { 1065 if (len < SDPCM_HDRLEN) {
1059 brcmf_dbg(ERROR, "HW header length error\n"); 1066 brcmf_err("HW header length error\n");
1060 return false; 1067 return -EPROTO;
1068 }
1069 if (type == BRCMF_SDIO_FT_SUPER &&
1070 (roundup(len, bus->blocksize) != rd->len)) {
1071 brcmf_err("HW superframe header length error\n");
1072 return -EPROTO;
1073 }
1074 if (type == BRCMF_SDIO_FT_SUB && len > rd->len) {
1075 brcmf_err("HW subframe header length error\n");
1076 return -EPROTO;
1061 } 1077 }
1062 rd->len = len; 1078 rd->len = len;
1063 1079
@@ -1071,35 +1087,56 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1071 * Byte 5: Maximum Sequence number allow for Tx 1087 * Byte 5: Maximum Sequence number allow for Tx
1072 * Byte 6~7: Reserved 1088 * Byte 6~7: Reserved
1073 */ 1089 */
1090 if (type == BRCMF_SDIO_FT_SUPER &&
1091 SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
1092 brcmf_err("Glom descriptor found in superframe head\n");
1093 rd->len = 0;
1094 return -EINVAL;
1095 }
1074 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]); 1096 rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
1075 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]); 1097 rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
1076 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL) { 1098 if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
1077 brcmf_dbg(ERROR, "HW header length too long\n"); 1099 type != BRCMF_SDIO_FT_SUPER) {
1100 brcmf_err("HW header length too long\n");
1078 bus->sdiodev->bus_if->dstats.rx_errors++; 1101 bus->sdiodev->bus_if->dstats.rx_errors++;
1079 bus->sdcnt.rx_toolong++; 1102 bus->sdcnt.rx_toolong++;
1080 brcmf_sdbrcm_rxfail(bus, false, false); 1103 brcmf_sdbrcm_rxfail(bus, false, false);
1081 rd->len = 0; 1104 rd->len = 0;
1082 return false; 1105 return -EPROTO;
1106 }
1107 if (type == BRCMF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) {
1108 brcmf_err("Wrong channel for superframe\n");
1109 rd->len = 0;
1110 return -EINVAL;
1111 }
1112 if (type == BRCMF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL &&
1113 rd->channel != SDPCM_EVENT_CHANNEL) {
1114 brcmf_err("Wrong channel for subframe\n");
1115 rd->len = 0;
1116 return -EINVAL;
1083 } 1117 }
1084 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1118 rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1085 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) { 1119 if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
1086 brcmf_dbg(ERROR, "seq %d: bad data offset\n", rx_seq); 1120 brcmf_err("seq %d: bad data offset\n", rx_seq);
1087 bus->sdcnt.rx_badhdr++; 1121 bus->sdcnt.rx_badhdr++;
1088 brcmf_sdbrcm_rxfail(bus, false, false); 1122 brcmf_sdbrcm_rxfail(bus, false, false);
1089 rd->len = 0; 1123 rd->len = 0;
1090 return false; 1124 return -ENXIO;
1091 } 1125 }
1092 if (rd->seq_num != rx_seq) { 1126 if (rd->seq_num != rx_seq) {
1093 brcmf_dbg(ERROR, "seq %d: sequence number error, expect %d\n", 1127 brcmf_err("seq %d: sequence number error, expect %d\n",
1094 rx_seq, rd->seq_num); 1128 rx_seq, rd->seq_num);
1095 bus->sdcnt.rx_badseq++; 1129 bus->sdcnt.rx_badseq++;
1096 rd->seq_num = rx_seq; 1130 rd->seq_num = rx_seq;
1097 } 1131 }
1132 /* no need to check the reset for subframe */
1133 if (type == BRCMF_SDIO_FT_SUB)
1134 return 0;
1098 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET]; 1135 rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1099 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) { 1136 if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
1100 /* only warm for NON glom packet */ 1137 /* only warm for NON glom packet */
1101 if (rd->channel != SDPCM_GLOM_CHANNEL) 1138 if (rd->channel != SDPCM_GLOM_CHANNEL)
1102 brcmf_dbg(ERROR, "seq %d: next length error\n", rx_seq); 1139 brcmf_err("seq %d: next length error\n", rx_seq);
1103 rd->len_nxtfrm = 0; 1140 rd->len_nxtfrm = 0;
1104 } 1141 }
1105 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1142 fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
@@ -1113,12 +1150,12 @@ static bool brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
1113 } 1150 }
1114 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]); 1151 tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
1115 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) { 1152 if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
1116 brcmf_dbg(ERROR, "seq %d: max tx seq number error\n", rx_seq); 1153 brcmf_err("seq %d: max tx seq number error\n", rx_seq);
1117 tx_seq_max = bus->tx_seq + 2; 1154 tx_seq_max = bus->tx_seq + 2;
1118 } 1155 }
1119 bus->tx_max = tx_seq_max; 1156 bus->tx_max = tx_seq_max;
1120 1157
1121 return true; 1158 return 0;
1122} 1159}
1123 1160
1124static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) 1161static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1126,16 +1163,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1126 u16 dlen, totlen; 1163 u16 dlen, totlen;
1127 u8 *dptr, num = 0; 1164 u8 *dptr, num = 0;
1128 1165
1129 u16 sublen, check; 1166 u16 sublen;
1130 struct sk_buff *pfirst, *pnext; 1167 struct sk_buff *pfirst, *pnext;
1131 1168
1132 int errcode; 1169 int errcode;
1133 u8 chan, seq, doff, sfdoff; 1170 u8 doff, sfdoff;
1134 u8 txmax;
1135 1171
1136 int ifidx = 0; 1172 int ifidx = 0;
1137 bool usechain = bus->use_rxchain; 1173 bool usechain = bus->use_rxchain;
1138 u16 next_len; 1174
1175 struct brcmf_sdio_read rd_new;
1139 1176
1140 /* If packets, issue read(s) and send up packet chain */ 1177 /* If packets, issue read(s) and send up packet chain */
1141 /* Return sequence numbers consumed? */ 1178 /* Return sequence numbers consumed? */
@@ -1149,7 +1186,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1149 dlen = (u16) (bus->glomd->len); 1186 dlen = (u16) (bus->glomd->len);
1150 dptr = bus->glomd->data; 1187 dptr = bus->glomd->data;
1151 if (!dlen || (dlen & 1)) { 1188 if (!dlen || (dlen & 1)) {
1152 brcmf_dbg(ERROR, "bad glomd len(%d), ignore descriptor\n", 1189 brcmf_err("bad glomd len(%d), ignore descriptor\n",
1153 dlen); 1190 dlen);
1154 dlen = 0; 1191 dlen = 0;
1155 } 1192 }
@@ -1161,13 +1198,13 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1161 dptr += sizeof(u16); 1198 dptr += sizeof(u16);
1162 if ((sublen < SDPCM_HDRLEN) || 1199 if ((sublen < SDPCM_HDRLEN) ||
1163 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) { 1200 ((num == 0) && (sublen < (2 * SDPCM_HDRLEN)))) {
1164 brcmf_dbg(ERROR, "descriptor len %d bad: %d\n", 1201 brcmf_err("descriptor len %d bad: %d\n",
1165 num, sublen); 1202 num, sublen);
1166 pnext = NULL; 1203 pnext = NULL;
1167 break; 1204 break;
1168 } 1205 }
1169 if (sublen % BRCMF_SDALIGN) { 1206 if (sublen % BRCMF_SDALIGN) {
1170 brcmf_dbg(ERROR, "sublen %d not multiple of %d\n", 1207 brcmf_err("sublen %d not multiple of %d\n",
1171 sublen, BRCMF_SDALIGN); 1208 sublen, BRCMF_SDALIGN);
1172 usechain = false; 1209 usechain = false;
1173 } 1210 }
@@ -1184,7 +1221,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1184 /* Allocate/chain packet for next subframe */ 1221 /* Allocate/chain packet for next subframe */
1185 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN); 1222 pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
1186 if (pnext == NULL) { 1223 if (pnext == NULL) {
1187 brcmf_dbg(ERROR, "bcm_pkt_buf_get_skb failed, num %d len %d\n", 1224 brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
1188 num, sublen); 1225 num, sublen);
1189 break; 1226 break;
1190 } 1227 }
@@ -1235,6 +1272,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1235 * read directly into the chained packet, or allocate a large 1272 * read directly into the chained packet, or allocate a large
1236 * packet and and copy into the chain. 1273 * packet and and copy into the chain.
1237 */ 1274 */
1275 sdio_claim_host(bus->sdiodev->func[1]);
1238 if (usechain) { 1276 if (usechain) {
1239 errcode = brcmf_sdcard_recv_chain(bus->sdiodev, 1277 errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
1240 bus->sdiodev->sbwad, 1278 bus->sdiodev->sbwad,
@@ -1246,24 +1284,26 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1246 bus->dataptr, dlen); 1284 bus->dataptr, dlen);
1247 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen); 1285 sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
1248 if (sublen != dlen) { 1286 if (sublen != dlen) {
1249 brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n", 1287 brcmf_err("FAILED TO COPY, dlen %d sublen %d\n",
1250 dlen, sublen); 1288 dlen, sublen);
1251 errcode = -1; 1289 errcode = -1;
1252 } 1290 }
1253 pnext = NULL; 1291 pnext = NULL;
1254 } else { 1292 } else {
1255 brcmf_dbg(ERROR, "COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n", 1293 brcmf_err("COULDN'T ALLOC %d-BYTE GLOM, FORCE FAILURE\n",
1256 dlen); 1294 dlen);
1257 errcode = -1; 1295 errcode = -1;
1258 } 1296 }
1297 sdio_release_host(bus->sdiodev->func[1]);
1259 bus->sdcnt.f2rxdata++; 1298 bus->sdcnt.f2rxdata++;
1260 1299
1261 /* On failure, kill the superframe, allow a couple retries */ 1300 /* On failure, kill the superframe, allow a couple retries */
1262 if (errcode < 0) { 1301 if (errcode < 0) {
1263 brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n", 1302 brcmf_err("glom read of %d bytes failed: %d\n",
1264 dlen, errcode); 1303 dlen, errcode);
1265 bus->sdiodev->bus_if->dstats.rx_errors++; 1304 bus->sdiodev->bus_if->dstats.rx_errors++;
1266 1305
1306 sdio_claim_host(bus->sdiodev->func[1]);
1267 if (bus->glomerr++ < 3) { 1307 if (bus->glomerr++ < 3) {
1268 brcmf_sdbrcm_rxfail(bus, true, true); 1308 brcmf_sdbrcm_rxfail(bus, true, true);
1269 } else { 1309 } else {
@@ -1272,6 +1312,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1272 bus->sdcnt.rxglomfail++; 1312 bus->sdcnt.rxglomfail++;
1273 brcmf_sdbrcm_free_glom(bus); 1313 brcmf_sdbrcm_free_glom(bus);
1274 } 1314 }
1315 sdio_release_host(bus->sdiodev->func[1]);
1275 return 0; 1316 return 0;
1276 } 1317 }
1277 1318
@@ -1279,68 +1320,17 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1279 pfirst->data, min_t(int, pfirst->len, 48), 1320 pfirst->data, min_t(int, pfirst->len, 48),
1280 "SUPERFRAME:\n"); 1321 "SUPERFRAME:\n");
1281 1322
1282 /* Validate the superframe header */ 1323 rd_new.seq_num = rxseq;
1283 dptr = (u8 *) (pfirst->data); 1324 rd_new.len = dlen;
1284 sublen = get_unaligned_le16(dptr); 1325 sdio_claim_host(bus->sdiodev->func[1]);
1285 check = get_unaligned_le16(dptr + sizeof(u16)); 1326 errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
1286 1327 BRCMF_SDIO_FT_SUPER);
1287 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); 1328 sdio_release_host(bus->sdiodev->func[1]);
1288 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]); 1329 bus->cur_read.len = rd_new.len_nxtfrm << 4;
1289 next_len = dptr[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
1290 if ((next_len << 4) > MAX_RX_DATASZ) {
1291 brcmf_dbg(INFO, "nextlen too large (%d) seq %d\n",
1292 next_len, seq);
1293 next_len = 0;
1294 }
1295 bus->cur_read.len = next_len << 4;
1296 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1297 txmax = SDPCM_WINDOW_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1298
1299 errcode = 0;
1300 if ((u16)~(sublen ^ check)) {
1301 brcmf_dbg(ERROR, "(superframe): HW hdr error: len/check 0x%04x/0x%04x\n",
1302 sublen, check);
1303 errcode = -1;
1304 } else if (roundup(sublen, bus->blocksize) != dlen) {
1305 brcmf_dbg(ERROR, "(superframe): len 0x%04x, rounded 0x%04x, expect 0x%04x\n",
1306 sublen, roundup(sublen, bus->blocksize),
1307 dlen);
1308 errcode = -1;
1309 } else if (SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]) !=
1310 SDPCM_GLOM_CHANNEL) {
1311 brcmf_dbg(ERROR, "(superframe): bad channel %d\n",
1312 SDPCM_PACKET_CHANNEL(
1313 &dptr[SDPCM_FRAMETAG_LEN]));
1314 errcode = -1;
1315 } else if (SDPCM_GLOMDESC(&dptr[SDPCM_FRAMETAG_LEN])) {
1316 brcmf_dbg(ERROR, "(superframe): got 2nd descriptor?\n");
1317 errcode = -1;
1318 } else if ((doff < SDPCM_HDRLEN) ||
1319 (doff > (pfirst->len - SDPCM_HDRLEN))) {
1320 brcmf_dbg(ERROR, "(superframe): Bad data offset %d: HW %d pkt %d min %d\n",
1321 doff, sublen, pfirst->len, SDPCM_HDRLEN);
1322 errcode = -1;
1323 }
1324
1325 /* Check sequence number of superframe SW header */
1326 if (rxseq != seq) {
1327 brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
1328 seq, rxseq);
1329 bus->sdcnt.rx_badseq++;
1330 rxseq = seq;
1331 }
1332
1333 /* Check window for sanity */
1334 if ((u8) (txmax - bus->tx_seq) > 0x40) {
1335 brcmf_dbg(ERROR, "unlikely tx max %d with tx_seq %d\n",
1336 txmax, bus->tx_seq);
1337 txmax = bus->tx_seq + 2;
1338 }
1339 bus->tx_max = txmax;
1340 1330
1341 /* Remove superframe header, remember offset */ 1331 /* Remove superframe header, remember offset */
1342 skb_pull(pfirst, doff); 1332 skb_pull(pfirst, rd_new.dat_offset);
1343 sfdoff = doff; 1333 sfdoff = rd_new.dat_offset;
1344 num = 0; 1334 num = 0;
1345 1335
1346 /* Validate all the subframe headers */ 1336 /* Validate all the subframe headers */
@@ -1349,40 +1339,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1349 if (errcode) 1339 if (errcode)
1350 break; 1340 break;
1351 1341
1352 dptr = (u8 *) (pnext->data); 1342 rd_new.len = pnext->len;
1353 dlen = (u16) (pnext->len); 1343 rd_new.seq_num = rxseq++;
1354 sublen = get_unaligned_le16(dptr); 1344 sdio_claim_host(bus->sdiodev->func[1]);
1355 check = get_unaligned_le16(dptr + sizeof(u16)); 1345 errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
1356 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]); 1346 BRCMF_SDIO_FT_SUB);
1357 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1347 sdio_release_host(bus->sdiodev->func[1]);
1358 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(), 1348 brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
1359 dptr, 32, "subframe:\n"); 1349 pnext->data, 32, "subframe:\n");
1360 1350
1361 if ((u16)~(sublen ^ check)) {
1362 brcmf_dbg(ERROR, "(subframe %d): HW hdr error: len/check 0x%04x/0x%04x\n",
1363 num, sublen, check);
1364 errcode = -1;
1365 } else if ((sublen > dlen) || (sublen < SDPCM_HDRLEN)) {
1366 brcmf_dbg(ERROR, "(subframe %d): length mismatch: len 0x%04x, expect 0x%04x\n",
1367 num, sublen, dlen);
1368 errcode = -1;
1369 } else if ((chan != SDPCM_DATA_CHANNEL) &&
1370 (chan != SDPCM_EVENT_CHANNEL)) {
1371 brcmf_dbg(ERROR, "(subframe %d): bad channel %d\n",
1372 num, chan);
1373 errcode = -1;
1374 } else if ((doff < SDPCM_HDRLEN) || (doff > sublen)) {
1375 brcmf_dbg(ERROR, "(subframe %d): Bad data offset %d: HW %d min %d\n",
1376 num, doff, sublen, SDPCM_HDRLEN);
1377 errcode = -1;
1378 }
1379 /* increase the subframe count */
1380 num++; 1351 num++;
1381 } 1352 }
1382 1353
1383 if (errcode) { 1354 if (errcode) {
1384 /* Terminate frame on error, request 1355 /* Terminate frame on error, request
1385 a couple retries */ 1356 a couple retries */
1357 sdio_claim_host(bus->sdiodev->func[1]);
1386 if (bus->glomerr++ < 3) { 1358 if (bus->glomerr++ < 3) {
1387 /* Restore superframe header space */ 1359 /* Restore superframe header space */
1388 skb_push(pfirst, sfdoff); 1360 skb_push(pfirst, sfdoff);
@@ -1393,6 +1365,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1393 bus->sdcnt.rxglomfail++; 1365 bus->sdcnt.rxglomfail++;
1394 brcmf_sdbrcm_free_glom(bus); 1366 brcmf_sdbrcm_free_glom(bus);
1395 } 1367 }
1368 sdio_release_host(bus->sdiodev->func[1]);
1396 bus->cur_read.len = 0; 1369 bus->cur_read.len = 0;
1397 return 0; 1370 return 0;
1398 } 1371 }
@@ -1402,27 +1375,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1402 skb_queue_walk_safe(&bus->glom, pfirst, pnext) { 1375 skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
1403 dptr = (u8 *) (pfirst->data); 1376 dptr = (u8 *) (pfirst->data);
1404 sublen = get_unaligned_le16(dptr); 1377 sublen = get_unaligned_le16(dptr);
1405 chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
1406 seq = SDPCM_PACKET_SEQUENCE(&dptr[SDPCM_FRAMETAG_LEN]);
1407 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]); 1378 doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
1408 1379
1409 brcmf_dbg(GLOM, "Get subframe %d, %p(%p/%d), sublen %d chan %d seq %d\n",
1410 num, pfirst, pfirst->data,
1411 pfirst->len, sublen, chan, seq);
1412
1413 /* precondition: chan == SDPCM_DATA_CHANNEL ||
1414 chan == SDPCM_EVENT_CHANNEL */
1415
1416 if (rxseq != seq) {
1417 brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
1418 seq, rxseq);
1419 bus->sdcnt.rx_badseq++;
1420 rxseq = seq;
1421 }
1422 rxseq++;
1423
1424 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(), 1380 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
1425 dptr, dlen, "Rx Subframe Data:\n"); 1381 dptr, pfirst->len,
1382 "Rx Subframe Data:\n");
1426 1383
1427 __skb_trim(pfirst, sublen); 1384 __skb_trim(pfirst, sublen);
1428 skb_pull(pfirst, doff); 1385 skb_pull(pfirst, doff);
@@ -1433,7 +1390,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1433 continue; 1390 continue;
1434 } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, 1391 } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
1435 &ifidx, pfirst) != 0) { 1392 &ifidx, pfirst) != 0) {
1436 brcmf_dbg(ERROR, "rx protocol error\n"); 1393 brcmf_err("rx protocol error\n");
1437 bus->sdiodev->bus_if->dstats.rx_errors++; 1394 bus->sdiodev->bus_if->dstats.rx_errors++;
1438 skb_unlink(pfirst, &bus->glom); 1395 skb_unlink(pfirst, &bus->glom);
1439 brcmu_pkt_buf_free_skb(pfirst); 1396 brcmu_pkt_buf_free_skb(pfirst);
@@ -1449,11 +1406,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
1449 pfirst->prev); 1406 pfirst->prev);
1450 } 1407 }
1451 /* sent any remaining packets up */ 1408 /* sent any remaining packets up */
1452 if (bus->glom.qlen) { 1409 if (bus->glom.qlen)
1453 up(&bus->sdsem);
1454 brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom); 1410 brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
1455 down(&bus->sdsem);
1456 }
1457 1411
1458 bus->sdcnt.rxglomframes++; 1412 bus->sdcnt.rxglomframes++;
1459 bus->sdcnt.rxglompkts += bus->glom.qlen; 1413 bus->sdcnt.rxglompkts += bus->glom.qlen;
@@ -1494,21 +1448,24 @@ static void
1494brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) 1448brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1495{ 1449{
1496 uint rdlen, pad; 1450 uint rdlen, pad;
1497 1451 u8 *buf = NULL, *rbuf;
1498 int sdret; 1452 int sdret;
1499 1453
1500 brcmf_dbg(TRACE, "Enter\n"); 1454 brcmf_dbg(TRACE, "Enter\n");
1501 1455
1502 /* Set rxctl for frame (w/optional alignment) */ 1456 if (bus->rxblen)
1503 bus->rxctl = bus->rxbuf; 1457 buf = vzalloc(bus->rxblen);
1504 bus->rxctl += BRCMF_FIRSTREAD; 1458 if (!buf) {
1505 pad = ((unsigned long)bus->rxctl % BRCMF_SDALIGN); 1459 brcmf_err("no memory for control frame\n");
1460 goto done;
1461 }
1462 rbuf = bus->rxbuf;
1463 pad = ((unsigned long)rbuf % BRCMF_SDALIGN);
1506 if (pad) 1464 if (pad)
1507 bus->rxctl += (BRCMF_SDALIGN - pad); 1465 rbuf += (BRCMF_SDALIGN - pad);
1508 bus->rxctl -= BRCMF_FIRSTREAD;
1509 1466
1510 /* Copy the already-read portion over */ 1467 /* Copy the already-read portion over */
1511 memcpy(bus->rxctl, hdr, BRCMF_FIRSTREAD); 1468 memcpy(buf, hdr, BRCMF_FIRSTREAD);
1512 if (len <= BRCMF_FIRSTREAD) 1469 if (len <= BRCMF_FIRSTREAD)
1513 goto gotpkt; 1470 goto gotpkt;
1514 1471
@@ -1529,7 +1486,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1529 1486
1530 /* Drop if the read is too big or it exceeds our maximum */ 1487 /* Drop if the read is too big or it exceeds our maximum */
1531 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { 1488 if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
1532 brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n", 1489 brcmf_err("%d-byte control read exceeds %d-byte buffer\n",
1533 rdlen, bus->sdiodev->bus_if->maxctl); 1490 rdlen, bus->sdiodev->bus_if->maxctl);
1534 bus->sdiodev->bus_if->dstats.rx_errors++; 1491 bus->sdiodev->bus_if->dstats.rx_errors++;
1535 brcmf_sdbrcm_rxfail(bus, false, false); 1492 brcmf_sdbrcm_rxfail(bus, false, false);
@@ -1537,7 +1494,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1537 } 1494 }
1538 1495
1539 if ((len - doff) > bus->sdiodev->bus_if->maxctl) { 1496 if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
1540 brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", 1497 brcmf_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
1541 len, len - doff, bus->sdiodev->bus_if->maxctl); 1498 len, len - doff, bus->sdiodev->bus_if->maxctl);
1542 bus->sdiodev->bus_if->dstats.rx_errors++; 1499 bus->sdiodev->bus_if->dstats.rx_errors++;
1543 bus->sdcnt.rx_toolong++; 1500 bus->sdcnt.rx_toolong++;
@@ -1545,30 +1502,40 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
1545 goto done; 1502 goto done;
1546 } 1503 }
1547 1504
1548 /* Read remainder of frame body into the rxctl buffer */ 1505 /* Read remain of frame body */
1549 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, 1506 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1550 bus->sdiodev->sbwad, 1507 bus->sdiodev->sbwad,
1551 SDIO_FUNC_2, 1508 SDIO_FUNC_2,
1552 F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); 1509 F2SYNC, rbuf, rdlen);
1553 bus->sdcnt.f2rxdata++; 1510 bus->sdcnt.f2rxdata++;
1554 1511
1555 /* Control frame failures need retransmission */ 1512 /* Control frame failures need retransmission */
1556 if (sdret < 0) { 1513 if (sdret < 0) {
1557 brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", 1514 brcmf_err("read %d control bytes failed: %d\n",
1558 rdlen, sdret); 1515 rdlen, sdret);
1559 bus->sdcnt.rxc_errors++; 1516 bus->sdcnt.rxc_errors++;
1560 brcmf_sdbrcm_rxfail(bus, true, true); 1517 brcmf_sdbrcm_rxfail(bus, true, true);
1561 goto done; 1518 goto done;
1562 } 1519 } else
1520 memcpy(buf + BRCMF_FIRSTREAD, rbuf, rdlen);
1563 1521
1564gotpkt: 1522gotpkt:
1565 1523
1566 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(), 1524 brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_CTL_ON(),
1567 bus->rxctl, len, "RxCtrl:\n"); 1525 buf, len, "RxCtrl:\n");
1568 1526
1569 /* Point to valid data and indicate its length */ 1527 /* Point to valid data and indicate its length */
1570 bus->rxctl += doff; 1528 spin_lock_bh(&bus->rxctl_lock);
1529 if (bus->rxctl) {
1530 brcmf_err("last control frame is being processed.\n");
1531 spin_unlock_bh(&bus->rxctl_lock);
1532 vfree(buf);
1533 goto done;
1534 }
1535 bus->rxctl = buf + doff;
1536 bus->rxctl_orig = buf;
1571 bus->rxlen = len - doff; 1537 bus->rxlen = len - doff;
1538 spin_unlock_bh(&bus->rxctl_lock);
1572 1539
1573done: 1540done:
1574 /* Awake any waiters */ 1541 /* Awake any waiters */
@@ -1623,6 +1590,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1623 1590
1624 rd->len_left = rd->len; 1591 rd->len_left = rd->len;
1625 /* read header first for unknow frame length */ 1592 /* read header first for unknow frame length */
1593 sdio_claim_host(bus->sdiodev->func[1]);
1626 if (!rd->len) { 1594 if (!rd->len) {
1627 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, 1595 sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
1628 bus->sdiodev->sbwad, 1596 bus->sdiodev->sbwad,
@@ -1631,10 +1599,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1631 BRCMF_FIRSTREAD); 1599 BRCMF_FIRSTREAD);
1632 bus->sdcnt.f2rxhdrs++; 1600 bus->sdcnt.f2rxhdrs++;
1633 if (sdret < 0) { 1601 if (sdret < 0) {
1634 brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", 1602 brcmf_err("RXHEADER FAILED: %d\n",
1635 sdret); 1603 sdret);
1636 bus->sdcnt.rx_hdrfail++; 1604 bus->sdcnt.rx_hdrfail++;
1637 brcmf_sdbrcm_rxfail(bus, true, true); 1605 brcmf_sdbrcm_rxfail(bus, true, true);
1606 sdio_release_host(bus->sdiodev->func[1]);
1638 continue; 1607 continue;
1639 } 1608 }
1640 1609
@@ -1642,7 +1611,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1642 bus->rxhdr, SDPCM_HDRLEN, 1611 bus->rxhdr, SDPCM_HDRLEN,
1643 "RxHdr:\n"); 1612 "RxHdr:\n");
1644 1613
1645 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, rd)) { 1614 if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
1615 BRCMF_SDIO_FT_NORMAL)) {
1616 sdio_release_host(bus->sdiodev->func[1]);
1646 if (!bus->rxpending) 1617 if (!bus->rxpending)
1647 break; 1618 break;
1648 else 1619 else
@@ -1658,6 +1629,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1658 rd->len_nxtfrm = 0; 1629 rd->len_nxtfrm = 0;
1659 /* treat all packet as event if we don't know */ 1630 /* treat all packet as event if we don't know */
1660 rd->channel = SDPCM_EVENT_CHANNEL; 1631 rd->channel = SDPCM_EVENT_CHANNEL;
1632 sdio_release_host(bus->sdiodev->func[1]);
1661 continue; 1633 continue;
1662 } 1634 }
1663 rd->len_left = rd->len > BRCMF_FIRSTREAD ? 1635 rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@@ -1671,10 +1643,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1671 BRCMF_SDALIGN); 1643 BRCMF_SDALIGN);
1672 if (!pkt) { 1644 if (!pkt) {
1673 /* Give up on data, request rtx of events */ 1645 /* Give up on data, request rtx of events */
1674 brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed\n"); 1646 brcmf_err("brcmu_pkt_buf_get_skb failed\n");
1675 bus->sdiodev->bus_if->dstats.rx_dropped++; 1647 bus->sdiodev->bus_if->dstats.rx_dropped++;
1676 brcmf_sdbrcm_rxfail(bus, false, 1648 brcmf_sdbrcm_rxfail(bus, false,
1677 RETRYCHAN(rd->channel)); 1649 RETRYCHAN(rd->channel));
1650 sdio_release_host(bus->sdiodev->func[1]);
1678 continue; 1651 continue;
1679 } 1652 }
1680 skb_pull(pkt, head_read); 1653 skb_pull(pkt, head_read);
@@ -1683,14 +1656,17 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1683 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1656 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1684 SDIO_FUNC_2, F2SYNC, pkt); 1657 SDIO_FUNC_2, F2SYNC, pkt);
1685 bus->sdcnt.f2rxdata++; 1658 bus->sdcnt.f2rxdata++;
1659 sdio_release_host(bus->sdiodev->func[1]);
1686 1660
1687 if (sdret < 0) { 1661 if (sdret < 0) {
1688 brcmf_dbg(ERROR, "read %d bytes from channel %d failed: %d\n", 1662 brcmf_err("read %d bytes from channel %d failed: %d\n",
1689 rd->len, rd->channel, sdret); 1663 rd->len, rd->channel, sdret);
1690 brcmu_pkt_buf_free_skb(pkt); 1664 brcmu_pkt_buf_free_skb(pkt);
1691 bus->sdiodev->bus_if->dstats.rx_errors++; 1665 bus->sdiodev->bus_if->dstats.rx_errors++;
1666 sdio_claim_host(bus->sdiodev->func[1]);
1692 brcmf_sdbrcm_rxfail(bus, true, 1667 brcmf_sdbrcm_rxfail(bus, true,
1693 RETRYCHAN(rd->channel)); 1668 RETRYCHAN(rd->channel));
1669 sdio_release_host(bus->sdiodev->func[1]);
1694 continue; 1670 continue;
1695 } 1671 }
1696 1672
@@ -1701,20 +1677,24 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1701 } else { 1677 } else {
1702 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); 1678 memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
1703 rd_new.seq_num = rd->seq_num; 1679 rd_new.seq_num = rd->seq_num;
1704 if (!brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new)) { 1680 sdio_claim_host(bus->sdiodev->func[1]);
1681 if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
1682 BRCMF_SDIO_FT_NORMAL)) {
1705 rd->len = 0; 1683 rd->len = 0;
1706 brcmu_pkt_buf_free_skb(pkt); 1684 brcmu_pkt_buf_free_skb(pkt);
1707 } 1685 }
1708 bus->sdcnt.rx_readahead_cnt++; 1686 bus->sdcnt.rx_readahead_cnt++;
1709 if (rd->len != roundup(rd_new.len, 16)) { 1687 if (rd->len != roundup(rd_new.len, 16)) {
1710 brcmf_dbg(ERROR, "frame length mismatch:read %d, should be %d\n", 1688 brcmf_err("frame length mismatch:read %d, should be %d\n",
1711 rd->len, 1689 rd->len,
1712 roundup(rd_new.len, 16) >> 4); 1690 roundup(rd_new.len, 16) >> 4);
1713 rd->len = 0; 1691 rd->len = 0;
1714 brcmf_sdbrcm_rxfail(bus, true, true); 1692 brcmf_sdbrcm_rxfail(bus, true, true);
1693 sdio_release_host(bus->sdiodev->func[1]);
1715 brcmu_pkt_buf_free_skb(pkt); 1694 brcmu_pkt_buf_free_skb(pkt);
1716 continue; 1695 continue;
1717 } 1696 }
1697 sdio_release_host(bus->sdiodev->func[1]);
1718 rd->len_nxtfrm = rd_new.len_nxtfrm; 1698 rd->len_nxtfrm = rd_new.len_nxtfrm;
1719 rd->channel = rd_new.channel; 1699 rd->channel = rd_new.channel;
1720 rd->dat_offset = rd_new.dat_offset; 1700 rd->dat_offset = rd_new.dat_offset;
@@ -1726,11 +1706,13 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1726 "RxHdr:\n"); 1706 "RxHdr:\n");
1727 1707
1728 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) { 1708 if (rd_new.channel == SDPCM_CONTROL_CHANNEL) {
1729 brcmf_dbg(ERROR, "readahead on control packet %d?\n", 1709 brcmf_err("readahead on control packet %d?\n",
1730 rd_new.seq_num); 1710 rd_new.seq_num);
1731 /* Force retry w/normal header read */ 1711 /* Force retry w/normal header read */
1732 rd->len = 0; 1712 rd->len = 0;
1713 sdio_claim_host(bus->sdiodev->func[1]);
1733 brcmf_sdbrcm_rxfail(bus, false, true); 1714 brcmf_sdbrcm_rxfail(bus, false, true);
1715 sdio_release_host(bus->sdiodev->func[1]);
1734 brcmu_pkt_buf_free_skb(pkt); 1716 brcmu_pkt_buf_free_skb(pkt);
1735 continue; 1717 continue;
1736 } 1718 }
@@ -1751,9 +1733,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1751 skb_pull(pkt, SDPCM_HDRLEN); 1733 skb_pull(pkt, SDPCM_HDRLEN);
1752 bus->glomd = pkt; 1734 bus->glomd = pkt;
1753 } else { 1735 } else {
1754 brcmf_dbg(ERROR, "%s: glom superframe w/o " 1736 brcmf_err("%s: glom superframe w/o "
1755 "descriptor!\n", __func__); 1737 "descriptor!\n", __func__);
1738 sdio_claim_host(bus->sdiodev->func[1]);
1756 brcmf_sdbrcm_rxfail(bus, false, false); 1739 brcmf_sdbrcm_rxfail(bus, false, false);
1740 sdio_release_host(bus->sdiodev->func[1]);
1757 } 1741 }
1758 /* prepare the descriptor for the next read */ 1742 /* prepare the descriptor for the next read */
1759 rd->len = rd->len_nxtfrm << 4; 1743 rd->len = rd->len_nxtfrm << 4;
@@ -1778,16 +1762,13 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1778 continue; 1762 continue;
1779 } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx, 1763 } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
1780 pkt) != 0) { 1764 pkt) != 0) {
1781 brcmf_dbg(ERROR, "rx protocol error\n"); 1765 brcmf_err("rx protocol error\n");
1782 brcmu_pkt_buf_free_skb(pkt); 1766 brcmu_pkt_buf_free_skb(pkt);
1783 bus->sdiodev->bus_if->dstats.rx_errors++; 1767 bus->sdiodev->bus_if->dstats.rx_errors++;
1784 continue; 1768 continue;
1785 } 1769 }
1786 1770
1787 /* Unlock during rx call */
1788 up(&bus->sdsem);
1789 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt); 1771 brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
1790 down(&bus->sdsem);
1791 } 1772 }
1792 1773
1793 rxcount = maxframes - rxleft; 1774 rxcount = maxframes - rxleft;
@@ -1805,15 +1786,6 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1805} 1786}
1806 1787
1807static void 1788static void
1808brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
1809{
1810 up(&bus->sdsem);
1811 wait_event_interruptible_timeout(bus->ctrl_wait, !*lockvar, HZ * 2);
1812 down(&bus->sdsem);
1813 return;
1814}
1815
1816static void
1817brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus) 1789brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1818{ 1790{
1819 if (waitqueue_active(&bus->ctrl_wait)) 1791 if (waitqueue_active(&bus->ctrl_wait))
@@ -1846,7 +1818,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1846 bus->sdiodev->bus_if->tx_realloc++; 1818 bus->sdiodev->bus_if->tx_realloc++;
1847 new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN); 1819 new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
1848 if (!new) { 1820 if (!new) {
1849 brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n", 1821 brcmf_err("couldn't allocate new %d-byte packet\n",
1850 pkt->len + BRCMF_SDALIGN); 1822 pkt->len + BRCMF_SDALIGN);
1851 ret = -ENOMEM; 1823 ret = -ENOMEM;
1852 goto done; 1824 goto done;
@@ -1914,6 +1886,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1914 if (len & (ALIGNMENT - 1)) 1886 if (len & (ALIGNMENT - 1))
1915 len = roundup(len, ALIGNMENT); 1887 len = roundup(len, ALIGNMENT);
1916 1888
1889 sdio_claim_host(bus->sdiodev->func[1]);
1917 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1890 ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1918 SDIO_FUNC_2, F2SYNC, pkt); 1891 SDIO_FUNC_2, F2SYNC, pkt);
1919 bus->sdcnt.f2txdata++; 1892 bus->sdcnt.f2txdata++;
@@ -1941,15 +1914,14 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1941 } 1914 }
1942 1915
1943 } 1916 }
1917 sdio_release_host(bus->sdiodev->func[1]);
1944 if (ret == 0) 1918 if (ret == 0)
1945 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 1919 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
1946 1920
1947done: 1921done:
1948 /* restore pkt buffer pointer before calling tx complete routine */ 1922 /* restore pkt buffer pointer before calling tx complete routine */
1949 skb_pull(pkt, SDPCM_HDRLEN + pad); 1923 skb_pull(pkt, SDPCM_HDRLEN + pad);
1950 up(&bus->sdsem);
1951 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0); 1924 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
1952 down(&bus->sdsem);
1953 1925
1954 if (free_pkt) 1926 if (free_pkt)
1955 brcmu_pkt_buf_free_skb(pkt); 1927 brcmu_pkt_buf_free_skb(pkt);
@@ -1990,9 +1962,11 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1990 /* In poll mode, need to check for other events */ 1962 /* In poll mode, need to check for other events */
1991 if (!bus->intr && cnt) { 1963 if (!bus->intr && cnt) {
1992 /* Check device status, signal pending interrupt */ 1964 /* Check device status, signal pending interrupt */
1965 sdio_claim_host(bus->sdiodev->func[1]);
1993 ret = r_sdreg32(bus, &intstatus, 1966 ret = r_sdreg32(bus, &intstatus,
1994 offsetof(struct sdpcmd_regs, 1967 offsetof(struct sdpcmd_regs,
1995 intstatus)); 1968 intstatus));
1969 sdio_release_host(bus->sdiodev->func[1]);
1996 bus->sdcnt.f2txdata++; 1970 bus->sdcnt.f2txdata++;
1997 if (ret != 0) 1971 if (ret != 0)
1998 break; 1972 break;
@@ -2029,7 +2003,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2029 bus->watchdog_tsk = NULL; 2003 bus->watchdog_tsk = NULL;
2030 } 2004 }
2031 2005
2032 down(&bus->sdsem); 2006 sdio_claim_host(bus->sdiodev->func[1]);
2033 2007
2034 /* Enable clock for device interrupts */ 2008 /* Enable clock for device interrupts */
2035 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2009 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -2050,7 +2024,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2050 (saveclk | SBSDIO_FORCE_HT), &err); 2024 (saveclk | SBSDIO_FORCE_HT), &err);
2051 } 2025 }
2052 if (err) 2026 if (err)
2053 brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); 2027 brcmf_err("Failed to force clock for F2: err %d\n", err);
2054 2028
2055 /* Turn off the bus (F2), free any pending packets */ 2029 /* Turn off the bus (F2), free any pending packets */
2056 brcmf_dbg(INTR, "disable SDIO interrupts\n"); 2030 brcmf_dbg(INTR, "disable SDIO interrupts\n");
@@ -2063,6 +2037,7 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2063 2037
2064 /* Turn off the backplane clock (only) */ 2038 /* Turn off the backplane clock (only) */
2065 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); 2039 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
2040 sdio_release_host(bus->sdiodev->func[1]);
2066 2041
2067 /* Clear the data packet queues */ 2042 /* Clear the data packet queues */
2068 brcmu_pktq_flush(&bus->txq, true, NULL, NULL); 2043 brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@@ -2073,14 +2048,14 @@ static void brcmf_sdbrcm_bus_stop(struct device *dev)
2073 brcmf_sdbrcm_free_glom(bus); 2048 brcmf_sdbrcm_free_glom(bus);
2074 2049
2075 /* Clear rx control and wake any waiters */ 2050 /* Clear rx control and wake any waiters */
2051 spin_lock_bh(&bus->rxctl_lock);
2076 bus->rxlen = 0; 2052 bus->rxlen = 0;
2053 spin_unlock_bh(&bus->rxctl_lock);
2077 brcmf_sdbrcm_dcmd_resp_wake(bus); 2054 brcmf_sdbrcm_dcmd_resp_wake(bus);
2078 2055
2079 /* Reset some F2 state stuff */ 2056 /* Reset some F2 state stuff */
2080 bus->rxskip = false; 2057 bus->rxskip = false;
2081 bus->tx_seq = bus->rx_seq = 0; 2058 bus->tx_seq = bus->rx_seq = 0;
2082
2083 up(&bus->sdsem);
2084} 2059}
2085 2060
2086#ifdef CONFIG_BRCMFMAC_SDIO_OOB 2061#ifdef CONFIG_BRCMFMAC_SDIO_OOB
@@ -2164,7 +2139,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2164 2139
2165 brcmf_dbg(TRACE, "Enter\n"); 2140 brcmf_dbg(TRACE, "Enter\n");
2166 2141
2167 down(&bus->sdsem); 2142 sdio_claim_host(bus->sdiodev->func[1]);
2168 2143
2169 /* If waiting for HTAVAIL, check status */ 2144 /* If waiting for HTAVAIL, check status */
2170 if (bus->clkstate == CLK_PENDING) { 2145 if (bus->clkstate == CLK_PENDING) {
@@ -2175,7 +2150,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2175 devctl = brcmf_sdio_regrb(bus->sdiodev, 2150 devctl = brcmf_sdio_regrb(bus->sdiodev,
2176 SBSDIO_DEVICE_CTL, &err); 2151 SBSDIO_DEVICE_CTL, &err);
2177 if (err) { 2152 if (err) {
2178 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err); 2153 brcmf_err("error reading DEVCTL: %d\n", err);
2179 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2154 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2180 } 2155 }
2181#endif /* DEBUG */ 2156#endif /* DEBUG */
@@ -2184,7 +2159,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2184 clkctl = brcmf_sdio_regrb(bus->sdiodev, 2159 clkctl = brcmf_sdio_regrb(bus->sdiodev,
2185 SBSDIO_FUNC1_CHIPCLKCSR, &err); 2160 SBSDIO_FUNC1_CHIPCLKCSR, &err);
2186 if (err) { 2161 if (err) {
2187 brcmf_dbg(ERROR, "error reading CSR: %d\n", 2162 brcmf_err("error reading CSR: %d\n",
2188 err); 2163 err);
2189 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2164 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2190 } 2165 }
@@ -2196,7 +2171,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2196 devctl = brcmf_sdio_regrb(bus->sdiodev, 2171 devctl = brcmf_sdio_regrb(bus->sdiodev,
2197 SBSDIO_DEVICE_CTL, &err); 2172 SBSDIO_DEVICE_CTL, &err);
2198 if (err) { 2173 if (err) {
2199 brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", 2174 brcmf_err("error reading DEVCTL: %d\n",
2200 err); 2175 err);
2201 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2176 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2202 } 2177 }
@@ -2204,7 +2179,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2204 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL, 2179 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_DEVICE_CTL,
2205 devctl, &err); 2180 devctl, &err);
2206 if (err) { 2181 if (err) {
2207 brcmf_dbg(ERROR, "error writing DEVCTL: %d\n", 2182 brcmf_err("error writing DEVCTL: %d\n",
2208 err); 2183 err);
2209 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2184 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2210 } 2185 }
@@ -2218,9 +2193,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2218 /* Pending interrupt indicates new device status */ 2193 /* Pending interrupt indicates new device status */
2219 if (atomic_read(&bus->ipend) > 0) { 2194 if (atomic_read(&bus->ipend) > 0) {
2220 atomic_set(&bus->ipend, 0); 2195 atomic_set(&bus->ipend, 0);
2221 sdio_claim_host(bus->sdiodev->func[1]);
2222 err = brcmf_sdio_intr_rstatus(bus); 2196 err = brcmf_sdio_intr_rstatus(bus);
2223 sdio_release_host(bus->sdiodev->func[1]);
2224 } 2197 }
2225 2198
2226 /* Start with leftover status bits */ 2199 /* Start with leftover status bits */
@@ -2249,19 +2222,21 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2249 intstatus |= brcmf_sdbrcm_hostmail(bus); 2222 intstatus |= brcmf_sdbrcm_hostmail(bus);
2250 } 2223 }
2251 2224
2225 sdio_release_host(bus->sdiodev->func[1]);
2226
2252 /* Generally don't ask for these, can get CRC errors... */ 2227 /* Generally don't ask for these, can get CRC errors... */
2253 if (intstatus & I_WR_OOSYNC) { 2228 if (intstatus & I_WR_OOSYNC) {
2254 brcmf_dbg(ERROR, "Dongle reports WR_OOSYNC\n"); 2229 brcmf_err("Dongle reports WR_OOSYNC\n");
2255 intstatus &= ~I_WR_OOSYNC; 2230 intstatus &= ~I_WR_OOSYNC;
2256 } 2231 }
2257 2232
2258 if (intstatus & I_RD_OOSYNC) { 2233 if (intstatus & I_RD_OOSYNC) {
2259 brcmf_dbg(ERROR, "Dongle reports RD_OOSYNC\n"); 2234 brcmf_err("Dongle reports RD_OOSYNC\n");
2260 intstatus &= ~I_RD_OOSYNC; 2235 intstatus &= ~I_RD_OOSYNC;
2261 } 2236 }
2262 2237
2263 if (intstatus & I_SBINT) { 2238 if (intstatus & I_SBINT) {
2264 brcmf_dbg(ERROR, "Dongle reports SBINT\n"); 2239 brcmf_err("Dongle reports SBINT\n");
2265 intstatus &= ~I_SBINT; 2240 intstatus &= ~I_SBINT;
2266 } 2241 }
2267 2242
@@ -2295,6 +2270,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2295 (bus->clkstate == CLK_AVAIL)) { 2270 (bus->clkstate == CLK_AVAIL)) {
2296 int i; 2271 int i;
2297 2272
2273 sdio_claim_host(bus->sdiodev->func[1]);
2298 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad, 2274 err = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
2299 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf, 2275 SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
2300 (u32) bus->ctrl_frame_len); 2276 (u32) bus->ctrl_frame_len);
@@ -2328,6 +2304,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2328 } else { 2304 } else {
2329 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP; 2305 bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
2330 } 2306 }
2307 sdio_release_host(bus->sdiodev->func[1]);
2331 bus->ctrl_frame_stat = false; 2308 bus->ctrl_frame_stat = false;
2332 brcmf_sdbrcm_wait_event_wakeup(bus); 2309 brcmf_sdbrcm_wait_event_wakeup(bus);
2333 } 2310 }
@@ -2342,7 +2319,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2342 } 2319 }
2343 2320
2344 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) { 2321 if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) || (err != 0)) {
2345 brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation\n"); 2322 brcmf_err("failed backplane access over SDIO, halting operation\n");
2346 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 2323 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
2347 atomic_set(&bus->intstatus, 0); 2324 atomic_set(&bus->intstatus, 0);
2348 } else if (atomic_read(&bus->intstatus) || 2325 } else if (atomic_read(&bus->intstatus) ||
@@ -2357,10 +2334,10 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
2357 if ((bus->clkstate != CLK_PENDING) 2334 if ((bus->clkstate != CLK_PENDING)
2358 && bus->idletime == BRCMF_IDLE_IMMEDIATE) { 2335 && bus->idletime == BRCMF_IDLE_IMMEDIATE) {
2359 bus->activity = false; 2336 bus->activity = false;
2337 sdio_claim_host(bus->sdiodev->func[1]);
2360 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 2338 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
2339 sdio_release_host(bus->sdiodev->func[1]);
2361 } 2340 }
2362
2363 up(&bus->sdsem);
2364} 2341}
2365 2342
2366static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) 2343static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
@@ -2393,7 +2370,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2393 skb_pull(pkt, SDPCM_HDRLEN); 2370 skb_pull(pkt, SDPCM_HDRLEN);
2394 brcmf_txcomplete(bus->sdiodev->dev, pkt, false); 2371 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2395 brcmu_pkt_buf_free_skb(pkt); 2372 brcmu_pkt_buf_free_skb(pkt);
2396 brcmf_dbg(ERROR, "out of bus->txq !!!\n"); 2373 brcmf_err("out of bus->txq !!!\n");
2397 ret = -ENOSR; 2374 ret = -ENOSR;
2398 } else { 2375 } else {
2399 ret = 0; 2376 ret = 0;
@@ -2443,7 +2420,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2443 /* Set the backplane window to include the start address */ 2420 /* Set the backplane window to include the start address */
2444 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address); 2421 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, address);
2445 if (bcmerror) { 2422 if (bcmerror) {
2446 brcmf_dbg(ERROR, "window change failed\n"); 2423 brcmf_err("window change failed\n");
2447 goto xfer_done; 2424 goto xfer_done;
2448 } 2425 }
2449 2426
@@ -2455,7 +2432,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2455 bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write, 2432 bcmerror = brcmf_sdcard_rwdata(bus->sdiodev, write,
2456 sdaddr, data, dsize); 2433 sdaddr, data, dsize);
2457 if (bcmerror) { 2434 if (bcmerror) {
2458 brcmf_dbg(ERROR, "membytes transfer failed\n"); 2435 brcmf_err("membytes transfer failed\n");
2459 break; 2436 break;
2460 } 2437 }
2461 2438
@@ -2467,7 +2444,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2467 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev, 2444 bcmerror = brcmf_sdcard_set_sbaddr_window(bus->sdiodev,
2468 address); 2445 address);
2469 if (bcmerror) { 2446 if (bcmerror) {
2470 brcmf_dbg(ERROR, "window change failed\n"); 2447 brcmf_err("window change failed\n");
2471 break; 2448 break;
2472 } 2449 }
2473 sdaddr = 0; 2450 sdaddr = 0;
@@ -2478,7 +2455,7 @@ brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
2478xfer_done: 2455xfer_done:
2479 /* Return the window to backplane enumeration space for core access */ 2456 /* Return the window to backplane enumeration space for core access */
2480 if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad)) 2457 if (brcmf_sdcard_set_sbaddr_window(bus->sdiodev, bus->sdiodev->sbwad))
2481 brcmf_dbg(ERROR, "FAILED to set window back to 0x%x\n", 2458 brcmf_err("FAILED to set window back to 0x%x\n",
2482 bus->sdiodev->sbwad); 2459 bus->sdiodev->sbwad);
2483 2460
2484 sdio_release_host(bus->sdiodev->func[1]); 2461 sdio_release_host(bus->sdiodev->func[1]);
@@ -2651,11 +2628,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2651 2628
2652 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */ 2629 /* precondition: IS_ALIGNED((unsigned long)frame, 2) */
2653 2630
2654 /* Need to lock here to protect txseq and SDIO tx calls */
2655 down(&bus->sdsem);
2656
2657 /* Make sure backplane clock is on */ 2631 /* Make sure backplane clock is on */
2632 sdio_claim_host(bus->sdiodev->func[1]);
2658 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 2633 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
2634 sdio_release_host(bus->sdiodev->func[1]);
2659 2635
2660 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */ 2636 /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
2661 *(__le16 *) frame = cpu_to_le16((u16) msglen); 2637 *(__le16 *) frame = cpu_to_le16((u16) msglen);
@@ -2678,7 +2654,9 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2678 bus->ctrl_frame_buf = frame; 2654 bus->ctrl_frame_buf = frame;
2679 bus->ctrl_frame_len = len; 2655 bus->ctrl_frame_len = len;
2680 2656
2681 brcmf_sdbrcm_wait_for_event(bus, &bus->ctrl_frame_stat); 2657 wait_event_interruptible_timeout(bus->ctrl_wait,
2658 !bus->ctrl_frame_stat,
2659 msecs_to_jiffies(2000));
2682 2660
2683 if (!bus->ctrl_frame_stat) { 2661 if (!bus->ctrl_frame_stat) {
2684 brcmf_dbg(INFO, "ctrl_frame_stat == false\n"); 2662 brcmf_dbg(INFO, "ctrl_frame_stat == false\n");
@@ -2697,7 +2675,9 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2697 frame, min_t(u16, len, 16), "TxHdr:\n"); 2675 frame, min_t(u16, len, 16), "TxHdr:\n");
2698 2676
2699 do { 2677 do {
2678 sdio_claim_host(bus->sdiodev->func[1]);
2700 ret = brcmf_tx_frame(bus, frame, len); 2679 ret = brcmf_tx_frame(bus, frame, len);
2680 sdio_release_host(bus->sdiodev->func[1]);
2701 } while (ret < 0 && retries++ < TXRETRIES); 2681 } while (ret < 0 && retries++ < TXRETRIES);
2702 } 2682 }
2703 2683
@@ -2707,13 +2687,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
2707 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); 2687 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2708 2688
2709 bus->activity = false; 2689 bus->activity = false;
2690 sdio_claim_host(bus->sdiodev->func[1]);
2710 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true); 2691 brcmf_sdbrcm_clkctl(bus, CLK_NONE, true);
2692 sdio_release_host(bus->sdiodev->func[1]);
2711 } else { 2693 } else {
2712 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); 2694 spin_unlock_irqrestore(&bus->dpc_tl_lock, flags);
2713 } 2695 }
2714 2696
2715 up(&bus->sdsem);
2716
2717 if (ret) 2697 if (ret)
2718 bus->sdcnt.tx_ctlerrs++; 2698 bus->sdcnt.tx_ctlerrs++;
2719 else 2699 else
@@ -2743,8 +2723,10 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2743 * Read last word in socram to determine 2723 * Read last word in socram to determine
2744 * address of sdpcm_shared structure 2724 * address of sdpcm_shared structure
2745 */ 2725 */
2726 sdio_claim_host(bus->sdiodev->func[1]);
2746 rv = brcmf_sdbrcm_membytes(bus, false, shaddr, 2727 rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
2747 (u8 *)&addr_le, 4); 2728 (u8 *)&addr_le, 4);
2729 sdio_claim_host(bus->sdiodev->func[1]);
2748 if (rv < 0) 2730 if (rv < 0)
2749 return rv; 2731 return rv;
2750 2732
@@ -2757,14 +2739,16 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2757 * NVRAM length at the end of memory should have been overwritten. 2739 * NVRAM length at the end of memory should have been overwritten.
2758 */ 2740 */
2759 if (!brcmf_sdio_valid_shared_address(addr)) { 2741 if (!brcmf_sdio_valid_shared_address(addr)) {
2760 brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n", 2742 brcmf_err("invalid sdpcm_shared address 0x%08X\n",
2761 addr); 2743 addr);
2762 return -EINVAL; 2744 return -EINVAL;
2763 } 2745 }
2764 2746
2765 /* Read hndrte_shared structure */ 2747 /* Read hndrte_shared structure */
2748 sdio_claim_host(bus->sdiodev->func[1]);
2766 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le, 2749 rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
2767 sizeof(struct sdpcm_shared_le)); 2750 sizeof(struct sdpcm_shared_le));
2751 sdio_release_host(bus->sdiodev->func[1]);
2768 if (rv < 0) 2752 if (rv < 0)
2769 return rv; 2753 return rv;
2770 2754
@@ -2778,8 +2762,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
2778 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr); 2762 sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
2779 2763
2780 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) { 2764 if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
2781 brcmf_dbg(ERROR, 2765 brcmf_err("sdpcm_shared version mismatch: dhd %d dongle %d\n",
2782 "sdpcm_shared version mismatch: dhd %d dongle %d\n",
2783 SDPCM_SHARED_VERSION, 2766 SDPCM_SHARED_VERSION,
2784 sh->flags & SDPCM_SHARED_VERSION_MASK); 2767 sh->flags & SDPCM_SHARED_VERSION_MASK);
2785 return -EPROTO; 2768 return -EPROTO;
@@ -2867,12 +2850,14 @@ static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
2867 if ((sh->flags & SDPCM_SHARED_TRAP) == 0) 2850 if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
2868 return 0; 2851 return 0;
2869 2852
2853 sdio_claim_host(bus->sdiodev->func[1]);
2870 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr, 2854 error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
2871 sizeof(struct brcmf_trap_info)); 2855 sizeof(struct brcmf_trap_info));
2872 if (error < 0) 2856 if (error < 0)
2873 return error; 2857 return error;
2874 2858
2875 nbytes = brcmf_sdio_dump_console(bus, sh, data, count); 2859 nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
2860 sdio_release_host(bus->sdiodev->func[1]);
2876 if (nbytes < 0) 2861 if (nbytes < 0)
2877 return nbytes; 2862 return nbytes;
2878 2863
@@ -2918,6 +2903,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2918 return 0; 2903 return 0;
2919 } 2904 }
2920 2905
2906 sdio_claim_host(bus->sdiodev->func[1]);
2921 if (sh->assert_file_addr != 0) { 2907 if (sh->assert_file_addr != 0) {
2922 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr, 2908 error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
2923 (u8 *)file, 80); 2909 (u8 *)file, 80);
@@ -2930,6 +2916,7 @@ static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
2930 if (error < 0) 2916 if (error < 0)
2931 return error; 2917 return error;
2932 } 2918 }
2919 sdio_release_host(bus->sdiodev->func[1]);
2933 2920
2934 res = scnprintf(buf, sizeof(buf), 2921 res = scnprintf(buf, sizeof(buf),
2935 "dongle assert: %s:%d: assert(%s)\n", 2922 "dongle assert: %s:%d: assert(%s)\n",
@@ -2942,9 +2929,7 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2942 int error; 2929 int error;
2943 struct sdpcm_shared sh; 2930 struct sdpcm_shared sh;
2944 2931
2945 down(&bus->sdsem);
2946 error = brcmf_sdio_readshared(bus, &sh); 2932 error = brcmf_sdio_readshared(bus, &sh);
2947 up(&bus->sdsem);
2948 2933
2949 if (error < 0) 2934 if (error < 0)
2950 return error; 2935 return error;
@@ -2952,10 +2937,10 @@ static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
2952 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) 2937 if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
2953 brcmf_dbg(INFO, "firmware not built with -assert\n"); 2938 brcmf_dbg(INFO, "firmware not built with -assert\n");
2954 else if (sh.flags & SDPCM_SHARED_ASSERT) 2939 else if (sh.flags & SDPCM_SHARED_ASSERT)
2955 brcmf_dbg(ERROR, "assertion in dongle\n"); 2940 brcmf_err("assertion in dongle\n");
2956 2941
2957 if (sh.flags & SDPCM_SHARED_TRAP) 2942 if (sh.flags & SDPCM_SHARED_TRAP)
2958 brcmf_dbg(ERROR, "firmware trap in dongle\n"); 2943 brcmf_err("firmware trap in dongle\n");
2959 2944
2960 return 0; 2945 return 0;
2961} 2946}
@@ -2971,7 +2956,6 @@ static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2971 if (pos != 0) 2956 if (pos != 0)
2972 return 0; 2957 return 0;
2973 2958
2974 down(&bus->sdsem);
2975 error = brcmf_sdio_readshared(bus, &sh); 2959 error = brcmf_sdio_readshared(bus, &sh);
2976 if (error < 0) 2960 if (error < 0)
2977 goto done; 2961 goto done;
@@ -2988,7 +2972,6 @@ static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
2988 error += nbytes; 2972 error += nbytes;
2989 *ppos += error; 2973 *ppos += error;
2990done: 2974done:
2991 up(&bus->sdsem);
2992 return error; 2975 return error;
2993} 2976}
2994 2977
@@ -3039,6 +3022,7 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3039 int timeleft; 3022 int timeleft;
3040 uint rxlen = 0; 3023 uint rxlen = 0;
3041 bool pending; 3024 bool pending;
3025 u8 *buf;
3042 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 3026 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
3043 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; 3027 struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
3044 struct brcmf_sdio *bus = sdiodev->bus; 3028 struct brcmf_sdio *bus = sdiodev->bus;
@@ -3048,17 +3032,21 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
3048 /* Wait until control frame is available */ 3032 /* Wait until control frame is available */
3049 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending); 3033 timeleft = brcmf_sdbrcm_dcmd_resp_wait(bus, &bus->rxlen, &pending);
3050 3034
3051 down(&bus->sdsem); 3035 spin_lock_bh(&bus->rxctl_lock);
3052 rxlen = bus->rxlen; 3036 rxlen = bus->rxlen;
3053 memcpy(msg, bus->rxctl, min(msglen, rxlen)); 3037 memcpy(msg, bus->rxctl, min(msglen, rxlen));
3038 bus->rxctl = NULL;
3039 buf = bus->rxctl_orig;
3040 bus->rxctl_orig = NULL;
3054 bus->rxlen = 0; 3041 bus->rxlen = 0;
3055 up(&bus->sdsem); 3042 spin_unlock_bh(&bus->rxctl_lock);
3043 vfree(buf);
3056 3044
3057 if (rxlen) { 3045 if (rxlen) {
3058 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n", 3046 brcmf_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n",
3059 rxlen, msglen); 3047 rxlen, msglen);
3060 } else if (timeleft == 0) { 3048 } else if (timeleft == 0) {
3061 brcmf_dbg(ERROR, "resumed on timeout\n"); 3049 brcmf_err("resumed on timeout\n");
3062 brcmf_sdbrcm_checkdied(bus); 3050 brcmf_sdbrcm_checkdied(bus);
3063 } else if (pending) { 3051 } else if (pending) {
3064 brcmf_dbg(CTL, "cancelled\n"); 3052 brcmf_dbg(CTL, "cancelled\n");
@@ -3109,14 +3097,14 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
3109 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr, 3097 bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
3110 nvram_ularray, bus->varsz); 3098 nvram_ularray, bus->varsz);
3111 if (bcmerror) { 3099 if (bcmerror) {
3112 brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n", 3100 brcmf_err("error %d on reading %d nvram bytes at 0x%08x\n",
3113 bcmerror, bus->varsz, varaddr); 3101 bcmerror, bus->varsz, varaddr);
3114 } 3102 }
3115 /* Compare the org NVRAM with the one read from RAM */ 3103 /* Compare the org NVRAM with the one read from RAM */
3116 if (memcmp(bus->vars, nvram_ularray, bus->varsz)) 3104 if (memcmp(bus->vars, nvram_ularray, bus->varsz))
3117 brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n"); 3105 brcmf_err("Downloaded NVRAM image is corrupted\n");
3118 else 3106 else
3119 brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n"); 3107 brcmf_err("Download/Upload/Compare of NVRAM ok\n");
3120 3108
3121 kfree(nvram_ularray); 3109 kfree(nvram_ularray);
3122#endif /* DEBUG */ 3110#endif /* DEBUG */
@@ -3174,14 +3162,14 @@ static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
3174 } 3162 }
3175 } else { 3163 } else {
3176 if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) { 3164 if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
3177 brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n"); 3165 brcmf_err("SOCRAM core is down after reset?\n");
3178 bcmerror = -EBADE; 3166 bcmerror = -EBADE;
3179 goto fail; 3167 goto fail;
3180 } 3168 }
3181 3169
3182 bcmerror = brcmf_sdbrcm_write_vars(bus); 3170 bcmerror = brcmf_sdbrcm_write_vars(bus);
3183 if (bcmerror) { 3171 if (bcmerror) {
3184 brcmf_dbg(ERROR, "no vars written to RAM\n"); 3172 brcmf_err("no vars written to RAM\n");
3185 bcmerror = 0; 3173 bcmerror = 0;
3186 } 3174 }
3187 3175
@@ -3221,7 +3209,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3221 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME, 3209 ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
3222 &bus->sdiodev->func[2]->dev); 3210 &bus->sdiodev->func[2]->dev);
3223 if (ret) { 3211 if (ret) {
3224 brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret); 3212 brcmf_err("Fail to request firmware %d\n", ret);
3225 return ret; 3213 return ret;
3226 } 3214 }
3227 bus->fw_ptr = 0; 3215 bus->fw_ptr = 0;
@@ -3240,7 +3228,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
3240 brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) { 3228 brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus))) {
3241 ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len); 3229 ret = brcmf_sdbrcm_membytes(bus, true, offset, memptr, len);
3242 if (ret) { 3230 if (ret) {
3243 brcmf_dbg(ERROR, "error %d on writing %d membytes at 0x%08x\n", 3231 brcmf_err("error %d on writing %d membytes at 0x%08x\n",
3244 ret, MEMBLOCK, offset); 3232 ret, MEMBLOCK, offset);
3245 goto err; 3233 goto err;
3246 } 3234 }
@@ -3340,7 +3328,7 @@ static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
3340 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME, 3328 ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
3341 &bus->sdiodev->func[2]->dev); 3329 &bus->sdiodev->func[2]->dev);
3342 if (ret) { 3330 if (ret) {
3343 brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret); 3331 brcmf_err("Fail to request nvram %d\n", ret);
3344 return ret; 3332 return ret;
3345 } 3333 }
3346 3334
@@ -3357,23 +3345,23 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3357 3345
3358 /* Keep arm in reset */ 3346 /* Keep arm in reset */
3359 if (brcmf_sdbrcm_download_state(bus, true)) { 3347 if (brcmf_sdbrcm_download_state(bus, true)) {
3360 brcmf_dbg(ERROR, "error placing ARM core in reset\n"); 3348 brcmf_err("error placing ARM core in reset\n");
3361 goto err; 3349 goto err;
3362 } 3350 }
3363 3351
3364 /* External image takes precedence if specified */ 3352 /* External image takes precedence if specified */
3365 if (brcmf_sdbrcm_download_code_file(bus)) { 3353 if (brcmf_sdbrcm_download_code_file(bus)) {
3366 brcmf_dbg(ERROR, "dongle image file download failed\n"); 3354 brcmf_err("dongle image file download failed\n");
3367 goto err; 3355 goto err;
3368 } 3356 }
3369 3357
3370 /* External nvram takes precedence if specified */ 3358 /* External nvram takes precedence if specified */
3371 if (brcmf_sdbrcm_download_nvram(bus)) 3359 if (brcmf_sdbrcm_download_nvram(bus))
3372 brcmf_dbg(ERROR, "dongle nvram file download failed\n"); 3360 brcmf_err("dongle nvram file download failed\n");
3373 3361
3374 /* Take arm out of reset */ 3362 /* Take arm out of reset */
3375 if (brcmf_sdbrcm_download_state(bus, false)) { 3363 if (brcmf_sdbrcm_download_state(bus, false)) {
3376 brcmf_dbg(ERROR, "error getting out of ARM core reset\n"); 3364 brcmf_err("error getting out of ARM core reset\n");
3377 goto err; 3365 goto err;
3378 } 3366 }
3379 3367
@@ -3388,13 +3376,16 @@ brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
3388{ 3376{
3389 bool ret; 3377 bool ret;
3390 3378
3391 /* Download the firmware */ 3379 sdio_claim_host(bus->sdiodev->func[1]);
3380
3392 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 3381 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3393 3382
3394 ret = _brcmf_sdbrcm_download_firmware(bus) == 0; 3383 ret = _brcmf_sdbrcm_download_firmware(bus) == 0;
3395 3384
3396 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false); 3385 brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
3397 3386
3387 sdio_release_host(bus->sdiodev->func[1]);
3388
3398 return ret; 3389 return ret;
3399} 3390}
3400 3391
@@ -3423,7 +3414,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3423 bus->sdcnt.tickcnt = 0; 3414 bus->sdcnt.tickcnt = 0;
3424 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3415 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3425 3416
3426 down(&bus->sdsem); 3417 sdio_claim_host(bus->sdiodev->func[1]);
3427 3418
3428 /* Make sure backplane clock is on, needed to generate F2 interrupt */ 3419 /* Make sure backplane clock is on, needed to generate F2 interrupt */
3429 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 3420 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -3438,7 +3429,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3438 (saveclk | SBSDIO_FORCE_HT), &err); 3429 (saveclk | SBSDIO_FORCE_HT), &err);
3439 } 3430 }
3440 if (err) { 3431 if (err) {
3441 brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err); 3432 brcmf_err("Failed to force clock for F2: err %d\n", err);
3442 goto exit; 3433 goto exit;
3443 } 3434 }
3444 3435
@@ -3484,7 +3475,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3484 if (ret == 0) { 3475 if (ret == 0) {
3485 ret = brcmf_sdio_intr_register(bus->sdiodev); 3476 ret = brcmf_sdio_intr_register(bus->sdiodev);
3486 if (ret != 0) 3477 if (ret != 0)
3487 brcmf_dbg(ERROR, "intr register failed:%d\n", ret); 3478 brcmf_err("intr register failed:%d\n", ret);
3488 } 3479 }
3489 3480
3490 /* If we didn't come up, turn off backplane clock */ 3481 /* If we didn't come up, turn off backplane clock */
@@ -3492,7 +3483,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
3492 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3483 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3493 3484
3494exit: 3485exit:
3495 up(&bus->sdsem); 3486 sdio_release_host(bus->sdiodev->func[1]);
3496 3487
3497 return ret; 3488 return ret;
3498} 3489}
@@ -3504,12 +3495,12 @@ void brcmf_sdbrcm_isr(void *arg)
3504 brcmf_dbg(TRACE, "Enter\n"); 3495 brcmf_dbg(TRACE, "Enter\n");
3505 3496
3506 if (!bus) { 3497 if (!bus) {
3507 brcmf_dbg(ERROR, "bus is null pointer, exiting\n"); 3498 brcmf_err("bus is null pointer, exiting\n");
3508 return; 3499 return;
3509 } 3500 }
3510 3501
3511 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { 3502 if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
3512 brcmf_dbg(ERROR, "bus is down. we have nothing to do\n"); 3503 brcmf_err("bus is down. we have nothing to do\n");
3513 return; 3504 return;
3514 } 3505 }
3515 /* Count the interrupt call */ 3506 /* Count the interrupt call */
@@ -3518,13 +3509,13 @@ void brcmf_sdbrcm_isr(void *arg)
3518 atomic_set(&bus->ipend, 1); 3509 atomic_set(&bus->ipend, 1);
3519 else 3510 else
3520 if (brcmf_sdio_intr_rstatus(bus)) { 3511 if (brcmf_sdio_intr_rstatus(bus)) {
3521 brcmf_dbg(ERROR, "failed backplane access\n"); 3512 brcmf_err("failed backplane access\n");
3522 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN; 3513 bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
3523 } 3514 }
3524 3515
3525 /* Disable additional interrupts (is this needed now)? */ 3516 /* Disable additional interrupts (is this needed now)? */
3526 if (!bus->intr) 3517 if (!bus->intr)
3527 brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); 3518 brcmf_err("isr w/o interrupt configured!\n");
3528 3519
3529 brcmf_sdbrcm_adddpctsk(bus); 3520 brcmf_sdbrcm_adddpctsk(bus);
3530 queue_work(bus->brcmf_wq, &bus->datawork); 3521 queue_work(bus->brcmf_wq, &bus->datawork);
@@ -3539,8 +3530,6 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3539 3530
3540 brcmf_dbg(TIMER, "Enter\n"); 3531 brcmf_dbg(TIMER, "Enter\n");
3541 3532
3542 down(&bus->sdsem);
3543
3544 /* Poll period: check device if appropriate. */ 3533 /* Poll period: check device if appropriate. */
3545 if (bus->poll && (++bus->polltick >= bus->pollrate)) { 3534 if (bus->poll && (++bus->polltick >= bus->pollrate)) {
3546 u32 intstatus = 0; 3535 u32 intstatus = 0;
@@ -3557,9 +3546,11 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3557 u8 devpend; 3546 u8 devpend;
3558 spin_unlock_irqrestore(&bus->dpc_tl_lock, 3547 spin_unlock_irqrestore(&bus->dpc_tl_lock,
3559 flags); 3548 flags);
3549 sdio_claim_host(bus->sdiodev->func[1]);
3560 devpend = brcmf_sdio_regrb(bus->sdiodev, 3550 devpend = brcmf_sdio_regrb(bus->sdiodev,
3561 SDIO_CCCR_INTx, 3551 SDIO_CCCR_INTx,
3562 NULL); 3552 NULL);
3553 sdio_release_host(bus->sdiodev->func[1]);
3563 intstatus = 3554 intstatus =
3564 devpend & (INTR_STATUS_FUNC1 | 3555 devpend & (INTR_STATUS_FUNC1 |
3565 INTR_STATUS_FUNC2); 3556 INTR_STATUS_FUNC2);
@@ -3584,16 +3575,18 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3584 } 3575 }
3585#ifdef DEBUG 3576#ifdef DEBUG
3586 /* Poll for console output periodically */ 3577 /* Poll for console output periodically */
3587 if (bus_if->state == BRCMF_BUS_DATA && 3578 if (bus_if && bus_if->state == BRCMF_BUS_DATA &&
3588 bus->console_interval != 0) { 3579 bus->console_interval != 0) {
3589 bus->console.count += BRCMF_WD_POLL_MS; 3580 bus->console.count += BRCMF_WD_POLL_MS;
3590 if (bus->console.count >= bus->console_interval) { 3581 if (bus->console.count >= bus->console_interval) {
3591 bus->console.count -= bus->console_interval; 3582 bus->console.count -= bus->console_interval;
3583 sdio_claim_host(bus->sdiodev->func[1]);
3592 /* Make sure backplane clock is on */ 3584 /* Make sure backplane clock is on */
3593 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 3585 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3594 if (brcmf_sdbrcm_readconsole(bus) < 0) 3586 if (brcmf_sdbrcm_readconsole(bus) < 0)
3595 /* stop on error */ 3587 /* stop on error */
3596 bus->console_interval = 0; 3588 bus->console_interval = 0;
3589 sdio_release_host(bus->sdiodev->func[1]);
3597 } 3590 }
3598 } 3591 }
3599#endif /* DEBUG */ 3592#endif /* DEBUG */
@@ -3606,13 +3599,13 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
3606 bus->activity = false; 3599 bus->activity = false;
3607 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); 3600 brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
3608 } else { 3601 } else {
3602 sdio_claim_host(bus->sdiodev->func[1]);
3609 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3603 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3604 sdio_release_host(bus->sdiodev->func[1]);
3610 } 3605 }
3611 } 3606 }
3612 } 3607 }
3613 3608
3614 up(&bus->sdsem);
3615
3616 return (atomic_read(&bus->ipend) > 0); 3609 return (atomic_read(&bus->ipend) > 0);
3617} 3610}
3618 3611
@@ -3707,6 +3700,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3707 3700
3708 bus->alp_only = true; 3701 bus->alp_only = true;
3709 3702
3703 sdio_claim_host(bus->sdiodev->func[1]);
3704
3710 pr_debug("F1 signature read @0x18000000=0x%4x\n", 3705 pr_debug("F1 signature read @0x18000000=0x%4x\n",
3711 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL)); 3706 brcmf_sdio_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
3712 3707
@@ -3722,18 +3717,18 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3722 SBSDIO_FUNC1_CHIPCLKCSR, &err); 3717 SBSDIO_FUNC1_CHIPCLKCSR, &err);
3723 3718
3724 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) { 3719 if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) {
3725 brcmf_dbg(ERROR, "ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", 3720 brcmf_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n",
3726 err, BRCMF_INIT_CLKCTL1, clkctl); 3721 err, BRCMF_INIT_CLKCTL1, clkctl);
3727 goto fail; 3722 goto fail;
3728 } 3723 }
3729 3724
3730 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) { 3725 if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
3731 brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n"); 3726 brcmf_err("brcmf_sdio_chip_attach failed!\n");
3732 goto fail; 3727 goto fail;
3733 } 3728 }
3734 3729
3735 if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) { 3730 if (!brcmf_sdbrcm_chipmatch((u16) bus->ci->chip)) {
3736 brcmf_dbg(ERROR, "unsupported chip: 0x%04x\n", bus->ci->chip); 3731 brcmf_err("unsupported chip: 0x%04x\n", bus->ci->chip);
3737 goto fail; 3732 goto fail;
3738 } 3733 }
3739 3734
@@ -3743,7 +3738,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3743 /* Get info on the SOCRAM cores... */ 3738 /* Get info on the SOCRAM cores... */
3744 bus->ramsize = bus->ci->ramsize; 3739 bus->ramsize = bus->ci->ramsize;
3745 if (!(bus->ramsize)) { 3740 if (!(bus->ramsize)) {
3746 brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n"); 3741 brcmf_err("failed to find SOCRAM memory!\n");
3747 goto fail; 3742 goto fail;
3748 } 3743 }
3749 3744
@@ -3754,6 +3749,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3754 reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL); 3749 reg_val = brcmf_sdio_regrl(bus->sdiodev, reg_addr, NULL);
3755 brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL); 3750 brcmf_sdio_regwl(bus->sdiodev, reg_addr, reg_val | CC_BPRESEN, NULL);
3756 3751
3752 sdio_release_host(bus->sdiodev->func[1]);
3753
3757 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); 3754 brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
3758 3755
3759 /* Locate an appropriately-aligned portion of hdrbuf */ 3756 /* Locate an appropriately-aligned portion of hdrbuf */
@@ -3769,6 +3766,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
3769 return true; 3766 return true;
3770 3767
3771fail: 3768fail:
3769 sdio_release_host(bus->sdiodev->func[1]);
3772 return false; 3770 return false;
3773} 3771}
3774 3772
@@ -3776,6 +3774,8 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3776{ 3774{
3777 brcmf_dbg(TRACE, "Enter\n"); 3775 brcmf_dbg(TRACE, "Enter\n");
3778 3776
3777 sdio_claim_host(bus->sdiodev->func[1]);
3778
3779 /* Disable F2 to clear any intermediate frame state on the dongle */ 3779 /* Disable F2 to clear any intermediate frame state on the dongle */
3780 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx, 3780 brcmf_sdio_regwb(bus->sdiodev, SDIO_CCCR_IOEx,
3781 SDIO_FUNC_ENABLE_1, NULL); 3781 SDIO_FUNC_ENABLE_1, NULL);
@@ -3786,6 +3786,8 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
3786 /* Done with backplane-dependent accesses, can drop clock... */ 3786 /* Done with backplane-dependent accesses, can drop clock... */
3787 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); 3787 brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
3788 3788
3789 sdio_release_host(bus->sdiodev->func[1]);
3790
3789 /* ...and initialize clock/power states */ 3791 /* ...and initialize clock/power states */
3790 bus->clkstate = CLK_SDONLY; 3792 bus->clkstate = CLK_SDONLY;
3791 bus->idletime = BRCMF_IDLE_INTERVAL; 3793 bus->idletime = BRCMF_IDLE_INTERVAL;
@@ -3841,8 +3843,10 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
3841 brcmf_dbg(TRACE, "Enter\n"); 3843 brcmf_dbg(TRACE, "Enter\n");
3842 3844
3843 if (bus->ci) { 3845 if (bus->ci) {
3846 sdio_claim_host(bus->sdiodev->func[1]);
3844 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false); 3847 brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
3845 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false); 3848 brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
3849 sdio_release_host(bus->sdiodev->func[1]);
3846 brcmf_sdio_chip_detach(&bus->ci); 3850 brcmf_sdio_chip_detach(&bus->ci);
3847 if (bus->vars && bus->varsz) 3851 if (bus->vars && bus->varsz)
3848 kfree(bus->vars); 3852 kfree(bus->vars);
@@ -3862,7 +3866,8 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3862 brcmf_sdio_intr_unregister(bus->sdiodev); 3866 brcmf_sdio_intr_unregister(bus->sdiodev);
3863 3867
3864 cancel_work_sync(&bus->datawork); 3868 cancel_work_sync(&bus->datawork);
3865 destroy_workqueue(bus->brcmf_wq); 3869 if (bus->brcmf_wq)
3870 destroy_workqueue(bus->brcmf_wq);
3866 3871
3867 if (bus->sdiodev->bus_if->drvr) { 3872 if (bus->sdiodev->bus_if->drvr) {
3868 brcmf_detach(bus->sdiodev->dev); 3873 brcmf_detach(bus->sdiodev->dev);
@@ -3877,6 +3882,14 @@ static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
3877 brcmf_dbg(TRACE, "Disconnected\n"); 3882 brcmf_dbg(TRACE, "Disconnected\n");
3878} 3883}
3879 3884
3885static struct brcmf_bus_ops brcmf_sdio_bus_ops = {
3886 .stop = brcmf_sdbrcm_bus_stop,
3887 .init = brcmf_sdbrcm_bus_init,
3888 .txdata = brcmf_sdbrcm_bus_txdata,
3889 .txctl = brcmf_sdbrcm_bus_txctl,
3890 .rxctl = brcmf_sdbrcm_bus_rxctl,
3891};
3892
3880void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) 3893void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3881{ 3894{
3882 int ret; 3895 int ret;
@@ -3904,31 +3917,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3904 bus->txminmax = BRCMF_TXMINMAX; 3917 bus->txminmax = BRCMF_TXMINMAX;
3905 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1; 3918 bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
3906 3919
3920 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3921 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3922 if (bus->brcmf_wq == NULL) {
3923 brcmf_err("insufficient memory to create txworkqueue\n");
3924 goto fail;
3925 }
3926
3907 /* attempt to attach to the dongle */ 3927 /* attempt to attach to the dongle */
3908 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) { 3928 if (!(brcmf_sdbrcm_probe_attach(bus, regsva))) {
3909 brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_attach failed\n"); 3929 brcmf_err("brcmf_sdbrcm_probe_attach failed\n");
3910 goto fail; 3930 goto fail;
3911 } 3931 }
3912 3932
3933 spin_lock_init(&bus->rxctl_lock);
3913 spin_lock_init(&bus->txqlock); 3934 spin_lock_init(&bus->txqlock);
3914 init_waitqueue_head(&bus->ctrl_wait); 3935 init_waitqueue_head(&bus->ctrl_wait);
3915 init_waitqueue_head(&bus->dcmd_resp_wait); 3936 init_waitqueue_head(&bus->dcmd_resp_wait);
3916 3937
3917 bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
3918 if (bus->brcmf_wq == NULL) {
3919 brcmf_dbg(ERROR, "insufficient memory to create txworkqueue\n");
3920 goto fail;
3921 }
3922 INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
3923
3924 /* Set up the watchdog timer */ 3938 /* Set up the watchdog timer */
3925 init_timer(&bus->timer); 3939 init_timer(&bus->timer);
3926 bus->timer.data = (unsigned long)bus; 3940 bus->timer.data = (unsigned long)bus;
3927 bus->timer.function = brcmf_sdbrcm_watchdog; 3941 bus->timer.function = brcmf_sdbrcm_watchdog;
3928 3942
3929 /* Initialize thread based operation and lock */
3930 sema_init(&bus->sdsem, 1);
3931
3932 /* Initialize watchdog thread */ 3943 /* Initialize watchdog thread */
3933 init_completion(&bus->watchdog_wait); 3944 init_completion(&bus->watchdog_wait);
3934 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread, 3945 bus->watchdog_tsk = kthread_run(brcmf_sdbrcm_watchdog_thread,
@@ -3942,26 +3953,24 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3942 spin_lock_init(&bus->dpc_tl_lock); 3953 spin_lock_init(&bus->dpc_tl_lock);
3943 3954
3944 /* Assign bus interface call back */ 3955 /* Assign bus interface call back */
3945 bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop; 3956 bus->sdiodev->bus_if->dev = bus->sdiodev->dev;
3946 bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init; 3957 bus->sdiodev->bus_if->ops = &brcmf_sdio_bus_ops;
3947 bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata; 3958
3948 bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
3949 bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
3950 /* Attach to the brcmf/OS/network interface */ 3959 /* Attach to the brcmf/OS/network interface */
3951 ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev); 3960 ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
3952 if (ret != 0) { 3961 if (ret != 0) {
3953 brcmf_dbg(ERROR, "brcmf_attach failed\n"); 3962 brcmf_err("brcmf_attach failed\n");
3954 goto fail; 3963 goto fail;
3955 } 3964 }
3956 3965
3957 /* Allocate buffers */ 3966 /* Allocate buffers */
3958 if (!(brcmf_sdbrcm_probe_malloc(bus))) { 3967 if (!(brcmf_sdbrcm_probe_malloc(bus))) {
3959 brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_malloc failed\n"); 3968 brcmf_err("brcmf_sdbrcm_probe_malloc failed\n");
3960 goto fail; 3969 goto fail;
3961 } 3970 }
3962 3971
3963 if (!(brcmf_sdbrcm_probe_init(bus))) { 3972 if (!(brcmf_sdbrcm_probe_init(bus))) {
3964 brcmf_dbg(ERROR, "brcmf_sdbrcm_probe_init failed\n"); 3973 brcmf_err("brcmf_sdbrcm_probe_init failed\n");
3965 goto fail; 3974 goto fail;
3966 } 3975 }
3967 3976
@@ -3991,10 +4000,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3991 /* if firmware path present try to download and bring up bus */ 4000 /* if firmware path present try to download and bring up bus */
3992 ret = brcmf_bus_start(bus->sdiodev->dev); 4001 ret = brcmf_bus_start(bus->sdiodev->dev);
3993 if (ret != 0) { 4002 if (ret != 0) {
3994 if (ret == -ENOLINK) { 4003 brcmf_err("dongle is not responding\n");
3995 brcmf_dbg(ERROR, "dongle is not responding\n"); 4004 goto fail;
3996 goto fail;
3997 }
3998 } 4005 }
3999 4006
4000 return bus; 4007 return bus;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
new file mode 100644
index 000000000000..ba0b22512f12
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -0,0 +1,447 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/netdevice.h>
17
18#include "brcmu_wifi.h"
19#include "brcmu_utils.h"
20
21#include "dhd.h"
22#include "dhd_dbg.h"
23#include "fweh.h"
24#include "fwil.h"
25
26/**
27 * struct brcm_ethhdr - broadcom specific ether header.
28 *
29 * @subtype: subtype for this packet.
30 * @length: TODO: length of appended data.
31 * @version: version indication.
32 * @oui: OUI of this packet.
33 * @usr_subtype: subtype for this OUI.
34 */
35struct brcm_ethhdr {
36 __be16 subtype;
37 __be16 length;
38 u8 version;
39 u8 oui[3];
40 __be16 usr_subtype;
41} __packed;
42
43struct brcmf_event_msg_be {
44 __be16 version;
45 __be16 flags;
46 __be32 event_type;
47 __be32 status;
48 __be32 reason;
49 __be32 auth_type;
50 __be32 datalen;
51 u8 addr[ETH_ALEN];
52 char ifname[IFNAMSIZ];
53 u8 ifidx;
54 u8 bsscfgidx;
55} __packed;
56
57/**
58 * struct brcmf_event - contents of broadcom event packet.
59 *
60 * @eth: standard ether header.
61 * @hdr: broadcom specific ether header.
62 * @msg: common part of the actual event message.
63 */
64struct brcmf_event {
65 struct ethhdr eth;
66 struct brcm_ethhdr hdr;
67 struct brcmf_event_msg_be msg;
68} __packed;
69
70/**
71 * struct brcmf_fweh_queue_item - event item on event queue.
72 *
73 * @q: list element for queuing.
74 * @code: event code.
75 * @ifidx: interface index related to this event.
76 * @ifaddr: ethernet address for interface.
77 * @emsg: common parameters of the firmware event message.
78 * @data: event specific data part of the firmware event.
79 */
80struct brcmf_fweh_queue_item {
81 struct list_head q;
82 enum brcmf_fweh_event_code code;
83 u8 ifidx;
84 u8 ifaddr[ETH_ALEN];
85 struct brcmf_event_msg_be emsg;
86 u8 data[0];
87};
88
89/**
90 * struct brcmf_fweh_event_name - code, name mapping entry.
91 */
92struct brcmf_fweh_event_name {
93 enum brcmf_fweh_event_code code;
94 const char *name;
95};
96
97#ifdef DEBUG
98#define BRCMF_ENUM_DEF(id, val) \
99 { val, #id },
100
101/* array for mapping code to event name */
102static struct brcmf_fweh_event_name fweh_event_names[] = {
103 BRCMF_FWEH_EVENT_ENUM_DEFLIST
104};
105#undef BRCMF_ENUM_DEF
106
107/**
108 * brcmf_fweh_event_name() - returns name for given event code.
109 *
110 * @code: code to lookup.
111 */
112static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
113{
114 int i;
115 for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) {
116 if (fweh_event_names[i].code == code)
117 return fweh_event_names[i].name;
118 }
119 return "unknown";
120}
121#else
122static const char *brcmf_fweh_event_name(enum brcmf_fweh_event_code code)
123{
124 return "nodebug";
125}
126#endif
127
128/**
129 * brcmf_fweh_queue_event() - create and queue event.
130 *
131 * @fweh: firmware event handling info.
132 * @event: event queue entry.
133 */
134static void brcmf_fweh_queue_event(struct brcmf_fweh_info *fweh,
135 struct brcmf_fweh_queue_item *event)
136{
137 ulong flags;
138
139 spin_lock_irqsave(&fweh->evt_q_lock, flags);
140 list_add_tail(&event->q, &fweh->event_q);
141 spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
142 schedule_work(&fweh->event_work);
143}
144
145static int brcmf_fweh_call_event_handler(struct brcmf_if *ifp,
146 enum brcmf_fweh_event_code code,
147 struct brcmf_event_msg *emsg,
148 void *data)
149{
150 struct brcmf_fweh_info *fweh;
151 int err = -EINVAL;
152
153 if (ifp) {
154 fweh = &ifp->drvr->fweh;
155
156 /* handle the event if valid interface and handler */
157 if (ifp->ndev && fweh->evt_handler[code])
158 err = fweh->evt_handler[code](ifp, emsg, data);
159 else
160 brcmf_err("unhandled event %d ignored\n", code);
161 } else {
162 brcmf_err("no interface object\n");
163 }
164 return err;
165}
166
167/**
168 * brcmf_fweh_handle_if_event() - handle IF event.
169 *
170 * @drvr: driver information object.
171 * @item: queue entry.
172 * @ifpp: interface object (may change upon ADD action).
173 */
174static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
175 struct brcmf_event_msg *emsg,
176 void *data)
177{
178 struct brcmf_if_event *ifevent = data;
179 struct brcmf_if *ifp;
180 int err = 0;
181
182 brcmf_dbg(EVENT, "action: %u idx: %u bsscfg: %u flags: %u\n",
183 ifevent->action, ifevent->ifidx,
184 ifevent->bssidx, ifevent->flags);
185
186 if (ifevent->ifidx >= BRCMF_MAX_IFS) {
187 brcmf_err("invalid interface index: %u\n",
188 ifevent->ifidx);
189 return;
190 }
191
192 ifp = drvr->iflist[ifevent->ifidx];
193
194 if (ifevent->action == BRCMF_E_IF_ADD) {
195 brcmf_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname,
196 emsg->addr);
197 ifp = brcmf_add_if(drvr, ifevent->ifidx, ifevent->bssidx,
198 emsg->ifname, emsg->addr);
199 if (IS_ERR(ifp))
200 return;
201
202 if (!drvr->fweh.evt_handler[BRCMF_E_IF])
203 err = brcmf_net_attach(ifp);
204 }
205
206 err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
207
208 if (ifevent->action == BRCMF_E_IF_DEL)
209 brcmf_del_if(drvr, ifevent->ifidx);
210}
211
212/**
213 * brcmf_fweh_dequeue_event() - get event from the queue.
214 *
215 * @fweh: firmware event handling info.
216 */
217static struct brcmf_fweh_queue_item *
218brcmf_fweh_dequeue_event(struct brcmf_fweh_info *fweh)
219{
220 struct brcmf_fweh_queue_item *event = NULL;
221 ulong flags;
222
223 spin_lock_irqsave(&fweh->evt_q_lock, flags);
224 if (!list_empty(&fweh->event_q)) {
225 event = list_first_entry(&fweh->event_q,
226 struct brcmf_fweh_queue_item, q);
227 list_del(&event->q);
228 }
229 spin_unlock_irqrestore(&fweh->evt_q_lock, flags);
230
231 return event;
232}
233
234/**
235 * brcmf_fweh_event_worker() - firmware event worker.
236 *
237 * @work: worker object.
238 */
239static void brcmf_fweh_event_worker(struct work_struct *work)
240{
241 struct brcmf_pub *drvr;
242 struct brcmf_if *ifp;
243 struct brcmf_fweh_info *fweh;
244 struct brcmf_fweh_queue_item *event;
245 int err = 0;
246 struct brcmf_event_msg_be *emsg_be;
247 struct brcmf_event_msg emsg;
248
249 fweh = container_of(work, struct brcmf_fweh_info, event_work);
250 drvr = container_of(fweh, struct brcmf_pub, fweh);
251
252 while ((event = brcmf_fweh_dequeue_event(fweh))) {
253 ifp = drvr->iflist[event->ifidx];
254
255 brcmf_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n",
256 brcmf_fweh_event_name(event->code), event->code,
257 event->emsg.ifidx, event->emsg.bsscfgidx,
258 event->emsg.addr);
259
260 /* convert event message */
261 emsg_be = &event->emsg;
262 emsg.version = be16_to_cpu(emsg_be->version);
263 emsg.flags = be16_to_cpu(emsg_be->flags);
264 emsg.event_code = event->code;
265 emsg.status = be32_to_cpu(emsg_be->status);
266 emsg.reason = be32_to_cpu(emsg_be->reason);
267 emsg.auth_type = be32_to_cpu(emsg_be->auth_type);
268 emsg.datalen = be32_to_cpu(emsg_be->datalen);
269 memcpy(emsg.addr, emsg_be->addr, ETH_ALEN);
270 memcpy(emsg.ifname, emsg_be->ifname, sizeof(emsg.ifname));
271 emsg.ifidx = emsg_be->ifidx;
272 emsg.bsscfgidx = emsg_be->bsscfgidx;
273
274 brcmf_dbg(EVENT, " version %u flags %u status %u reason %u\n",
275 emsg.version, emsg.flags, emsg.status, emsg.reason);
276 brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data,
277 min_t(u32, emsg.datalen, 64),
278 "event payload, len=%d\n", emsg.datalen);
279
280 /* special handling of interface event */
281 if (event->code == BRCMF_E_IF) {
282 brcmf_fweh_handle_if_event(drvr, &emsg, event->data);
283 goto event_free;
284 }
285
286 err = brcmf_fweh_call_event_handler(ifp, event->code, &emsg,
287 event->data);
288 if (err) {
289 brcmf_err("event handler failed (%d)\n",
290 event->code);
291 err = 0;
292 }
293event_free:
294 kfree(event);
295 }
296}
297
298/**
299 * brcmf_fweh_attach() - initialize firmware event handling.
300 *
301 * @drvr: driver information object.
302 */
303void brcmf_fweh_attach(struct brcmf_pub *drvr)
304{
305 struct brcmf_fweh_info *fweh = &drvr->fweh;
306 INIT_WORK(&fweh->event_work, brcmf_fweh_event_worker);
307 spin_lock_init(&fweh->evt_q_lock);
308 INIT_LIST_HEAD(&fweh->event_q);
309}
310
311/**
312 * brcmf_fweh_detach() - cleanup firmware event handling.
313 *
314 * @drvr: driver information object.
315 */
316void brcmf_fweh_detach(struct brcmf_pub *drvr)
317{
318 struct brcmf_fweh_info *fweh = &drvr->fweh;
319 struct brcmf_if *ifp = drvr->iflist[0];
320 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
321
322 if (ifp) {
323 /* clear all events */
324 memset(eventmask, 0, BRCMF_EVENTING_MASK_LEN);
325 (void)brcmf_fil_iovar_data_set(ifp, "event_msgs",
326 eventmask,
327 BRCMF_EVENTING_MASK_LEN);
328 }
329 /* cancel the worker */
330 cancel_work_sync(&fweh->event_work);
331 WARN_ON(!list_empty(&fweh->event_q));
332 memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
333}
334
335/**
336 * brcmf_fweh_register() - register handler for given event code.
337 *
338 * @drvr: driver information object.
339 * @code: event code.
340 * @handler: handler for the given event code.
341 */
342int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
343 brcmf_fweh_handler_t handler)
344{
345 if (drvr->fweh.evt_handler[code]) {
346 brcmf_err("event code %d already registered\n", code);
347 return -ENOSPC;
348 }
349 drvr->fweh.evt_handler[code] = handler;
350 brcmf_dbg(TRACE, "event handler registered for %s\n",
351 brcmf_fweh_event_name(code));
352 return 0;
353}
354
355/**
356 * brcmf_fweh_unregister() - remove handler for given code.
357 *
358 * @drvr: driver information object.
359 * @code: event code.
360 */
361void brcmf_fweh_unregister(struct brcmf_pub *drvr,
362 enum brcmf_fweh_event_code code)
363{
364 brcmf_dbg(TRACE, "event handler cleared for %s\n",
365 brcmf_fweh_event_name(code));
366 drvr->fweh.evt_handler[code] = NULL;
367}
368
369/**
370 * brcmf_fweh_activate_events() - enables firmware events registered.
371 *
372 * @ifp: primary interface object.
373 */
374int brcmf_fweh_activate_events(struct brcmf_if *ifp)
375{
376 int i, err;
377 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
378
379 for (i = 0; i < BRCMF_E_LAST; i++) {
380 if (ifp->drvr->fweh.evt_handler[i]) {
381 brcmf_dbg(EVENT, "enable event %s\n",
382 brcmf_fweh_event_name(i));
383 setbit(eventmask, i);
384 }
385 }
386
387 /* want to handle IF event as well */
388 brcmf_dbg(EVENT, "enable event IF\n");
389 setbit(eventmask, BRCMF_E_IF);
390
391 err = brcmf_fil_iovar_data_set(ifp, "event_msgs",
392 eventmask, BRCMF_EVENTING_MASK_LEN);
393 if (err)
394 brcmf_err("Set event_msgs error (%d)\n", err);
395
396 return err;
397}
398
399/**
400 * brcmf_fweh_process_event() - process skb as firmware event.
401 *
402 * @drvr: driver information object.
403 * @event_packet: event packet to process.
404 * @ifidx: index of the firmware interface (may change).
405 *
406 * If the packet buffer contains a firmware event message it will
407 * dispatch the event to a registered handler (using worker).
408 */
409void brcmf_fweh_process_event(struct brcmf_pub *drvr,
410 struct brcmf_event *event_packet, u8 *ifidx)
411{
412 enum brcmf_fweh_event_code code;
413 struct brcmf_fweh_info *fweh = &drvr->fweh;
414 struct brcmf_fweh_queue_item *event;
415 gfp_t alloc_flag = GFP_KERNEL;
416 void *data;
417 u32 datalen;
418
419 /* get event info */
420 code = get_unaligned_be32(&event_packet->msg.event_type);
421 datalen = get_unaligned_be32(&event_packet->msg.datalen);
422 *ifidx = event_packet->msg.ifidx;
423 data = &event_packet[1];
424
425 if (code >= BRCMF_E_LAST)
426 return;
427
428 if (code != BRCMF_E_IF && !fweh->evt_handler[code])
429 return;
430
431 if (in_interrupt())
432 alloc_flag = GFP_ATOMIC;
433
434 event = kzalloc(sizeof(*event) + datalen, alloc_flag);
435 if (!event)
436 return;
437
438 event->code = code;
439 event->ifidx = *ifidx;
440
441 /* use memcpy to get aligned event message */
442 memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg));
443 memcpy(event->data, data, datalen);
444 memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN);
445
446 brcmf_fweh_queue_event(fweh, event);
447}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
new file mode 100644
index 000000000000..36901f76a3b5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -0,0 +1,215 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17
18#ifndef FWEH_H_
19#define FWEH_H_
20
21#include <asm/unaligned.h>
22#include <linux/skbuff.h>
23#include <linux/if_ether.h>
24#include <linux/if.h>
25
26/* formward declarations */
27struct brcmf_pub;
28struct brcmf_if;
29struct brcmf_cfg80211_info;
30struct brcmf_event;
31
32/* list of firmware events */
33#define BRCMF_FWEH_EVENT_ENUM_DEFLIST \
34 BRCMF_ENUM_DEF(SET_SSID, 0) \
35 BRCMF_ENUM_DEF(JOIN, 1) \
36 BRCMF_ENUM_DEF(START, 2) \
37 BRCMF_ENUM_DEF(AUTH, 3) \
38 BRCMF_ENUM_DEF(AUTH_IND, 4) \
39 BRCMF_ENUM_DEF(DEAUTH, 5) \
40 BRCMF_ENUM_DEF(DEAUTH_IND, 6) \
41 BRCMF_ENUM_DEF(ASSOC, 7) \
42 BRCMF_ENUM_DEF(ASSOC_IND, 8) \
43 BRCMF_ENUM_DEF(REASSOC, 9) \
44 BRCMF_ENUM_DEF(REASSOC_IND, 10) \
45 BRCMF_ENUM_DEF(DISASSOC, 11) \
46 BRCMF_ENUM_DEF(DISASSOC_IND, 12) \
47 BRCMF_ENUM_DEF(QUIET_START, 13) \
48 BRCMF_ENUM_DEF(QUIET_END, 14) \
49 BRCMF_ENUM_DEF(BEACON_RX, 15) \
50 BRCMF_ENUM_DEF(LINK, 16) \
51 BRCMF_ENUM_DEF(MIC_ERROR, 17) \
52 BRCMF_ENUM_DEF(NDIS_LINK, 18) \
53 BRCMF_ENUM_DEF(ROAM, 19) \
54 BRCMF_ENUM_DEF(TXFAIL, 20) \
55 BRCMF_ENUM_DEF(PMKID_CACHE, 21) \
56 BRCMF_ENUM_DEF(RETROGRADE_TSF, 22) \
57 BRCMF_ENUM_DEF(PRUNE, 23) \
58 BRCMF_ENUM_DEF(AUTOAUTH, 24) \
59 BRCMF_ENUM_DEF(EAPOL_MSG, 25) \
60 BRCMF_ENUM_DEF(SCAN_COMPLETE, 26) \
61 BRCMF_ENUM_DEF(ADDTS_IND, 27) \
62 BRCMF_ENUM_DEF(DELTS_IND, 28) \
63 BRCMF_ENUM_DEF(BCNSENT_IND, 29) \
64 BRCMF_ENUM_DEF(BCNRX_MSG, 30) \
65 BRCMF_ENUM_DEF(BCNLOST_MSG, 31) \
66 BRCMF_ENUM_DEF(ROAM_PREP, 32) \
67 BRCMF_ENUM_DEF(PFN_NET_FOUND, 33) \
68 BRCMF_ENUM_DEF(PFN_NET_LOST, 34) \
69 BRCMF_ENUM_DEF(RESET_COMPLETE, 35) \
70 BRCMF_ENUM_DEF(JOIN_START, 36) \
71 BRCMF_ENUM_DEF(ROAM_START, 37) \
72 BRCMF_ENUM_DEF(ASSOC_START, 38) \
73 BRCMF_ENUM_DEF(IBSS_ASSOC, 39) \
74 BRCMF_ENUM_DEF(RADIO, 40) \
75 BRCMF_ENUM_DEF(PSM_WATCHDOG, 41) \
76 BRCMF_ENUM_DEF(PROBREQ_MSG, 44) \
77 BRCMF_ENUM_DEF(SCAN_CONFIRM_IND, 45) \
78 BRCMF_ENUM_DEF(PSK_SUP, 46) \
79 BRCMF_ENUM_DEF(COUNTRY_CODE_CHANGED, 47) \
80 BRCMF_ENUM_DEF(EXCEEDED_MEDIUM_TIME, 48) \
81 BRCMF_ENUM_DEF(ICV_ERROR, 49) \
82 BRCMF_ENUM_DEF(UNICAST_DECODE_ERROR, 50) \
83 BRCMF_ENUM_DEF(MULTICAST_DECODE_ERROR, 51) \
84 BRCMF_ENUM_DEF(TRACE, 52) \
85 BRCMF_ENUM_DEF(IF, 54) \
86 BRCMF_ENUM_DEF(RSSI, 56) \
87 BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
88 BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
89 BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
90 BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
91 BRCMF_ENUM_DEF(PRE_ASSOC_IND, 61) \
92 BRCMF_ENUM_DEF(PRE_REASSOC_IND, 62) \
93 BRCMF_ENUM_DEF(CHANNEL_ADOPTED, 63) \
94 BRCMF_ENUM_DEF(AP_STARTED, 64) \
95 BRCMF_ENUM_DEF(DFS_AP_STOP, 65) \
96 BRCMF_ENUM_DEF(DFS_AP_RESUME, 66) \
97 BRCMF_ENUM_DEF(ESCAN_RESULT, 69) \
98 BRCMF_ENUM_DEF(ACTION_FRAME_OFF_CHAN_COMPLETE, 70) \
99 BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
100 BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74)
101
102#define BRCMF_ENUM_DEF(id, val) \
103 BRCMF_E_##id = (val),
104
105/* firmware event codes sent by the dongle */
106enum brcmf_fweh_event_code {
107 BRCMF_FWEH_EVENT_ENUM_DEFLIST
108 BRCMF_E_LAST
109};
110#undef BRCMF_ENUM_DEF
111
112/* flags field values in struct brcmf_event_msg */
113#define BRCMF_EVENT_MSG_LINK 0x01
114#define BRCMF_EVENT_MSG_FLUSHTXQ 0x02
115#define BRCMF_EVENT_MSG_GROUP 0x04
116
117/**
118 * definitions for event packet validation.
119 */
120#define BRCMF_EVENT_OUI_OFFSET 19
121#define BRCM_OUI "\x00\x10\x18"
122#define DOT11_OUI_LEN 3
123#define BCMILCP_BCM_SUBTYPE_EVENT 1
124
125
126/**
127 * struct brcmf_event_msg - firmware event message.
128 *
129 * @version: version information.
130 * @flags: event flags.
131 * @event_code: firmware event code.
132 * @status: status information.
133 * @reason: reason code.
134 * @auth_type: authentication type.
135 * @datalen: lenght of event data buffer.
136 * @addr: ether address.
137 * @ifname: interface name.
138 * @ifidx: interface index.
139 * @bsscfgidx: bsscfg index.
140 */
141struct brcmf_event_msg {
142 u16 version;
143 u16 flags;
144 u32 event_code;
145 u32 status;
146 u32 reason;
147 s32 auth_type;
148 u32 datalen;
149 u8 addr[ETH_ALEN];
150 char ifname[IFNAMSIZ];
151 u8 ifidx;
152 u8 bsscfgidx;
153};
154
155typedef int (*brcmf_fweh_handler_t)(struct brcmf_if *ifp,
156 const struct brcmf_event_msg *evtmsg,
157 void *data);
158
159/**
160 * struct brcmf_fweh_info - firmware event handling information.
161 *
162 * @event_work: event worker.
163 * @evt_q_lock: lock for event queue protection.
164 * @event_q: event queue.
165 * @evt_handler: registered event handlers.
166 */
167struct brcmf_fweh_info {
168 struct work_struct event_work;
169 spinlock_t evt_q_lock;
170 struct list_head event_q;
171 int (*evt_handler[BRCMF_E_LAST])(struct brcmf_if *ifp,
172 const struct brcmf_event_msg *evtmsg,
173 void *data);
174};
175
176void brcmf_fweh_attach(struct brcmf_pub *drvr);
177void brcmf_fweh_detach(struct brcmf_pub *drvr);
178int brcmf_fweh_register(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code,
179 int (*handler)(struct brcmf_if *ifp,
180 const struct brcmf_event_msg *evtmsg,
181 void *data));
182void brcmf_fweh_unregister(struct brcmf_pub *drvr,
183 enum brcmf_fweh_event_code code);
184int brcmf_fweh_activate_events(struct brcmf_if *ifp);
185void brcmf_fweh_process_event(struct brcmf_pub *drvr,
186 struct brcmf_event *event_packet, u8 *ifidx);
187
188static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
189 struct sk_buff *skb, u8 *ifidx)
190{
191 struct brcmf_event *event_packet;
192 u8 *data;
193 u16 usr_stype;
194
195 /* only process events when protocol matches */
196 if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
197 return;
198
199 /* check for BRCM oui match */
200 event_packet = (struct brcmf_event *)skb_mac_header(skb);
201 data = (u8 *)event_packet;
202 data += BRCMF_EVENT_OUI_OFFSET;
203 if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN))
204 return;
205
206 /* final match on usr_subtype */
207 data += DOT11_OUI_LEN;
208 usr_stype = get_unaligned_be16(data);
209 if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
210 return;
211
212 brcmf_fweh_process_event(drvr, event_packet, ifidx);
213}
214
215#endif /* FWEH_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
new file mode 100644
index 000000000000..d8d8b6549dc5
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -0,0 +1,344 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/* FWIL is the Firmware Interface Layer. In this module the support functions
18 * are located to set and get variables to and from the firmware.
19 */
20
21#include <linux/kernel.h>
22#include <linux/netdevice.h>
23#include <brcmu_utils.h>
24#include <brcmu_wifi.h>
25#include "dhd.h"
26#include "dhd_bus.h"
27#include "dhd_dbg.h"
28#include "fwil.h"
29
30
31#define MAX_HEX_DUMP_LEN 64
32
33
34static s32
35brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
36{
37 struct brcmf_pub *drvr = ifp->drvr;
38 s32 err;
39
40 if (drvr->bus_if->state != BRCMF_BUS_DATA) {
41 brcmf_err("bus is down. we have nothing to do.\n");
42 return -EIO;
43 }
44
45 if (data != NULL)
46 len = min_t(uint, len, BRCMF_DCMD_MAXLEN);
47 if (set)
48 err = brcmf_proto_cdc_set_dcmd(drvr, ifp->idx, cmd, data, len);
49 else
50 err = brcmf_proto_cdc_query_dcmd(drvr, ifp->idx, cmd, data,
51 len);
52
53 if (err >= 0)
54 err = 0;
55 else
56 brcmf_err("Failed err=%d\n", err);
57
58 return err;
59}
60
61s32
62brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
63{
64 s32 err;
65
66 mutex_lock(&ifp->drvr->proto_block);
67
68 brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
69 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
70 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
71
72 err = brcmf_fil_cmd_data(ifp, cmd, data, len, true);
73 mutex_unlock(&ifp->drvr->proto_block);
74
75 return err;
76}
77
78s32
79brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len)
80{
81 s32 err;
82
83 mutex_lock(&ifp->drvr->proto_block);
84 err = brcmf_fil_cmd_data(ifp, cmd, data, len, false);
85
86 brcmf_dbg(FIL, "cmd=%d, len=%d\n", cmd, len);
87 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
88 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
89
90 mutex_unlock(&ifp->drvr->proto_block);
91
92 return err;
93}
94
95
96s32
97brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data)
98{
99 s32 err;
100 __le32 data_le = cpu_to_le32(data);
101
102 mutex_lock(&ifp->drvr->proto_block);
103 err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true);
104 mutex_unlock(&ifp->drvr->proto_block);
105
106 return err;
107}
108
109s32
110brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
111{
112 s32 err;
113 __le32 data_le = cpu_to_le32(*data);
114
115 mutex_lock(&ifp->drvr->proto_block);
116 err = brcmf_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false);
117 mutex_unlock(&ifp->drvr->proto_block);
118 *data = le32_to_cpu(data_le);
119
120 return err;
121}
122
123static u32
124brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
125{
126 u32 len;
127
128 len = strlen(name) + 1;
129
130 if ((len + datalen) > buflen)
131 return 0;
132
133 memcpy(buf, name, len);
134
135 /* append data onto the end of the name string */
136 if (data && datalen)
137 memcpy(&buf[len], data, datalen);
138
139 return len + datalen;
140}
141
142
143s32
144brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
145 u32 len)
146{
147 struct brcmf_pub *drvr = ifp->drvr;
148 s32 err;
149 u32 buflen;
150
151 mutex_lock(&drvr->proto_block);
152
153 brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
154 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
155 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
156
157 buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
158 sizeof(drvr->proto_buf));
159 if (buflen) {
160 err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
161 buflen, true);
162 } else {
163 err = -EPERM;
164 brcmf_err("Creating iovar failed\n");
165 }
166
167 mutex_unlock(&drvr->proto_block);
168 return err;
169}
170
171s32
172brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
173 u32 len)
174{
175 struct brcmf_pub *drvr = ifp->drvr;
176 s32 err;
177 u32 buflen;
178
179 mutex_lock(&drvr->proto_block);
180
181 buflen = brcmf_create_iovar(name, data, len, drvr->proto_buf,
182 sizeof(drvr->proto_buf));
183 if (buflen) {
184 err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
185 buflen, false);
186 if (err == 0)
187 memcpy(data, drvr->proto_buf, len);
188 } else {
189 err = -EPERM;
190 brcmf_err("Creating iovar failed\n");
191 }
192
193 brcmf_dbg(FIL, "name=%s, len=%d\n", name, len);
194 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
195 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
196
197 mutex_unlock(&drvr->proto_block);
198 return err;
199}
200
201s32
202brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
203{
204 __le32 data_le = cpu_to_le32(data);
205
206 return brcmf_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le));
207}
208
209s32
210brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
211{
212 __le32 data_le = cpu_to_le32(*data);
213 s32 err;
214
215 err = brcmf_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le));
216 if (err == 0)
217 *data = le32_to_cpu(data_le);
218 return err;
219}
220
221static u32
222brcmf_create_bsscfg(s32 bssidx, char *name, char *data, u32 datalen, char *buf,
223 u32 buflen)
224{
225 const s8 *prefix = "bsscfg:";
226 s8 *p;
227 u32 prefixlen;
228 u32 namelen;
229 u32 iolen;
230 __le32 bssidx_le;
231
232 if (bssidx == 0)
233 return brcmf_create_iovar(name, data, datalen, buf, buflen);
234
235 prefixlen = strlen(prefix);
236 namelen = strlen(name) + 1; /* lengh of iovar name + null */
237 iolen = prefixlen + namelen + sizeof(bssidx_le) + datalen;
238
239 if (buflen < iolen) {
240 brcmf_err("buffer is too short\n");
241 return 0;
242 }
243
244 p = buf;
245
246 /* copy prefix, no null */
247 memcpy(p, prefix, prefixlen);
248 p += prefixlen;
249
250 /* copy iovar name including null */
251 memcpy(p, name, namelen);
252 p += namelen;
253
254 /* bss config index as first data */
255 bssidx_le = cpu_to_le32(bssidx);
256 memcpy(p, &bssidx_le, sizeof(bssidx_le));
257 p += sizeof(bssidx_le);
258
259 /* parameter buffer follows */
260 if (datalen)
261 memcpy(p, data, datalen);
262
263 return iolen;
264}
265
266s32
267brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
268 void *data, u32 len)
269{
270 struct brcmf_pub *drvr = ifp->drvr;
271 s32 err;
272 u32 buflen;
273
274 mutex_lock(&drvr->proto_block);
275
276 brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
277 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
278 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
279
280 buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
281 drvr->proto_buf, sizeof(drvr->proto_buf));
282 if (buflen) {
283 err = brcmf_fil_cmd_data(ifp, BRCMF_C_SET_VAR, drvr->proto_buf,
284 buflen, true);
285 } else {
286 err = -EPERM;
287 brcmf_err("Creating bsscfg failed\n");
288 }
289
290 mutex_unlock(&drvr->proto_block);
291 return err;
292}
293
294s32
295brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
296 void *data, u32 len)
297{
298 struct brcmf_pub *drvr = ifp->drvr;
299 s32 err;
300 u32 buflen;
301
302 mutex_lock(&drvr->proto_block);
303
304 buflen = brcmf_create_bsscfg(ifp->bssidx, name, data, len,
305 drvr->proto_buf, sizeof(drvr->proto_buf));
306 if (buflen) {
307 err = brcmf_fil_cmd_data(ifp, BRCMF_C_GET_VAR, drvr->proto_buf,
308 buflen, false);
309 if (err == 0)
310 memcpy(data, drvr->proto_buf, len);
311 } else {
312 err = -EPERM;
313 brcmf_err("Creating bsscfg failed\n");
314 }
315 brcmf_dbg(FIL, "bssidx=%d, name=%s, len=%d\n", ifp->bssidx, name, len);
316 brcmf_dbg_hex_dump(BRCMF_FIL_ON(), data,
317 min_t(uint, len, MAX_HEX_DUMP_LEN), "data");
318
319 mutex_unlock(&drvr->proto_block);
320 return err;
321
322}
323
324s32
325brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
326{
327 __le32 data_le = cpu_to_le32(data);
328
329 return brcmf_fil_bsscfg_data_set(ifp, name, &data_le,
330 sizeof(data_le));
331}
332
333s32
334brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
335{
336 __le32 data_le = cpu_to_le32(*data);
337 s32 err;
338
339 err = brcmf_fil_bsscfg_data_get(ifp, name, &data_le,
340 sizeof(data_le));
341 if (err == 0)
342 *data = le32_to_cpu(data_le);
343 return err;
344}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
new file mode 100644
index 000000000000..16eb8202fb1e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -0,0 +1,39 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _fwil_h_
18#define _fwil_h_
19
20s32 brcmf_fil_cmd_data_set(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
21s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
22s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
23s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
24
25s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
26 u32 len);
27s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
28 u32 len);
29s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
30s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
31
32s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
33 u32 len);
34s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
35 u32 len);
36s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
37s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
38
39#endif /* _fwil_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index 9434440bbc65..b1bb46c49799 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -186,7 +186,7 @@ brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
186 CORE_SB(base, sbtmstatehigh), 186 CORE_SB(base, sbtmstatehigh),
187 NULL); 187 NULL);
188 if (regdata & SSB_TMSHIGH_BUSY) 188 if (regdata & SSB_TMSHIGH_BUSY)
189 brcmf_dbg(ERROR, "core state still busy\n"); 189 brcmf_err("core state still busy\n");
190 190
191 regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow), 191 regdata = brcmf_sdio_regrl(sdiodev, CORE_SB(base, sbidlow),
192 NULL); 192 NULL);
@@ -438,7 +438,7 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
438 ci->ramsize = 0x80000; 438 ci->ramsize = 0x80000;
439 break; 439 break;
440 default: 440 default:
441 brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip); 441 brcmf_err("chipid 0x%x is not supported\n", ci->chip);
442 return -ENODEV; 442 return -ENODEV;
443 } 443 }
444 444
@@ -456,7 +456,7 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
456 ci->resetcore = brcmf_sdio_ai_resetcore; 456 ci->resetcore = brcmf_sdio_ai_resetcore;
457 break; 457 break;
458 default: 458 default:
459 brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype); 459 brcmf_err("socitype %u not supported\n", ci->socitype);
460 return -ENODEV; 460 return -ENODEV;
461 } 461 }
462 462
@@ -473,7 +473,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
473 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; 473 clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
474 brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); 474 brcmf_sdio_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
475 if (err) { 475 if (err) {
476 brcmf_dbg(ERROR, "error writing for HT off\n"); 476 brcmf_err("error writing for HT off\n");
477 return err; 477 return err;
478 } 478 }
479 479
@@ -483,7 +483,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
483 SBSDIO_FUNC1_CHIPCLKCSR, NULL); 483 SBSDIO_FUNC1_CHIPCLKCSR, NULL);
484 484
485 if ((clkval & ~SBSDIO_AVBITS) != clkset) { 485 if ((clkval & ~SBSDIO_AVBITS) != clkset) {
486 brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n", 486 brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
487 clkset, clkval); 487 clkset, clkval);
488 return -EACCES; 488 return -EACCES;
489 } 489 }
@@ -493,7 +493,7 @@ brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
493 !SBSDIO_ALPAV(clkval)), 493 !SBSDIO_ALPAV(clkval)),
494 PMU_MAX_TRANSITION_DLY); 494 PMU_MAX_TRANSITION_DLY);
495 if (!SBSDIO_ALPAV(clkval)) { 495 if (!SBSDIO_ALPAV(clkval)) {
496 brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n", 496 brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
497 clkval); 497 clkval);
498 return -EBUSY; 498 return -EBUSY;
499 } 499 }
@@ -618,7 +618,7 @@ brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
618 str_shift = 11; 618 str_shift = 11;
619 break; 619 break;
620 default: 620 default:
621 brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", 621 brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
622 brcmf_sdio_chip_name(ci->chip, chn, 8), 622 brcmf_sdio_chip_name(ci->chip, chn, 8),
623 ci->chiprev, ci->pmurev); 623 ci->chiprev, ci->pmurev);
624 break; 624 break;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 7a6dfdc67b6c..914c56fe6c5f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -14,24 +14,12 @@
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/init.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/kthread.h>
21#include <linux/slab.h>
22#include <linux/skbuff.h>
23#include <linux/netdevice.h>
24#include <linux/spinlock.h>
25#include <linux/ethtool.h>
26#include <linux/fcntl.h>
27#include <linux/fs.h>
28#include <linux/uaccess.h>
29#include <linux/firmware.h> 19#include <linux/firmware.h>
30#include <linux/usb.h> 20#include <linux/usb.h>
31#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
32#include <net/cfg80211.h>
33 22
34#include <defs.h>
35#include <brcmu_utils.h> 23#include <brcmu_utils.h>
36#include <brcmu_wifi.h> 24#include <brcmu_wifi.h>
37#include <dhd_bus.h> 25#include <dhd_bus.h>
@@ -42,14 +30,11 @@
42 30
43#define IOCTL_RESP_TIMEOUT 2000 31#define IOCTL_RESP_TIMEOUT 2000
44 32
45#define BRCMF_USB_SYNC_TIMEOUT 300 /* ms */ 33#define BRCMF_USB_RESET_GETVER_SPINWAIT 100 /* in unit of ms */
46#define BRCMF_USB_DLIMAGE_SPINWAIT 100 /* in unit of ms */ 34#define BRCMF_USB_RESET_GETVER_LOOP_CNT 10
47#define BRCMF_USB_DLIMAGE_LIMIT 500 /* spinwait limit (ms) */
48 35
49#define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle 36#define BRCMF_POSTBOOT_ID 0xA123 /* ID to detect if dongle
50 has boot up */ 37 has boot up */
51#define BRCMF_USB_RESETCFG_SPINWAIT 1 /* wait after resetcfg (ms) */
52
53#define BRCMF_USB_NRXQ 50 38#define BRCMF_USB_NRXQ 50
54#define BRCMF_USB_NTXQ 50 39#define BRCMF_USB_NTXQ 50
55 40
@@ -70,16 +55,6 @@
70#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin" 55#define BRCMF_USB_43236_FW_NAME "brcm/brcmfmac43236b.bin"
71#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin" 56#define BRCMF_USB_43242_FW_NAME "brcm/brcmfmac43242a.bin"
72 57
73enum usbdev_suspend_state {
74 USBOS_SUSPEND_STATE_DEVICE_ACTIVE = 0, /* Device is busy, won't allow
75 suspend */
76 USBOS_SUSPEND_STATE_SUSPEND_PENDING, /* Device is idle, can be
77 * suspended. Wating PM to
78 * suspend the device
79 */
80 USBOS_SUSPEND_STATE_SUSPENDED /* Device suspended */
81};
82
83struct brcmf_usb_image { 58struct brcmf_usb_image {
84 struct list_head list; 59 struct list_head list;
85 s8 *fwname; 60 s8 *fwname;
@@ -100,10 +75,8 @@ struct brcmf_usbdev_info {
100 struct list_head rx_postq; 75 struct list_head rx_postq;
101 struct list_head tx_freeq; 76 struct list_head tx_freeq;
102 struct list_head tx_postq; 77 struct list_head tx_postq;
103 enum usbdev_suspend_state suspend_state;
104 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2; 78 uint rx_pipe, tx_pipe, intr_pipe, rx_pipe2;
105 79
106 bool activity;
107 int rx_low_watermark; 80 int rx_low_watermark;
108 int tx_low_watermark; 81 int tx_low_watermark;
109 int tx_high_watermark; 82 int tx_high_watermark;
@@ -116,10 +89,6 @@ struct brcmf_usbdev_info {
116 u8 *image; /* buffer for combine fw and nvram */ 89 u8 *image; /* buffer for combine fw and nvram */
117 int image_len; 90 int image_len;
118 91
119 wait_queue_head_t wait;
120 bool waitdone;
121 int sync_urb_status;
122
123 struct usb_device *usbdev; 92 struct usb_device *usbdev;
124 struct device *dev; 93 struct device *dev;
125 94
@@ -131,7 +100,6 @@ struct brcmf_usbdev_info {
131 int ctl_urb_status; 100 int ctl_urb_status;
132 int ctl_completed; 101 int ctl_completed;
133 wait_queue_head_t ioctl_resp_wait; 102 wait_queue_head_t ioctl_resp_wait;
134 wait_queue_head_t ctrl_wait;
135 ulong ctl_op; 103 ulong ctl_op;
136 104
137 struct urb *bulk_urb; /* used for FW download */ 105 struct urb *bulk_urb; /* used for FW download */
@@ -176,6 +144,7 @@ static void brcmf_usb_ioctl_resp_wake(struct brcmf_usbdev_info *devinfo)
176static void 144static void
177brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status) 145brcmf_usb_ctl_complete(struct brcmf_usbdev_info *devinfo, int type, int status)
178{ 146{
147 brcmf_dbg(USB, "Enter, status=%d\n", status);
179 148
180 if (unlikely(devinfo == NULL)) 149 if (unlikely(devinfo == NULL))
181 return; 150 return;
@@ -203,6 +172,7 @@ brcmf_usb_ctlread_complete(struct urb *urb)
203 struct brcmf_usbdev_info *devinfo = 172 struct brcmf_usbdev_info *devinfo =
204 (struct brcmf_usbdev_info *)urb->context; 173 (struct brcmf_usbdev_info *)urb->context;
205 174
175 brcmf_dbg(USB, "Enter\n");
206 devinfo->ctl_urb_actual_length = urb->actual_length; 176 devinfo->ctl_urb_actual_length = urb->actual_length;
207 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ, 177 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_READ,
208 urb->status); 178 urb->status);
@@ -214,33 +184,22 @@ brcmf_usb_ctlwrite_complete(struct urb *urb)
214 struct brcmf_usbdev_info *devinfo = 184 struct brcmf_usbdev_info *devinfo =
215 (struct brcmf_usbdev_info *)urb->context; 185 (struct brcmf_usbdev_info *)urb->context;
216 186
187 brcmf_dbg(USB, "Enter\n");
217 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE, 188 brcmf_usb_ctl_complete(devinfo, BRCMF_USB_CBCTL_WRITE,
218 urb->status); 189 urb->status);
219} 190}
220 191
221static int brcmf_usb_pnp(struct brcmf_usbdev_info *devinfo, uint state)
222{
223 return 0;
224}
225
226static int 192static int
227brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len) 193brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
228{ 194{
229 int ret; 195 int ret;
230 u16 size; 196 u16 size;
231 197
198 brcmf_dbg(USB, "Enter\n");
232 if (devinfo == NULL || buf == NULL || 199 if (devinfo == NULL || buf == NULL ||
233 len == 0 || devinfo->ctl_urb == NULL) 200 len == 0 || devinfo->ctl_urb == NULL)
234 return -EINVAL; 201 return -EINVAL;
235 202
236 /* If the USB/HSIC bus in sleep state, wake it up */
237 if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED)
238 if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
239 brcmf_dbg(ERROR, "Could not Resume the bus!\n");
240 return -EIO;
241 }
242
243 devinfo->activity = true;
244 size = len; 203 size = len;
245 devinfo->ctl_write.wLength = cpu_to_le16p(&size); 204 devinfo->ctl_write.wLength = cpu_to_le16p(&size);
246 devinfo->ctl_urb->transfer_buffer_length = size; 205 devinfo->ctl_urb->transfer_buffer_length = size;
@@ -257,7 +216,7 @@ brcmf_usb_send_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
257 216
258 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 217 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
259 if (ret < 0) 218 if (ret < 0)
260 brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret); 219 brcmf_err("usb_submit_urb failed %d\n", ret);
261 220
262 return ret; 221 return ret;
263} 222}
@@ -268,6 +227,7 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
268 int ret; 227 int ret;
269 u16 size; 228 u16 size;
270 229
230 brcmf_dbg(USB, "Enter\n");
271 if ((devinfo == NULL) || (buf == NULL) || (len == 0) 231 if ((devinfo == NULL) || (buf == NULL) || (len == 0)
272 || (devinfo->ctl_urb == NULL)) 232 || (devinfo->ctl_urb == NULL))
273 return -EINVAL; 233 return -EINVAL;
@@ -290,7 +250,7 @@ brcmf_usb_recv_ctl(struct brcmf_usbdev_info *devinfo, u8 *buf, int len)
290 250
291 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 251 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
292 if (ret < 0) 252 if (ret < 0)
293 brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret); 253 brcmf_err("usb_submit_urb failed %d\n", ret);
294 254
295 return ret; 255 return ret;
296} 256}
@@ -301,10 +261,9 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
301 int timeout = 0; 261 int timeout = 0;
302 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 262 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
303 263
304 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 264 brcmf_dbg(USB, "Enter\n");
305 /* TODO: handle suspend/resume */ 265 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
306 return -EIO; 266 return -EIO;
307 }
308 267
309 if (test_and_set_bit(0, &devinfo->ctl_op)) 268 if (test_and_set_bit(0, &devinfo->ctl_op))
310 return -EIO; 269 return -EIO;
@@ -312,14 +271,14 @@ static int brcmf_usb_tx_ctlpkt(struct device *dev, u8 *buf, u32 len)
312 devinfo->ctl_completed = false; 271 devinfo->ctl_completed = false;
313 err = brcmf_usb_send_ctl(devinfo, buf, len); 272 err = brcmf_usb_send_ctl(devinfo, buf, len);
314 if (err) { 273 if (err) {
315 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 274 brcmf_err("fail %d bytes: %d\n", err, len);
316 clear_bit(0, &devinfo->ctl_op); 275 clear_bit(0, &devinfo->ctl_op);
317 return err; 276 return err;
318 } 277 }
319 timeout = brcmf_usb_ioctl_resp_wait(devinfo); 278 timeout = brcmf_usb_ioctl_resp_wait(devinfo);
320 clear_bit(0, &devinfo->ctl_op); 279 clear_bit(0, &devinfo->ctl_op);
321 if (!timeout) { 280 if (!timeout) {
322 brcmf_dbg(ERROR, "Txctl wait timed out\n"); 281 brcmf_err("Txctl wait timed out\n");
323 err = -EIO; 282 err = -EIO;
324 } 283 }
325 return err; 284 return err;
@@ -331,17 +290,17 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
331 int timeout = 0; 290 int timeout = 0;
332 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 291 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
333 292
334 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 293 brcmf_dbg(USB, "Enter\n");
335 /* TODO: handle suspend/resume */ 294 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
336 return -EIO; 295 return -EIO;
337 } 296
338 if (test_and_set_bit(0, &devinfo->ctl_op)) 297 if (test_and_set_bit(0, &devinfo->ctl_op))
339 return -EIO; 298 return -EIO;
340 299
341 devinfo->ctl_completed = false; 300 devinfo->ctl_completed = false;
342 err = brcmf_usb_recv_ctl(devinfo, buf, len); 301 err = brcmf_usb_recv_ctl(devinfo, buf, len);
343 if (err) { 302 if (err) {
344 brcmf_dbg(ERROR, "fail %d bytes: %d\n", err, len); 303 brcmf_err("fail %d bytes: %d\n", err, len);
345 clear_bit(0, &devinfo->ctl_op); 304 clear_bit(0, &devinfo->ctl_op);
346 return err; 305 return err;
347 } 306 }
@@ -349,7 +308,7 @@ static int brcmf_usb_rx_ctlpkt(struct device *dev, u8 *buf, u32 len)
349 err = devinfo->ctl_urb_status; 308 err = devinfo->ctl_urb_status;
350 clear_bit(0, &devinfo->ctl_op); 309 clear_bit(0, &devinfo->ctl_op);
351 if (!timeout) { 310 if (!timeout) {
352 brcmf_dbg(ERROR, "rxctl wait timed out\n"); 311 brcmf_err("rxctl wait timed out\n");
353 err = -EIO; 312 err = -EIO;
354 } 313 }
355 if (!err) 314 if (!err)
@@ -397,7 +356,7 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
397 356
398 reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC); 357 reqs = kzalloc(sizeof(struct brcmf_usbreq) * qsize, GFP_ATOMIC);
399 if (reqs == NULL) { 358 if (reqs == NULL) {
400 brcmf_dbg(ERROR, "fail to allocate memory!\n"); 359 brcmf_err("fail to allocate memory!\n");
401 return NULL; 360 return NULL;
402 } 361 }
403 req = reqs; 362 req = reqs;
@@ -413,7 +372,7 @@ brcmf_usbdev_qinit(struct list_head *q, int qsize)
413 } 372 }
414 return reqs; 373 return reqs;
415fail: 374fail:
416 brcmf_dbg(ERROR, "fail!\n"); 375 brcmf_err("fail!\n");
417 while (!list_empty(q)) { 376 while (!list_empty(q)) {
418 req = list_entry(q->next, struct brcmf_usbreq, list); 377 req = list_entry(q->next, struct brcmf_usbreq, list);
419 if (req && req->urb) 378 if (req && req->urb)
@@ -430,7 +389,7 @@ static void brcmf_usb_free_q(struct list_head *q, bool pending)
430 int i = 0; 389 int i = 0;
431 list_for_each_entry_safe(req, next, q, list) { 390 list_for_each_entry_safe(req, next, q, list) {
432 if (!req->urb) { 391 if (!req->urb) {
433 brcmf_dbg(ERROR, "bad req\n"); 392 brcmf_err("bad req\n");
434 break; 393 break;
435 } 394 }
436 i++; 395 i++;
@@ -459,6 +418,8 @@ static void brcmf_usb_tx_complete(struct urb *urb)
459 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context; 418 struct brcmf_usbreq *req = (struct brcmf_usbreq *)urb->context;
460 struct brcmf_usbdev_info *devinfo = req->devinfo; 419 struct brcmf_usbdev_info *devinfo = req->devinfo;
461 420
421 brcmf_dbg(USB, "Enter, urb->status=%d, skb=%p\n", urb->status,
422 req->skb);
462 brcmf_usb_del_fromq(devinfo, req); 423 brcmf_usb_del_fromq(devinfo, req);
463 if (urb->status == 0) 424 if (urb->status == 0)
464 devinfo->bus_pub.bus->dstats.tx_packets++; 425 devinfo->bus_pub.bus->dstats.tx_packets++;
@@ -484,6 +445,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
484 struct sk_buff *skb; 445 struct sk_buff *skb;
485 int ifidx = 0; 446 int ifidx = 0;
486 447
448 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
487 brcmf_usb_del_fromq(devinfo, req); 449 brcmf_usb_del_fromq(devinfo, req);
488 skb = req->skb; 450 skb = req->skb;
489 req->skb = NULL; 451 req->skb = NULL;
@@ -497,10 +459,10 @@ static void brcmf_usb_rx_complete(struct urb *urb)
497 return; 459 return;
498 } 460 }
499 461
500 if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) { 462 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
501 skb_put(skb, urb->actual_length); 463 skb_put(skb, urb->actual_length);
502 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) { 464 if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) {
503 brcmf_dbg(ERROR, "rx protocol error\n"); 465 brcmf_err("rx protocol error\n");
504 brcmu_pkt_buf_free_skb(skb); 466 brcmu_pkt_buf_free_skb(skb);
505 devinfo->bus_pub.bus->dstats.rx_errors++; 467 devinfo->bus_pub.bus->dstats.rx_errors++;
506 } else 468 } else
@@ -550,8 +512,8 @@ static void brcmf_usb_rx_fill_all(struct brcmf_usbdev_info *devinfo)
550{ 512{
551 struct brcmf_usbreq *req; 513 struct brcmf_usbreq *req;
552 514
553 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 515 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
554 brcmf_dbg(ERROR, "bus is not up\n"); 516 brcmf_err("bus is not up=%d\n", devinfo->bus_pub.state);
555 return; 517 return;
556 } 518 }
557 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL) 519 while ((req = brcmf_usb_deq(devinfo, &devinfo->rx_freeq, NULL)) != NULL)
@@ -564,29 +526,24 @@ brcmf_usb_state_change(struct brcmf_usbdev_info *devinfo, int state)
564 struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus; 526 struct brcmf_bus *bcmf_bus = devinfo->bus_pub.bus;
565 int old_state; 527 int old_state;
566 528
529 brcmf_dbg(USB, "Enter, current state=%d, new state=%d\n",
530 devinfo->bus_pub.state, state);
567 531
568 if (devinfo->bus_pub.state == state) 532 if (devinfo->bus_pub.state == state)
569 return; 533 return;
570 534
571 old_state = devinfo->bus_pub.state; 535 old_state = devinfo->bus_pub.state;
572 brcmf_dbg(TRACE, "dbus state change from %d to to %d\n", 536 devinfo->bus_pub.state = state;
573 old_state, state);
574
575 /* Don't update state if it's PnP firmware re-download */
576 if (state != BCMFMAC_USB_STATE_PNP_FWDL) /* TODO */
577 devinfo->bus_pub.state = state;
578
579 if ((old_state == BCMFMAC_USB_STATE_SLEEP)
580 && (state == BCMFMAC_USB_STATE_UP)) {
581 brcmf_usb_rx_fill_all(devinfo);
582 }
583 537
584 /* update state of upper layer */ 538 /* update state of upper layer */
585 if (state == BCMFMAC_USB_STATE_DOWN) { 539 if (state == BRCMFMAC_USB_STATE_DOWN) {
586 brcmf_dbg(INFO, "DBUS is down\n"); 540 brcmf_dbg(USB, "DBUS is down\n");
587 bcmf_bus->state = BRCMF_BUS_DOWN; 541 bcmf_bus->state = BRCMF_BUS_DOWN;
542 } else if (state == BRCMFMAC_USB_STATE_UP) {
543 brcmf_dbg(USB, "DBUS is up\n");
544 bcmf_bus->state = BRCMF_BUS_DATA;
588 } else { 545 } else {
589 brcmf_dbg(INFO, "DBUS current state=%d\n", state); 546 brcmf_dbg(USB, "DBUS current state=%d\n", state);
590 } 547 }
591} 548}
592 549
@@ -595,30 +552,32 @@ brcmf_usb_intr_complete(struct urb *urb)
595{ 552{
596 struct brcmf_usbdev_info *devinfo = 553 struct brcmf_usbdev_info *devinfo =
597 (struct brcmf_usbdev_info *)urb->context; 554 (struct brcmf_usbdev_info *)urb->context;
598 bool killed; 555 int err;
556
557 brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
599 558
600 if (devinfo == NULL) 559 if (devinfo == NULL)
601 return; 560 return;
602 561
603 if (unlikely(urb->status)) { 562 if (unlikely(urb->status)) {
604 if (devinfo->suspend_state == 563 if (urb->status == -ENOENT ||
605 USBOS_SUSPEND_STATE_SUSPEND_PENDING) 564 urb->status == -ESHUTDOWN ||
606 killed = true; 565 urb->status == -ENODEV) {
607 566 brcmf_usb_state_change(devinfo,
608 if ((urb->status == -ENOENT && (!killed)) 567 BRCMFMAC_USB_STATE_DOWN);
609 || urb->status == -ESHUTDOWN ||
610 urb->status == -ENODEV) {
611 brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN);
612 } 568 }
613 } 569 }
614 570
615 if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN) { 571 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN) {
616 brcmf_dbg(ERROR, "intr cb when DBUS down, ignoring\n"); 572 brcmf_err("intr cb when DBUS down, ignoring\n");
617 return; 573 return;
618 } 574 }
619 575
620 if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) 576 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
621 usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC); 577 err = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
578 if (err)
579 brcmf_err("usb_submit_urb, err=%d\n", err);
580 }
622} 581}
623 582
624static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) 583static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
@@ -627,16 +586,15 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
627 struct brcmf_usbreq *req; 586 struct brcmf_usbreq *req;
628 int ret; 587 int ret;
629 588
630 if (devinfo->bus_pub.state != BCMFMAC_USB_STATE_UP) { 589 brcmf_dbg(USB, "Enter, skb=%p\n", skb);
631 /* TODO: handle suspend/resume */ 590 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
632 return -EIO; 591 return -EIO;
633 }
634 592
635 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq, 593 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
636 &devinfo->tx_freecount); 594 &devinfo->tx_freecount);
637 if (!req) { 595 if (!req) {
638 brcmu_pkt_buf_free_skb(skb); 596 brcmu_pkt_buf_free_skb(skb);
639 brcmf_dbg(ERROR, "no req to send\n"); 597 brcmf_err("no req to send\n");
640 return -ENOMEM; 598 return -ENOMEM;
641 } 599 }
642 600
@@ -648,7 +606,7 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
648 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL); 606 brcmf_usb_enq(devinfo, &devinfo->tx_postq, req, NULL);
649 ret = usb_submit_urb(req->urb, GFP_ATOMIC); 607 ret = usb_submit_urb(req->urb, GFP_ATOMIC);
650 if (ret) { 608 if (ret) {
651 brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n"); 609 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
652 brcmf_usb_del_fromq(devinfo, req); 610 brcmf_usb_del_fromq(devinfo, req);
653 brcmu_pkt_buf_free_skb(req->skb); 611 brcmu_pkt_buf_free_skb(req->skb);
654 req->skb = NULL; 612 req->skb = NULL;
@@ -670,25 +628,16 @@ static int brcmf_usb_up(struct device *dev)
670{ 628{
671 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 629 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
672 u16 ifnum; 630 u16 ifnum;
631 int ret;
673 632
674 if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_UP) 633 brcmf_dbg(USB, "Enter\n");
634 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP)
675 return 0; 635 return 0;
676 636
677 /* If the USB/HSIC bus in sleep state, wake it up */
678 if (devinfo->suspend_state == USBOS_SUSPEND_STATE_SUSPENDED) {
679 if (brcmf_usb_pnp(devinfo, BCMFMAC_USB_PNP_RESUME) != 0) {
680 brcmf_dbg(ERROR, "Could not Resume the bus!\n");
681 return -EIO;
682 }
683 }
684 devinfo->activity = true;
685
686 /* Success, indicate devinfo is fully up */ 637 /* Success, indicate devinfo is fully up */
687 brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_UP); 638 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_UP);
688 639
689 if (devinfo->intr_urb) { 640 if (devinfo->intr_urb) {
690 int ret;
691
692 usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev, 641 usb_fill_int_urb(devinfo->intr_urb, devinfo->usbdev,
693 devinfo->intr_pipe, 642 devinfo->intr_pipe,
694 &devinfo->intr, 643 &devinfo->intr,
@@ -699,7 +648,7 @@ static int brcmf_usb_up(struct device *dev)
699 648
700 ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC); 649 ret = usb_submit_urb(devinfo->intr_urb, GFP_ATOMIC);
701 if (ret) { 650 if (ret) {
702 brcmf_dbg(ERROR, "USB_SUBMIT_URB failed with status %d\n", 651 brcmf_err("USB_SUBMIT_URB failed with status %d\n",
703 ret); 652 ret);
704 return -EINVAL; 653 return -EINVAL;
705 } 654 }
@@ -733,14 +682,14 @@ static void brcmf_usb_down(struct device *dev)
733{ 682{
734 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev); 683 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(dev);
735 684
685 brcmf_dbg(USB, "Enter\n");
736 if (devinfo == NULL) 686 if (devinfo == NULL)
737 return; 687 return;
738 688
739 brcmf_dbg(TRACE, "enter\n"); 689 if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_DOWN)
740 if (devinfo->bus_pub.state == BCMFMAC_USB_STATE_DOWN)
741 return; 690 return;
742 691
743 brcmf_usb_state_change(devinfo, BCMFMAC_USB_STATE_DOWN); 692 brcmf_usb_state_change(devinfo, BRCMFMAC_USB_STATE_DOWN);
744 if (devinfo->intr_urb) 693 if (devinfo->intr_urb)
745 usb_kill_urb(devinfo->intr_urb); 694 usb_kill_urb(devinfo->intr_urb);
746 695
@@ -754,34 +703,14 @@ static void brcmf_usb_down(struct device *dev)
754 brcmf_usb_free_q(&devinfo->rx_postq, true); 703 brcmf_usb_free_q(&devinfo->rx_postq, true);
755} 704}
756 705
757static int
758brcmf_usb_sync_wait(struct brcmf_usbdev_info *devinfo, u16 time)
759{
760 int ret;
761 int err = 0;
762 int ms = time;
763
764 ret = wait_event_interruptible_timeout(devinfo->wait,
765 devinfo->waitdone == true, (ms * HZ / 1000));
766
767 if ((devinfo->waitdone == false) || (devinfo->sync_urb_status)) {
768 brcmf_dbg(ERROR, "timeout(%d) or urb err=%d\n",
769 ret, devinfo->sync_urb_status);
770 err = -EINVAL;
771 }
772 devinfo->waitdone = false;
773 return err;
774}
775
776static void 706static void
777brcmf_usb_sync_complete(struct urb *urb) 707brcmf_usb_sync_complete(struct urb *urb)
778{ 708{
779 struct brcmf_usbdev_info *devinfo = 709 struct brcmf_usbdev_info *devinfo =
780 (struct brcmf_usbdev_info *)urb->context; 710 (struct brcmf_usbdev_info *)urb->context;
781 711
782 devinfo->waitdone = true; 712 devinfo->ctl_completed = true;
783 wake_up_interruptible(&devinfo->wait); 713 brcmf_usb_ioctl_resp_wake(devinfo);
784 devinfo->sync_urb_status = urb->status;
785} 714}
786 715
787static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd, 716static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
@@ -813,18 +742,19 @@ static bool brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
813 (void *) tmpbuf, size, 742 (void *) tmpbuf, size,
814 (usb_complete_t)brcmf_usb_sync_complete, devinfo); 743 (usb_complete_t)brcmf_usb_sync_complete, devinfo);
815 744
745 devinfo->ctl_completed = false;
816 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC); 746 ret = usb_submit_urb(devinfo->ctl_urb, GFP_ATOMIC);
817 if (ret < 0) { 747 if (ret < 0) {
818 brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret); 748 brcmf_err("usb_submit_urb failed %d\n", ret);
819 kfree(tmpbuf); 749 kfree(tmpbuf);
820 return false; 750 return false;
821 } 751 }
822 752
823 ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT); 753 ret = brcmf_usb_ioctl_resp_wait(devinfo);
824 memcpy(buffer, tmpbuf, buflen); 754 memcpy(buffer, tmpbuf, buflen);
825 kfree(tmpbuf); 755 kfree(tmpbuf);
826 756
827 return (ret == 0); 757 return ret;
828} 758}
829 759
830static bool 760static bool
@@ -833,27 +763,25 @@ brcmf_usb_dlneeded(struct brcmf_usbdev_info *devinfo)
833 struct bootrom_id_le id; 763 struct bootrom_id_le id;
834 u32 chipid, chiprev; 764 u32 chipid, chiprev;
835 765
836 brcmf_dbg(TRACE, "enter\n"); 766 brcmf_dbg(USB, "Enter\n");
837 767
838 if (devinfo == NULL) 768 if (devinfo == NULL)
839 return false; 769 return false;
840 770
841 /* Check if firmware downloaded already by querying runtime ID */ 771 /* Check if firmware downloaded already by querying runtime ID */
842 id.chip = cpu_to_le32(0xDEAD); 772 id.chip = cpu_to_le32(0xDEAD);
843 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, 773 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
844 sizeof(struct bootrom_id_le));
845 774
846 chipid = le32_to_cpu(id.chip); 775 chipid = le32_to_cpu(id.chip);
847 chiprev = le32_to_cpu(id.chiprev); 776 chiprev = le32_to_cpu(id.chiprev);
848 777
849 if ((chipid & 0x4300) == 0x4300) 778 if ((chipid & 0x4300) == 0x4300)
850 brcmf_dbg(INFO, "chip %x rev 0x%x\n", chipid, chiprev); 779 brcmf_dbg(USB, "chip %x rev 0x%x\n", chipid, chiprev);
851 else 780 else
852 brcmf_dbg(INFO, "chip %d rev 0x%x\n", chipid, chiprev); 781 brcmf_dbg(USB, "chip %d rev 0x%x\n", chipid, chiprev);
853 if (chipid == BRCMF_POSTBOOT_ID) { 782 if (chipid == BRCMF_POSTBOOT_ID) {
854 brcmf_dbg(INFO, "firmware already downloaded\n"); 783 brcmf_dbg(USB, "firmware already downloaded\n");
855 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, 784 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
856 sizeof(struct bootrom_id_le));
857 return false; 785 return false;
858 } else { 786 } else {
859 devinfo->bus_pub.devid = chipid; 787 devinfo->bus_pub.devid = chipid;
@@ -866,38 +794,29 @@ static int
866brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo) 794brcmf_usb_resetcfg(struct brcmf_usbdev_info *devinfo)
867{ 795{
868 struct bootrom_id_le id; 796 struct bootrom_id_le id;
869 u16 wait = 0, wait_time; 797 u32 loop_cnt;
870
871 brcmf_dbg(TRACE, "enter\n");
872 798
873 if (devinfo == NULL) 799 brcmf_dbg(USB, "Enter\n");
874 return -EINVAL;
875 800
876 /* Give dongle chance to boot */ 801 loop_cnt = 0;
877 wait_time = BRCMF_USB_DLIMAGE_SPINWAIT; 802 do {
878 while (wait < BRCMF_USB_DLIMAGE_LIMIT) { 803 mdelay(BRCMF_USB_RESET_GETVER_SPINWAIT);
879 mdelay(wait_time); 804 loop_cnt++;
880 wait += wait_time;
881 id.chip = cpu_to_le32(0xDEAD); /* Get the ID */ 805 id.chip = cpu_to_le32(0xDEAD); /* Get the ID */
882 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, 806 brcmf_usb_dl_cmd(devinfo, DL_GETVER, &id, sizeof(id));
883 sizeof(struct bootrom_id_le));
884 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) 807 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID))
885 break; 808 break;
886 } 809 } while (loop_cnt < BRCMF_USB_RESET_GETVER_LOOP_CNT);
887 810
888 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) { 811 if (id.chip == cpu_to_le32(BRCMF_POSTBOOT_ID)) {
889 brcmf_dbg(INFO, "download done %d ms postboot chip 0x%x/rev 0x%x\n", 812 brcmf_dbg(USB, "postboot chip 0x%x/rev 0x%x\n",
890 wait, le32_to_cpu(id.chip), le32_to_cpu(id.chiprev)); 813 le32_to_cpu(id.chip), le32_to_cpu(id.chiprev));
891
892 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id,
893 sizeof(struct bootrom_id_le));
894 814
895 /* XXX this wait may not be necessary */ 815 brcmf_usb_dl_cmd(devinfo, DL_RESETCFG, &id, sizeof(id));
896 mdelay(BRCMF_USB_RESETCFG_SPINWAIT);
897 return 0; 816 return 0;
898 } else { 817 } else {
899 brcmf_dbg(ERROR, "Cannot talk to Dongle. Firmware is not UP, %d ms\n", 818 brcmf_err("Cannot talk to Dongle. Firmware is not UP, %d ms\n",
900 wait); 819 BRCMF_USB_RESET_GETVER_SPINWAIT * loop_cnt);
901 return -EINVAL; 820 return -EINVAL;
902 } 821 }
903} 822}
@@ -918,13 +837,14 @@ brcmf_usb_dl_send_bulk(struct brcmf_usbdev_info *devinfo, void *buffer, int len)
918 837
919 devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET; 838 devinfo->bulk_urb->transfer_flags |= URB_ZERO_PACKET;
920 839
840 devinfo->ctl_completed = false;
921 ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC); 841 ret = usb_submit_urb(devinfo->bulk_urb, GFP_ATOMIC);
922 if (ret) { 842 if (ret) {
923 brcmf_dbg(ERROR, "usb_submit_urb failed %d\n", ret); 843 brcmf_err("usb_submit_urb failed %d\n", ret);
924 return ret; 844 return ret;
925 } 845 }
926 ret = brcmf_usb_sync_wait(devinfo, BRCMF_USB_SYNC_TIMEOUT); 846 ret = brcmf_usb_ioctl_resp_wait(devinfo);
927 return ret; 847 return (ret == 0);
928} 848}
929 849
930static int 850static int
@@ -935,7 +855,8 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
935 struct rdl_state_le state; 855 struct rdl_state_le state;
936 u32 rdlstate, rdlbytes; 856 u32 rdlstate, rdlbytes;
937 int err = 0; 857 int err = 0;
938 brcmf_dbg(TRACE, "fw %p, len %d\n", fw, fwlen); 858
859 brcmf_dbg(USB, "Enter, fw %p, len %d\n", fw, fwlen);
939 860
940 bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC); 861 bulkchunk = kmalloc(RDL_CHUNK, GFP_ATOMIC);
941 if (bulkchunk == NULL) { 862 if (bulkchunk == NULL) {
@@ -952,7 +873,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
952 873
953 /* 2) Check we are in the Waiting state */ 874 /* 2) Check we are in the Waiting state */
954 if (rdlstate != DL_WAITING) { 875 if (rdlstate != DL_WAITING) {
955 brcmf_dbg(ERROR, "Failed to DL_START\n"); 876 brcmf_err("Failed to DL_START\n");
956 err = -EINVAL; 877 err = -EINVAL;
957 goto fail; 878 goto fail;
958 } 879 }
@@ -981,7 +902,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
981 memcpy(bulkchunk, dlpos, sendlen); 902 memcpy(bulkchunk, dlpos, sendlen);
982 if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk, 903 if (brcmf_usb_dl_send_bulk(devinfo, bulkchunk,
983 sendlen)) { 904 sendlen)) {
984 brcmf_dbg(ERROR, "send_bulk failed\n"); 905 brcmf_err("send_bulk failed\n");
985 err = -EINVAL; 906 err = -EINVAL;
986 goto fail; 907 goto fail;
987 } 908 }
@@ -991,7 +912,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
991 } 912 }
992 if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state, 913 if (!brcmf_usb_dl_cmd(devinfo, DL_GETSTATE, &state,
993 sizeof(struct rdl_state_le))) { 914 sizeof(struct rdl_state_le))) {
994 brcmf_dbg(ERROR, "DL_GETSTATE Failed xxxx\n"); 915 brcmf_err("DL_GETSTATE Failed xxxx\n");
995 err = -EINVAL; 916 err = -EINVAL;
996 goto fail; 917 goto fail;
997 } 918 }
@@ -1001,7 +922,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
1001 922
1002 /* restart if an error is reported */ 923 /* restart if an error is reported */
1003 if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) { 924 if (rdlstate == DL_BAD_HDR || rdlstate == DL_BAD_CRC) {
1004 brcmf_dbg(ERROR, "Bad Hdr or Bad CRC state %d\n", 925 brcmf_err("Bad Hdr or Bad CRC state %d\n",
1005 rdlstate); 926 rdlstate);
1006 err = -EINVAL; 927 err = -EINVAL;
1007 goto fail; 928 goto fail;
@@ -1010,7 +931,7 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen)
1010 931
1011fail: 932fail:
1012 kfree(bulkchunk); 933 kfree(bulkchunk);
1013 brcmf_dbg(TRACE, "err=%d\n", err); 934 brcmf_dbg(USB, "Exit, err=%d\n", err);
1014 return err; 935 return err;
1015} 936}
1016 937
@@ -1018,7 +939,7 @@ static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
1018{ 939{
1019 int err; 940 int err;
1020 941
1021 brcmf_dbg(TRACE, "enter\n"); 942 brcmf_dbg(USB, "Enter\n");
1022 943
1023 if (devinfo == NULL) 944 if (devinfo == NULL)
1024 return -EINVAL; 945 return -EINVAL;
@@ -1028,10 +949,10 @@ static int brcmf_usb_dlstart(struct brcmf_usbdev_info *devinfo, u8 *fw, int len)
1028 949
1029 err = brcmf_usb_dl_writeimage(devinfo, fw, len); 950 err = brcmf_usb_dl_writeimage(devinfo, fw, len);
1030 if (err == 0) 951 if (err == 0)
1031 devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_DONE; 952 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_DONE;
1032 else 953 else
1033 devinfo->bus_pub.state = BCMFMAC_USB_STATE_DL_PENDING; 954 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DL_FAIL;
1034 brcmf_dbg(TRACE, "exit: err=%d\n", err); 955 brcmf_dbg(USB, "Exit, err=%d\n", err);
1035 956
1036 return err; 957 return err;
1037} 958}
@@ -1040,7 +961,7 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
1040{ 961{
1041 struct rdl_state_le state; 962 struct rdl_state_le state;
1042 963
1043 brcmf_dbg(TRACE, "enter\n"); 964 brcmf_dbg(USB, "Enter\n");
1044 if (!devinfo) 965 if (!devinfo)
1045 return -EINVAL; 966 return -EINVAL;
1046 967
@@ -1060,10 +981,10 @@ static int brcmf_usb_dlrun(struct brcmf_usbdev_info *devinfo)
1060 return -ENODEV; 981 return -ENODEV;
1061 /* The Dongle may go for re-enumeration. */ 982 /* The Dongle may go for re-enumeration. */
1062 } else { 983 } else {
1063 brcmf_dbg(ERROR, "Dongle not runnable\n"); 984 brcmf_err("Dongle not runnable\n");
1064 return -EINVAL; 985 return -EINVAL;
1065 } 986 }
1066 brcmf_dbg(TRACE, "exit\n"); 987 brcmf_dbg(USB, "Exit\n");
1067 return 0; 988 return 0;
1068} 989}
1069 990
@@ -1090,7 +1011,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1090 int devid, chiprev; 1011 int devid, chiprev;
1091 int err; 1012 int err;
1092 1013
1093 brcmf_dbg(TRACE, "enter\n"); 1014 brcmf_dbg(USB, "Enter\n");
1094 if (devinfo == NULL) 1015 if (devinfo == NULL)
1095 return -ENODEV; 1016 return -ENODEV;
1096 1017
@@ -1098,13 +1019,13 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1098 chiprev = devinfo->bus_pub.chiprev; 1019 chiprev = devinfo->bus_pub.chiprev;
1099 1020
1100 if (!brcmf_usb_chip_support(devid, chiprev)) { 1021 if (!brcmf_usb_chip_support(devid, chiprev)) {
1101 brcmf_dbg(ERROR, "unsupported chip %d rev %d\n", 1022 brcmf_err("unsupported chip %d rev %d\n",
1102 devid, chiprev); 1023 devid, chiprev);
1103 return -EINVAL; 1024 return -EINVAL;
1104 } 1025 }
1105 1026
1106 if (!devinfo->image) { 1027 if (!devinfo->image) {
1107 brcmf_dbg(ERROR, "No firmware!\n"); 1028 brcmf_err("No firmware!\n");
1108 return -ENOENT; 1029 return -ENOENT;
1109 } 1030 }
1110 1031
@@ -1118,7 +1039,7 @@ brcmf_usb_fw_download(struct brcmf_usbdev_info *devinfo)
1118 1039
1119static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo) 1040static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
1120{ 1041{
1121 brcmf_dbg(TRACE, "devinfo %p\n", devinfo); 1042 brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
1122 1043
1123 /* free the URBS */ 1044 /* free the URBS */
1124 brcmf_usb_free_q(&devinfo->rx_freeq, false); 1045 brcmf_usb_free_q(&devinfo->rx_freeq, false);
@@ -1153,6 +1074,7 @@ static int check_file(const u8 *headers)
1153 struct trx_header_le *trx; 1074 struct trx_header_le *trx;
1154 int actual_len = -1; 1075 int actual_len = -1;
1155 1076
1077 brcmf_dbg(USB, "Enter\n");
1156 /* Extract trx header */ 1078 /* Extract trx header */
1157 trx = (struct trx_header_le *) headers; 1079 trx = (struct trx_header_le *) headers;
1158 if (trx->magic != cpu_to_le32(TRX_MAGIC)) 1080 if (trx->magic != cpu_to_le32(TRX_MAGIC))
@@ -1174,6 +1096,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1174 struct brcmf_usb_image *fw_image; 1096 struct brcmf_usb_image *fw_image;
1175 int err; 1097 int err;
1176 1098
1099 brcmf_dbg(USB, "Enter\n");
1177 switch (devinfo->bus_pub.devid) { 1100 switch (devinfo->bus_pub.devid) {
1178 case 43143: 1101 case 43143:
1179 fwname = BRCMF_USB_43143_FW_NAME; 1102 fwname = BRCMF_USB_43143_FW_NAME;
@@ -1190,7 +1113,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1190 return -EINVAL; 1113 return -EINVAL;
1191 break; 1114 break;
1192 } 1115 }
1193 1116 brcmf_dbg(USB, "Loading FW %s\n", fwname);
1194 list_for_each_entry(fw_image, &fw_image_list, list) { 1117 list_for_each_entry(fw_image, &fw_image_list, list) {
1195 if (fw_image->fwname == fwname) { 1118 if (fw_image->fwname == fwname) {
1196 devinfo->image = fw_image->image; 1119 devinfo->image = fw_image->image;
@@ -1201,11 +1124,11 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1201 /* fw image not yet loaded. Load it now and add to list */ 1124 /* fw image not yet loaded. Load it now and add to list */
1202 err = request_firmware(&fw, fwname, devinfo->dev); 1125 err = request_firmware(&fw, fwname, devinfo->dev);
1203 if (!fw) { 1126 if (!fw) {
1204 brcmf_dbg(ERROR, "fail to request firmware %s\n", fwname); 1127 brcmf_err("fail to request firmware %s\n", fwname);
1205 return err; 1128 return err;
1206 } 1129 }
1207 if (check_file(fw->data) < 0) { 1130 if (check_file(fw->data) < 0) {
1208 brcmf_dbg(ERROR, "invalid firmware %s\n", fwname); 1131 brcmf_err("invalid firmware %s\n", fwname);
1209 return -EINVAL; 1132 return -EINVAL;
1210 } 1133 }
1211 1134
@@ -1235,10 +1158,13 @@ static
1235struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo, 1158struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1236 int nrxq, int ntxq) 1159 int nrxq, int ntxq)
1237{ 1160{
1161 brcmf_dbg(USB, "Enter\n");
1162
1238 devinfo->bus_pub.nrxq = nrxq; 1163 devinfo->bus_pub.nrxq = nrxq;
1239 devinfo->rx_low_watermark = nrxq / 2; 1164 devinfo->rx_low_watermark = nrxq / 2;
1240 devinfo->bus_pub.devinfo = devinfo; 1165 devinfo->bus_pub.devinfo = devinfo;
1241 devinfo->bus_pub.ntxq = ntxq; 1166 devinfo->bus_pub.ntxq = ntxq;
1167 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_DOWN;
1242 1168
1243 /* flow control when too many tx urbs posted */ 1169 /* flow control when too many tx urbs posted */
1244 devinfo->tx_low_watermark = ntxq / 4; 1170 devinfo->tx_low_watermark = ntxq / 4;
@@ -1270,25 +1196,24 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1270 1196
1271 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC); 1197 devinfo->intr_urb = usb_alloc_urb(0, GFP_ATOMIC);
1272 if (!devinfo->intr_urb) { 1198 if (!devinfo->intr_urb) {
1273 brcmf_dbg(ERROR, "usb_alloc_urb (intr) failed\n"); 1199 brcmf_err("usb_alloc_urb (intr) failed\n");
1274 goto error; 1200 goto error;
1275 } 1201 }
1276 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC); 1202 devinfo->ctl_urb = usb_alloc_urb(0, GFP_ATOMIC);
1277 if (!devinfo->ctl_urb) { 1203 if (!devinfo->ctl_urb) {
1278 brcmf_dbg(ERROR, "usb_alloc_urb (ctl) failed\n"); 1204 brcmf_err("usb_alloc_urb (ctl) failed\n");
1279 goto error; 1205 goto error;
1280 } 1206 }
1281 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC); 1207 devinfo->bulk_urb = usb_alloc_urb(0, GFP_ATOMIC);
1282 if (!devinfo->bulk_urb) { 1208 if (!devinfo->bulk_urb) {
1283 brcmf_dbg(ERROR, "usb_alloc_urb (bulk) failed\n"); 1209 brcmf_err("usb_alloc_urb (bulk) failed\n");
1284 goto error; 1210 goto error;
1285 } 1211 }
1286 1212
1287 init_waitqueue_head(&devinfo->wait);
1288 if (!brcmf_usb_dlneeded(devinfo)) 1213 if (!brcmf_usb_dlneeded(devinfo))
1289 return &devinfo->bus_pub; 1214 return &devinfo->bus_pub;
1290 1215
1291 brcmf_dbg(TRACE, "start fw downloading\n"); 1216 brcmf_dbg(USB, "Start fw downloading\n");
1292 if (brcmf_usb_get_fw(devinfo)) 1217 if (brcmf_usb_get_fw(devinfo))
1293 goto error; 1218 goto error;
1294 1219
@@ -1298,19 +1223,27 @@ struct brcmf_usbdev *brcmf_usb_attach(struct brcmf_usbdev_info *devinfo,
1298 return &devinfo->bus_pub; 1223 return &devinfo->bus_pub;
1299 1224
1300error: 1225error:
1301 brcmf_dbg(ERROR, "failed!\n"); 1226 brcmf_err("failed!\n");
1302 brcmf_usb_detach(devinfo); 1227 brcmf_usb_detach(devinfo);
1303 return NULL; 1228 return NULL;
1304} 1229}
1305 1230
1306static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo, 1231static struct brcmf_bus_ops brcmf_usb_bus_ops = {
1307 const char *desc, u32 bustype, u32 hdrlen) 1232 .txdata = brcmf_usb_tx,
1233 .init = brcmf_usb_up,
1234 .stop = brcmf_usb_down,
1235 .txctl = brcmf_usb_tx_ctlpkt,
1236 .rxctl = brcmf_usb_rx_ctlpkt,
1237};
1238
1239static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1308{ 1240{
1309 struct brcmf_bus *bus = NULL; 1241 struct brcmf_bus *bus = NULL;
1310 struct brcmf_usbdev *bus_pub = NULL; 1242 struct brcmf_usbdev *bus_pub = NULL;
1311 int ret; 1243 int ret;
1312 struct device *dev = devinfo->dev; 1244 struct device *dev = devinfo->dev;
1313 1245
1246 brcmf_dbg(USB, "Enter\n");
1314 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ); 1247 bus_pub = brcmf_usb_attach(devinfo, BRCMF_USB_NRXQ, BRCMF_USB_NTXQ);
1315 if (!bus_pub) 1248 if (!bus_pub)
1316 return -ENODEV; 1249 return -ENODEV;
@@ -1321,26 +1254,22 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
1321 goto fail; 1254 goto fail;
1322 } 1255 }
1323 1256
1257 bus->dev = dev;
1324 bus_pub->bus = bus; 1258 bus_pub->bus = bus;
1325 bus->brcmf_bus_txdata = brcmf_usb_tx;
1326 bus->brcmf_bus_init = brcmf_usb_up;
1327 bus->brcmf_bus_stop = brcmf_usb_down;
1328 bus->brcmf_bus_txctl = brcmf_usb_tx_ctlpkt;
1329 bus->brcmf_bus_rxctl = brcmf_usb_rx_ctlpkt;
1330 bus->type = bustype;
1331 bus->bus_priv.usb = bus_pub; 1259 bus->bus_priv.usb = bus_pub;
1332 dev_set_drvdata(dev, bus); 1260 dev_set_drvdata(dev, bus);
1261 bus->ops = &brcmf_usb_bus_ops;
1333 1262
1334 /* Attach to the common driver interface */ 1263 /* Attach to the common driver interface */
1335 ret = brcmf_attach(hdrlen, dev); 1264 ret = brcmf_attach(0, dev);
1336 if (ret) { 1265 if (ret) {
1337 brcmf_dbg(ERROR, "dhd_attach failed\n"); 1266 brcmf_err("brcmf_attach failed\n");
1338 goto fail; 1267 goto fail;
1339 } 1268 }
1340 1269
1341 ret = brcmf_bus_start(dev); 1270 ret = brcmf_bus_start(dev);
1342 if (ret) { 1271 if (ret) {
1343 brcmf_dbg(ERROR, "dongle is not responding\n"); 1272 brcmf_err("dongle is not responding\n");
1344 brcmf_detach(dev); 1273 brcmf_detach(dev);
1345 goto fail; 1274 goto fail;
1346 } 1275 }
@@ -1358,7 +1287,7 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
1358{ 1287{
1359 if (!devinfo) 1288 if (!devinfo)
1360 return; 1289 return;
1361 brcmf_dbg(TRACE, "enter: bus_pub %p\n", devinfo); 1290 brcmf_dbg(USB, "Enter, bus_pub %p\n", devinfo);
1362 1291
1363 brcmf_detach(devinfo->dev); 1292 brcmf_detach(devinfo->dev);
1364 kfree(devinfo->bus_pub.bus); 1293 kfree(devinfo->bus_pub.bus);
@@ -1376,7 +1305,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1376 u8 endpoint_num; 1305 u8 endpoint_num;
1377 struct brcmf_usbdev_info *devinfo; 1306 struct brcmf_usbdev_info *devinfo;
1378 1307
1379 brcmf_dbg(TRACE, "enter\n"); 1308 brcmf_dbg(USB, "Enter\n");
1380 1309
1381 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC); 1310 devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
1382 if (devinfo == NULL) 1311 if (devinfo == NULL)
@@ -1415,7 +1344,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1415 if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC || 1344 if (IFDESC(usb, CONTROL_IF).bInterfaceClass != USB_CLASS_VENDOR_SPEC ||
1416 IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 || 1345 IFDESC(usb, CONTROL_IF).bInterfaceSubClass != 2 ||
1417 IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) { 1346 IFDESC(usb, CONTROL_IF).bInterfaceProtocol != 0xff) {
1418 brcmf_dbg(ERROR, "invalid control interface: class %d, subclass %d, proto %d\n", 1347 brcmf_err("invalid control interface: class %d, subclass %d, proto %d\n",
1419 IFDESC(usb, CONTROL_IF).bInterfaceClass, 1348 IFDESC(usb, CONTROL_IF).bInterfaceClass,
1420 IFDESC(usb, CONTROL_IF).bInterfaceSubClass, 1349 IFDESC(usb, CONTROL_IF).bInterfaceSubClass,
1421 IFDESC(usb, CONTROL_IF).bInterfaceProtocol); 1350 IFDESC(usb, CONTROL_IF).bInterfaceProtocol);
@@ -1427,7 +1356,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1427 endpoint = &IFEPDESC(usb, CONTROL_IF, 0); 1356 endpoint = &IFEPDESC(usb, CONTROL_IF, 0);
1428 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) 1357 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1429 != USB_ENDPOINT_XFER_INT) { 1358 != USB_ENDPOINT_XFER_INT) {
1430 brcmf_dbg(ERROR, "invalid control endpoint %d\n", 1359 brcmf_err("invalid control endpoint %d\n",
1431 endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK); 1360 endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
1432 ret = -1; 1361 ret = -1;
1433 goto fail; 1362 goto fail;
@@ -1446,7 +1375,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1446 endpoint = &IFEPDESC(usb, BULK_IF, ep); 1375 endpoint = &IFEPDESC(usb, BULK_IF, ep);
1447 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != 1376 if ((endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) !=
1448 USB_ENDPOINT_XFER_BULK) { 1377 USB_ENDPOINT_XFER_BULK) {
1449 brcmf_dbg(ERROR, "invalid data endpoint %d\n", ep); 1378 brcmf_err("invalid data endpoint %d\n", ep);
1450 ret = -1; 1379 ret = -1;
1451 goto fail; 1380 goto fail;
1452 } 1381 }
@@ -1477,11 +1406,11 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1477 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval; 1406 devinfo->interval = IFEPDESC(usb, CONTROL_IF, 0).bInterval;
1478 1407
1479 if (usb->speed == USB_SPEED_HIGH) 1408 if (usb->speed == USB_SPEED_HIGH)
1480 brcmf_dbg(INFO, "Broadcom high speed USB wireless device detected\n"); 1409 brcmf_dbg(USB, "Broadcom high speed USB wireless device detected\n");
1481 else 1410 else
1482 brcmf_dbg(INFO, "Broadcom full speed USB wireless device detected\n"); 1411 brcmf_dbg(USB, "Broadcom full speed USB wireless device detected\n");
1483 1412
1484 ret = brcmf_usb_probe_cb(devinfo, "", USB_BUS, 0); 1413 ret = brcmf_usb_probe_cb(devinfo);
1485 if (ret) 1414 if (ret)
1486 goto fail; 1415 goto fail;
1487 1416
@@ -1489,7 +1418,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
1489 return 0; 1418 return 0;
1490 1419
1491fail: 1420fail:
1492 brcmf_dbg(ERROR, "failed with errno %d\n", ret); 1421 brcmf_err("failed with errno %d\n", ret);
1493 kfree(devinfo); 1422 kfree(devinfo);
1494 usb_set_intfdata(intf, NULL); 1423 usb_set_intfdata(intf, NULL);
1495 return ret; 1424 return ret;
@@ -1501,40 +1430,55 @@ brcmf_usb_disconnect(struct usb_interface *intf)
1501{ 1430{
1502 struct brcmf_usbdev_info *devinfo; 1431 struct brcmf_usbdev_info *devinfo;
1503 1432
1504 brcmf_dbg(TRACE, "enter\n"); 1433 brcmf_dbg(USB, "Enter\n");
1505 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf); 1434 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
1506 brcmf_usb_disconnect_cb(devinfo); 1435 brcmf_usb_disconnect_cb(devinfo);
1507 kfree(devinfo); 1436 kfree(devinfo);
1437 brcmf_dbg(USB, "Exit\n");
1508} 1438}
1509 1439
1510/* 1440/*
1511 * only need to signal the bus being down and update the suspend state. 1441 * only need to signal the bus being down and update the state.
1512 */ 1442 */
1513static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state) 1443static int brcmf_usb_suspend(struct usb_interface *intf, pm_message_t state)
1514{ 1444{
1515 struct usb_device *usb = interface_to_usbdev(intf); 1445 struct usb_device *usb = interface_to_usbdev(intf);
1516 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1446 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1517 1447
1518 brcmf_dbg(TRACE, "enter\n"); 1448 brcmf_dbg(USB, "Enter\n");
1519 devinfo->bus_pub.state = BCMFMAC_USB_STATE_DOWN; 1449 devinfo->bus_pub.state = BRCMFMAC_USB_STATE_SLEEP;
1520 devinfo->suspend_state = USBOS_SUSPEND_STATE_SUSPENDED; 1450 brcmf_detach(&usb->dev);
1521 return 0; 1451 return 0;
1522} 1452}
1523 1453
1524/* 1454/*
1525 * mark suspend state active and crank up the bus. 1455 * (re-) start the bus.
1526 */ 1456 */
1527static int brcmf_usb_resume(struct usb_interface *intf) 1457static int brcmf_usb_resume(struct usb_interface *intf)
1528{ 1458{
1529 struct usb_device *usb = interface_to_usbdev(intf); 1459 struct usb_device *usb = interface_to_usbdev(intf);
1530 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev); 1460 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1531 1461
1532 brcmf_dbg(TRACE, "enter\n"); 1462 brcmf_dbg(USB, "Enter\n");
1533 devinfo->suspend_state = USBOS_SUSPEND_STATE_DEVICE_ACTIVE; 1463 if (!brcmf_attach(0, devinfo->dev))
1534 brcmf_bus_start(&usb->dev); 1464 return brcmf_bus_start(&usb->dev);
1465
1535 return 0; 1466 return 0;
1536} 1467}
1537 1468
1469static int brcmf_usb_reset_resume(struct usb_interface *intf)
1470{
1471 struct usb_device *usb = interface_to_usbdev(intf);
1472 struct brcmf_usbdev_info *devinfo = brcmf_usb_get_businfo(&usb->dev);
1473
1474 brcmf_dbg(USB, "Enter\n");
1475
1476 if (!brcmf_usb_fw_download(devinfo))
1477 return brcmf_usb_resume(intf);
1478
1479 return -EIO;
1480}
1481
1538#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c 1482#define BRCMF_USB_VENDOR_ID_BROADCOM 0x0a5c
1539#define BRCMF_USB_DEVICE_ID_43143 0xbd1e 1483#define BRCMF_USB_DEVICE_ID_43143 0xbd1e
1540#define BRCMF_USB_DEVICE_ID_43236 0xbd17 1484#define BRCMF_USB_DEVICE_ID_43236 0xbd17
@@ -1554,7 +1498,6 @@ MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1554MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1498MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
1555MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME); 1499MODULE_FIRMWARE(BRCMF_USB_43242_FW_NAME);
1556 1500
1557/* TODO: suspend and resume entries */
1558static struct usb_driver brcmf_usbdrvr = { 1501static struct usb_driver brcmf_usbdrvr = {
1559 .name = KBUILD_MODNAME, 1502 .name = KBUILD_MODNAME,
1560 .probe = brcmf_usb_probe, 1503 .probe = brcmf_usb_probe,
@@ -1562,6 +1505,7 @@ static struct usb_driver brcmf_usbdrvr = {
1562 .id_table = brcmf_usb_devid_table, 1505 .id_table = brcmf_usb_devid_table,
1563 .suspend = brcmf_usb_suspend, 1506 .suspend = brcmf_usb_suspend,
1564 .resume = brcmf_usb_resume, 1507 .resume = brcmf_usb_resume,
1508 .reset_resume = brcmf_usb_reset_resume,
1565 .supports_autosuspend = 1, 1509 .supports_autosuspend = 1,
1566 .disable_hub_initiated_lpm = 1, 1510 .disable_hub_initiated_lpm = 1,
1567}; 1511};
@@ -1579,12 +1523,14 @@ static void brcmf_release_fw(struct list_head *q)
1579 1523
1580void brcmf_usb_exit(void) 1524void brcmf_usb_exit(void)
1581{ 1525{
1526 brcmf_dbg(USB, "Enter\n");
1582 usb_deregister(&brcmf_usbdrvr); 1527 usb_deregister(&brcmf_usbdrvr);
1583 brcmf_release_fw(&fw_image_list); 1528 brcmf_release_fw(&fw_image_list);
1584} 1529}
1585 1530
1586void brcmf_usb_init(void) 1531void brcmf_usb_init(void)
1587{ 1532{
1533 brcmf_dbg(USB, "Enter\n");
1588 INIT_LIST_HEAD(&fw_image_list); 1534 INIT_LIST_HEAD(&fw_image_list);
1589 usb_register(&brcmf_usbdrvr); 1535 usb_register(&brcmf_usbdrvr);
1590} 1536}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.h b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
index acfa5e89872f..f483a8c9945b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.h
@@ -17,19 +17,11 @@
17#define BRCMFMAC_USB_H 17#define BRCMFMAC_USB_H
18 18
19enum brcmf_usb_state { 19enum brcmf_usb_state {
20 BCMFMAC_USB_STATE_DL_PENDING, 20 BRCMFMAC_USB_STATE_DOWN,
21 BCMFMAC_USB_STATE_DL_DONE, 21 BRCMFMAC_USB_STATE_DL_FAIL,
22 BCMFMAC_USB_STATE_UP, 22 BRCMFMAC_USB_STATE_DL_DONE,
23 BCMFMAC_USB_STATE_DOWN, 23 BRCMFMAC_USB_STATE_UP,
24 BCMFMAC_USB_STATE_PNP_FWDL, 24 BRCMFMAC_USB_STATE_SLEEP
25 BCMFMAC_USB_STATE_DISCONNECT,
26 BCMFMAC_USB_STATE_SLEEP
27};
28
29enum brcmf_usb_pnp_state {
30 BCMFMAC_USB_PNP_DISCONNECT,
31 BCMFMAC_USB_PNP_SLEEP,
32 BCMFMAC_USB_PNP_RESUME,
33}; 25};
34 26
35struct brcmf_stats { 27struct brcmf_stats {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 481345c23ded..1261a9b84e04 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -19,14 +19,7 @@
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/if_arp.h>
23#include <linux/sched.h>
24#include <linux/kthread.h>
25#include <linux/netdevice.h>
26#include <linux/bitops.h>
27#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
28#include <linux/ieee80211.h>
29#include <linux/uaccess.h>
30#include <net/cfg80211.h> 23#include <net/cfg80211.h>
31#include <net/netlink.h> 24#include <net/netlink.h>
32 25
@@ -34,7 +27,9 @@
34#include <defs.h> 27#include <defs.h>
35#include <brcmu_wifi.h> 28#include <brcmu_wifi.h>
36#include "dhd.h" 29#include "dhd.h"
30#include "dhd_dbg.h"
37#include "wl_cfg80211.h" 31#include "wl_cfg80211.h"
32#include "fwil.h"
38 33
39#define BRCMF_SCAN_IE_LEN_MAX 2048 34#define BRCMF_SCAN_IE_LEN_MAX 2048
40#define BRCMF_PNO_VERSION 2 35#define BRCMF_PNO_VERSION 2
@@ -48,6 +43,8 @@
48#define BRCMF_PNO_SCAN_COMPLETE 1 43#define BRCMF_PNO_SCAN_COMPLETE 1
49#define BRCMF_PNO_SCAN_INCOMPLETE 0 44#define BRCMF_PNO_SCAN_INCOMPLETE 0
50 45
46#define BRCMF_IFACE_MAX_CNT 2
47
51#define TLV_LEN_OFF 1 /* length offset */ 48#define TLV_LEN_OFF 1 /* length offset */
52#define TLV_HDR_LEN 2 /* header length */ 49#define TLV_HDR_LEN 2 /* header length */
53#define TLV_BODY_OFF 2 /* body offset */ 50#define TLV_BODY_OFF 2 /* body offset */
@@ -91,16 +88,11 @@
91#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ 88#define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
92 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) 89 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
93 90
94static const u8 ether_bcast[ETH_ALEN] = {255, 255, 255, 255, 255, 255}; 91static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
95
96static u32 brcmf_dbg_level = WL_DBG_ERR;
97
98static bool check_sys_up(struct wiphy *wiphy)
99{ 92{
100 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 93 if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
101 if (!test_bit(WL_STATUS_READY, &cfg->status)) { 94 brcmf_dbg(INFO, "device is not ready : status (%lu)\n",
102 WL_INFO("device is not ready : status (%d)\n", 95 vif->sme_state);
103 (int)cfg->status);
104 return false; 96 return false;
105 } 97 }
106 return true; 98 return true;
@@ -391,55 +383,29 @@ static u8 brcmf_mw_to_qdbm(u16 mw)
391 return qdbm; 383 return qdbm;
392} 384}
393 385
394/* function for reading/writing a single u32 from/to the dongle */ 386static u16 channel_to_chanspec(struct ieee80211_channel *ch)
395static int
396brcmf_exec_dcmd_u32(struct net_device *ndev, u32 cmd, u32 *par)
397{
398 int err;
399 __le32 par_le = cpu_to_le32(*par);
400
401 err = brcmf_exec_dcmd(ndev, cmd, &par_le, sizeof(__le32));
402 *par = le32_to_cpu(par_le);
403
404 return err;
405}
406
407static s32
408brcmf_dev_iovar_setbuf_bsscfg(struct net_device *ndev, s8 *name,
409 void *param, s32 paramlen,
410 void *buf, s32 buflen, s32 bssidx)
411{ 387{
412 s32 err = -ENOMEM; 388 u16 chanspec;
413 u32 len;
414
415 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
416 buf, buflen, bssidx);
417 BUG_ON(!len);
418 if (len > 0)
419 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
420 if (err)
421 WL_ERR("error (%d)\n", err);
422 389
423 return err; 390 chanspec = ieee80211_frequency_to_channel(ch->center_freq);
424} 391 chanspec &= WL_CHANSPEC_CHAN_MASK;
425 392
426static s32 393 if (ch->band == IEEE80211_BAND_2GHZ)
427brcmf_dev_iovar_getbuf_bsscfg(struct net_device *ndev, s8 *name, 394 chanspec |= WL_CHANSPEC_BAND_2G;
428 void *param, s32 paramlen, 395 else
429 void *buf, s32 buflen, s32 bssidx) 396 chanspec |= WL_CHANSPEC_BAND_5G;
430{
431 s32 err = -ENOMEM;
432 u32 len;
433
434 len = brcmf_c_mkiovar_bsscfg(name, param, paramlen,
435 buf, buflen, bssidx);
436 BUG_ON(!len);
437 if (len > 0)
438 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, buf, len);
439 if (err)
440 WL_ERR("error (%d)\n", err);
441 397
442 return err; 398 if (ch->flags & IEEE80211_CHAN_NO_HT40) {
399 chanspec |= WL_CHANSPEC_BW_20;
400 chanspec |= WL_CHANSPEC_CTL_SB_NONE;
401 } else {
402 chanspec |= WL_CHANSPEC_BW_40;
403 if (ch->flags & IEEE80211_CHAN_NO_HT40PLUS)
404 chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
405 else
406 chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
407 }
408 return chanspec;
443} 409}
444 410
445static void convert_key_from_CPU(struct brcmf_wsec_key *key, 411static void convert_key_from_CPU(struct brcmf_wsec_key *key,
@@ -457,21 +423,20 @@ static void convert_key_from_CPU(struct brcmf_wsec_key *key,
457} 423}
458 424
459static int 425static int
460send_key_to_dongle(struct brcmf_cfg80211_info *cfg, s32 bssidx, 426send_key_to_dongle(struct net_device *ndev, struct brcmf_wsec_key *key)
461 struct net_device *ndev, struct brcmf_wsec_key *key)
462{ 427{
463 int err; 428 int err;
464 struct brcmf_wsec_key_le key_le; 429 struct brcmf_wsec_key_le key_le;
465 430
466 convert_key_from_CPU(key, &key_le); 431 convert_key_from_CPU(key, &key_le);
467 432
468 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le, 433 brcmf_netdev_wait_pend8021x(ndev);
469 sizeof(key_le), 434
470 cfg->extra_buf, 435 err = brcmf_fil_bsscfg_data_set(netdev_priv(ndev), "wsec_key", &key_le,
471 WL_EXTRA_BUF_MAX, bssidx); 436 sizeof(key_le));
472 437
473 if (err) 438 if (err)
474 WL_ERR("wsec_key error (%d)\n", err); 439 brcmf_err("wsec_key error (%d)\n", err);
475 return err; 440 return err;
476} 441}
477 442
@@ -480,29 +445,30 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
480 enum nl80211_iftype type, u32 *flags, 445 enum nl80211_iftype type, u32 *flags,
481 struct vif_params *params) 446 struct vif_params *params)
482{ 447{
483 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 448 struct brcmf_if *ifp = netdev_priv(ndev);
449 struct brcmf_cfg80211_vif *vif = ifp->vif;
484 s32 infra = 0; 450 s32 infra = 0;
485 s32 ap = 0; 451 s32 ap = 0;
486 s32 err = 0; 452 s32 err = 0;
487 453
488 WL_TRACE("Enter, ndev=%p, type=%d\n", ndev, type); 454 brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
489 455
490 switch (type) { 456 switch (type) {
491 case NL80211_IFTYPE_MONITOR: 457 case NL80211_IFTYPE_MONITOR:
492 case NL80211_IFTYPE_WDS: 458 case NL80211_IFTYPE_WDS:
493 WL_ERR("type (%d) : currently we do not support this type\n", 459 brcmf_err("type (%d) : currently we do not support this type\n",
494 type); 460 type);
495 return -EOPNOTSUPP; 461 return -EOPNOTSUPP;
496 case NL80211_IFTYPE_ADHOC: 462 case NL80211_IFTYPE_ADHOC:
497 cfg->conf->mode = WL_MODE_IBSS; 463 vif->mode = WL_MODE_IBSS;
498 infra = 0; 464 infra = 0;
499 break; 465 break;
500 case NL80211_IFTYPE_STATION: 466 case NL80211_IFTYPE_STATION:
501 cfg->conf->mode = WL_MODE_BSS; 467 vif->mode = WL_MODE_BSS;
502 infra = 1; 468 infra = 1;
503 break; 469 break;
504 case NL80211_IFTYPE_AP: 470 case NL80211_IFTYPE_AP:
505 cfg->conf->mode = WL_MODE_AP; 471 vif->mode = WL_MODE_AP;
506 ap = 1; 472 ap = 1;
507 break; 473 break;
508 default: 474 default:
@@ -511,338 +477,41 @@ brcmf_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev,
511 } 477 }
512 478
513 if (ap) { 479 if (ap) {
514 set_bit(WL_STATUS_AP_CREATING, &cfg->status); 480 set_bit(BRCMF_VIF_STATUS_AP_CREATING, &vif->sme_state);
515 if (!cfg->ap_info) 481 brcmf_dbg(INFO, "IF Type = AP\n");
516 cfg->ap_info = kzalloc(sizeof(*cfg->ap_info),
517 GFP_KERNEL);
518 if (!cfg->ap_info) {
519 err = -ENOMEM;
520 goto done;
521 }
522 WL_INFO("IF Type = AP\n");
523 } else { 482 } else {
524 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &infra); 483 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, infra);
525 if (err) { 484 if (err) {
526 WL_ERR("WLC_SET_INFRA error (%d)\n", err); 485 brcmf_err("WLC_SET_INFRA error (%d)\n", err);
527 err = -EAGAIN; 486 err = -EAGAIN;
528 goto done; 487 goto done;
529 } 488 }
530 WL_INFO("IF Type = %s\n", 489 brcmf_dbg(INFO, "IF Type = %s\n", (vif->mode == WL_MODE_IBSS) ?
531 (cfg->conf->mode == WL_MODE_IBSS) ? 490 "Adhoc" : "Infra");
532 "Adhoc" : "Infra");
533 } 491 }
534 ndev->ieee80211_ptr->iftype = type; 492 ndev->ieee80211_ptr->iftype = type;
535 493
536done: 494done:
537 WL_TRACE("Exit\n"); 495 brcmf_dbg(TRACE, "Exit\n");
538 496
539 return err; 497 return err;
540} 498}
541 499
542static s32 brcmf_dev_intvar_set(struct net_device *ndev, s8 *name, s32 val)
543{
544 s8 buf[BRCMF_DCMD_SMLEN];
545 u32 len;
546 s32 err = 0;
547 __le32 val_le;
548
549 val_le = cpu_to_le32(val);
550 len = brcmf_c_mkiovar(name, (char *)(&val_le), sizeof(val_le), buf,
551 sizeof(buf));
552 BUG_ON(!len);
553
554 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, buf, len);
555 if (err)
556 WL_ERR("error (%d)\n", err);
557
558 return err;
559}
560
561static s32
562brcmf_dev_intvar_get(struct net_device *ndev, s8 *name, s32 *retval)
563{
564 union {
565 s8 buf[BRCMF_DCMD_SMLEN];
566 __le32 val;
567 } var;
568 u32 len;
569 u32 data_null;
570 s32 err = 0;
571
572 len =
573 brcmf_c_mkiovar(name, (char *)(&data_null), 0, (char *)(&var),
574 sizeof(var.buf));
575 BUG_ON(!len);
576 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, &var, len);
577 if (err)
578 WL_ERR("error (%d)\n", err);
579
580 *retval = le32_to_cpu(var.val);
581
582 return err;
583}
584
585static s32
586brcmf_dev_intvar_set_bsscfg(struct net_device *ndev, s8 *name, u32 val,
587 s32 bssidx)
588{
589 s8 buf[BRCMF_DCMD_SMLEN];
590 __le32 val_le;
591
592 val_le = cpu_to_le32(val);
593
594 return brcmf_dev_iovar_setbuf_bsscfg(ndev, name, &val_le,
595 sizeof(val_le), buf, sizeof(buf),
596 bssidx);
597}
598
599static s32
600brcmf_dev_intvar_get_bsscfg(struct net_device *ndev, s8 *name, s32 *val,
601 s32 bssidx)
602{
603 s8 buf[BRCMF_DCMD_SMLEN];
604 s32 err;
605 __le32 val_le;
606
607 memset(buf, 0, sizeof(buf));
608 err = brcmf_dev_iovar_getbuf_bsscfg(ndev, name, val, sizeof(*val), buf,
609 sizeof(buf), bssidx);
610 if (err == 0) {
611 memcpy(&val_le, buf, sizeof(val_le));
612 *val = le32_to_cpu(val_le);
613 }
614 return err;
615}
616
617
618/*
619 * For now brcmf_find_bssidx will return 0. Once p2p gets implemented this
620 * should return the ndev matching bssidx.
621 */
622static s32
623brcmf_find_bssidx(struct brcmf_cfg80211_info *cfg, struct net_device *ndev)
624{
625 return 0;
626}
627
628static void brcmf_set_mpc(struct net_device *ndev, int mpc) 500static void brcmf_set_mpc(struct net_device *ndev, int mpc)
629{ 501{
502 struct brcmf_if *ifp = netdev_priv(ndev);
630 s32 err = 0; 503 s32 err = 0;
631 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
632 504
633 if (test_bit(WL_STATUS_READY, &cfg->status)) { 505 if (check_vif_up(ifp->vif)) {
634 err = brcmf_dev_intvar_set(ndev, "mpc", mpc); 506 err = brcmf_fil_iovar_int_set(ifp, "mpc", mpc);
635 if (err) { 507 if (err) {
636 WL_ERR("fail to set mpc\n"); 508 brcmf_err("fail to set mpc\n");
637 return; 509 return;
638 } 510 }
639 WL_INFO("MPC : %d\n", mpc); 511 brcmf_dbg(INFO, "MPC : %d\n", mpc);
640 }
641}
642
643static void brcmf_iscan_prep(struct brcmf_scan_params_le *params_le,
644 struct brcmf_ssid *ssid)
645{
646 memcpy(params_le->bssid, ether_bcast, ETH_ALEN);
647 params_le->bss_type = DOT11_BSSTYPE_ANY;
648 params_le->scan_type = 0;
649 params_le->channel_num = 0;
650 params_le->nprobes = cpu_to_le32(-1);
651 params_le->active_time = cpu_to_le32(-1);
652 params_le->passive_time = cpu_to_le32(-1);
653 params_le->home_time = cpu_to_le32(-1);
654 if (ssid && ssid->SSID_len) {
655 params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len);
656 memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len);
657 } 512 }
658} 513}
659 514
660static s32
661brcmf_dev_iovar_setbuf(struct net_device *ndev, s8 * iovar, void *param,
662 s32 paramlen, void *bufptr, s32 buflen)
663{
664 s32 iolen;
665
666 iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
667 BUG_ON(!iolen);
668
669 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, bufptr, iolen);
670}
671
672static s32
673brcmf_dev_iovar_getbuf(struct net_device *ndev, s8 * iovar, void *param,
674 s32 paramlen, void *bufptr, s32 buflen)
675{
676 s32 iolen;
677
678 iolen = brcmf_c_mkiovar(iovar, param, paramlen, bufptr, buflen);
679 BUG_ON(!iolen);
680
681 return brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, bufptr, buflen);
682}
683
684static s32
685brcmf_run_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan,
686 struct brcmf_ssid *ssid, u16 action)
687{
688 s32 params_size = BRCMF_SCAN_PARAMS_FIXED_SIZE +
689 offsetof(struct brcmf_iscan_params_le, params_le);
690 struct brcmf_iscan_params_le *params;
691 s32 err = 0;
692
693 if (ssid && ssid->SSID_len)
694 params_size += sizeof(struct brcmf_ssid);
695 params = kzalloc(params_size, GFP_KERNEL);
696 if (!params)
697 return -ENOMEM;
698 BUG_ON(params_size >= BRCMF_DCMD_SMLEN);
699
700 brcmf_iscan_prep(&params->params_le, ssid);
701
702 params->version = cpu_to_le32(BRCMF_ISCAN_REQ_VERSION);
703 params->action = cpu_to_le16(action);
704 params->scan_duration = cpu_to_le16(0);
705
706 err = brcmf_dev_iovar_setbuf(iscan->ndev, "iscan", params, params_size,
707 iscan->dcmd_buf, BRCMF_DCMD_SMLEN);
708 if (err) {
709 if (err == -EBUSY)
710 WL_INFO("system busy : iscan canceled\n");
711 else
712 WL_ERR("error (%d)\n", err);
713 }
714
715 kfree(params);
716 return err;
717}
718
719static s32 brcmf_do_iscan(struct brcmf_cfg80211_info *cfg)
720{
721 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
722 struct net_device *ndev = cfg_to_ndev(cfg);
723 struct brcmf_ssid ssid;
724 __le32 passive_scan;
725 s32 err = 0;
726
727 /* Broadcast scan by default */
728 memset(&ssid, 0, sizeof(ssid));
729
730 iscan->state = WL_ISCAN_STATE_SCANING;
731
732 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
733 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_SET_PASSIVE_SCAN,
734 &passive_scan, sizeof(passive_scan));
735 if (err) {
736 WL_ERR("error (%d)\n", err);
737 return err;
738 }
739 brcmf_set_mpc(ndev, 0);
740 cfg->iscan_kickstart = true;
741 err = brcmf_run_iscan(iscan, &ssid, BRCMF_SCAN_ACTION_START);
742 if (err) {
743 brcmf_set_mpc(ndev, 1);
744 cfg->iscan_kickstart = false;
745 return err;
746 }
747 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
748 iscan->timer_on = 1;
749 return err;
750}
751
752static s32
753brcmf_cfg80211_iscan(struct wiphy *wiphy, struct net_device *ndev,
754 struct cfg80211_scan_request *request,
755 struct cfg80211_ssid *this_ssid)
756{
757 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
758 struct cfg80211_ssid *ssids;
759 struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int;
760 __le32 passive_scan;
761 bool iscan_req;
762 bool spec_scan;
763 s32 err = 0;
764 u32 SSID_len;
765
766 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) {
767 WL_ERR("Scanning already : status (%lu)\n", cfg->status);
768 return -EAGAIN;
769 }
770 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) {
771 WL_ERR("Scanning being aborted : status (%lu)\n",
772 cfg->status);
773 return -EAGAIN;
774 }
775 if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) {
776 WL_ERR("Connecting : status (%lu)\n",
777 cfg->status);
778 return -EAGAIN;
779 }
780
781 iscan_req = false;
782 spec_scan = false;
783 if (request) {
784 /* scan bss */
785 ssids = request->ssids;
786 if (cfg->iscan_on && (!ssids || !ssids->ssid_len))
787 iscan_req = true;
788 } else {
789 /* scan in ibss */
790 /* we don't do iscan in ibss */
791 ssids = this_ssid;
792 }
793
794 cfg->scan_request = request;
795 set_bit(WL_STATUS_SCANNING, &cfg->status);
796 if (iscan_req) {
797 err = brcmf_do_iscan(cfg);
798 if (!err)
799 return err;
800 else
801 goto scan_out;
802 } else {
803 WL_SCAN("ssid \"%s\", ssid_len (%d)\n",
804 ssids->ssid, ssids->ssid_len);
805 memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
806 SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
807 sr->ssid_le.SSID_len = cpu_to_le32(0);
808 if (SSID_len) {
809 memcpy(sr->ssid_le.SSID, ssids->ssid, SSID_len);
810 sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
811 spec_scan = true;
812 } else {
813 WL_SCAN("Broadcast scan\n");
814 }
815
816 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1);
817 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN,
818 &passive_scan, sizeof(passive_scan));
819 if (err) {
820 WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
821 goto scan_out;
822 }
823 brcmf_set_mpc(ndev, 0);
824 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le,
825 sizeof(sr->ssid_le));
826 if (err) {
827 if (err == -EBUSY)
828 WL_INFO("system busy : scan for \"%s\" "
829 "canceled\n", sr->ssid_le.SSID);
830 else
831 WL_ERR("WLC_SCAN error (%d)\n", err);
832
833 brcmf_set_mpc(ndev, 1);
834 goto scan_out;
835 }
836 }
837
838 return 0;
839
840scan_out:
841 clear_bit(WL_STATUS_SCANNING, &cfg->status);
842 cfg->scan_request = NULL;
843 return err;
844}
845
846static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le, 515static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
847 struct cfg80211_scan_request *request) 516 struct cfg80211_scan_request *request)
848{ 517{
@@ -851,12 +520,10 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
851 s32 i; 520 s32 i;
852 s32 offset; 521 s32 offset;
853 u16 chanspec; 522 u16 chanspec;
854 u16 channel;
855 struct ieee80211_channel *req_channel;
856 char *ptr; 523 char *ptr;
857 struct brcmf_ssid_le ssid_le; 524 struct brcmf_ssid_le ssid_le;
858 525
859 memcpy(params_le->bssid, ether_bcast, ETH_ALEN); 526 memset(params_le->bssid, 0xFF, ETH_ALEN);
860 params_le->bss_type = DOT11_BSSTYPE_ANY; 527 params_le->bss_type = DOT11_BSSTYPE_ANY;
861 params_le->scan_type = 0; 528 params_le->scan_type = 0;
862 params_le->channel_num = 0; 529 params_le->channel_num = 0;
@@ -873,40 +540,20 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
873 n_ssids = request->n_ssids; 540 n_ssids = request->n_ssids;
874 n_channels = request->n_channels; 541 n_channels = request->n_channels;
875 /* Copy channel array if applicable */ 542 /* Copy channel array if applicable */
876 WL_SCAN("### List of channelspecs to scan ### %d\n", n_channels); 543 brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
544 n_channels);
877 if (n_channels > 0) { 545 if (n_channels > 0) {
878 for (i = 0; i < n_channels; i++) { 546 for (i = 0; i < n_channels; i++) {
879 chanspec = 0; 547 chanspec = channel_to_chanspec(request->channels[i]);
880 req_channel = request->channels[i]; 548 brcmf_dbg(SCAN, "Chan : %d, Channel spec: %x\n",
881 channel = ieee80211_frequency_to_channel( 549 request->channels[i]->hw_value, chanspec);
882 req_channel->center_freq);
883 if (req_channel->band == IEEE80211_BAND_2GHZ)
884 chanspec |= WL_CHANSPEC_BAND_2G;
885 else
886 chanspec |= WL_CHANSPEC_BAND_5G;
887
888 if (req_channel->flags & IEEE80211_CHAN_NO_HT40) {
889 chanspec |= WL_CHANSPEC_BW_20;
890 chanspec |= WL_CHANSPEC_CTL_SB_NONE;
891 } else {
892 chanspec |= WL_CHANSPEC_BW_40;
893 if (req_channel->flags &
894 IEEE80211_CHAN_NO_HT40PLUS)
895 chanspec |= WL_CHANSPEC_CTL_SB_LOWER;
896 else
897 chanspec |= WL_CHANSPEC_CTL_SB_UPPER;
898 }
899
900 chanspec |= (channel & WL_CHANSPEC_CHAN_MASK);
901 WL_SCAN("Chan : %d, Channel spec: %x\n",
902 channel, chanspec);
903 params_le->channel_list[i] = cpu_to_le16(chanspec); 550 params_le->channel_list[i] = cpu_to_le16(chanspec);
904 } 551 }
905 } else { 552 } else {
906 WL_SCAN("Scanning all channels\n"); 553 brcmf_dbg(SCAN, "Scanning all channels\n");
907 } 554 }
908 /* Copy ssid array if applicable */ 555 /* Copy ssid array if applicable */
909 WL_SCAN("### List of SSIDs to scan ### %d\n", n_ssids); 556 brcmf_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids);
910 if (n_ssids > 0) { 557 if (n_ssids > 0) {
911 offset = offsetof(struct brcmf_scan_params_le, channel_list) + 558 offset = offsetof(struct brcmf_scan_params_le, channel_list) +
912 n_channels * sizeof(u16); 559 n_channels * sizeof(u16);
@@ -919,18 +566,19 @@ static void brcmf_escan_prep(struct brcmf_scan_params_le *params_le,
919 memcpy(ssid_le.SSID, request->ssids[i].ssid, 566 memcpy(ssid_le.SSID, request->ssids[i].ssid,
920 request->ssids[i].ssid_len); 567 request->ssids[i].ssid_len);
921 if (!ssid_le.SSID_len) 568 if (!ssid_le.SSID_len)
922 WL_SCAN("%d: Broadcast scan\n", i); 569 brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
923 else 570 else
924 WL_SCAN("%d: scan for %s size =%d\n", i, 571 brcmf_dbg(SCAN, "%d: scan for %s size =%d\n",
925 ssid_le.SSID, ssid_le.SSID_len); 572 i, ssid_le.SSID, ssid_le.SSID_len);
926 memcpy(ptr, &ssid_le, sizeof(ssid_le)); 573 memcpy(ptr, &ssid_le, sizeof(ssid_le));
927 ptr += sizeof(ssid_le); 574 ptr += sizeof(ssid_le);
928 } 575 }
929 } else { 576 } else {
930 WL_SCAN("Broadcast scan %p\n", request->ssids); 577 brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
931 if ((request->ssids) && request->ssids->ssid_len) { 578 if ((request->ssids) && request->ssids->ssid_len) {
932 WL_SCAN("SSID %s len=%d\n", params_le->ssid_le.SSID, 579 brcmf_dbg(SCAN, "SSID %s len=%d\n",
933 request->ssids->ssid_len); 580 params_le->ssid_le.SSID,
581 request->ssids->ssid_len);
934 params_le->ssid_le.SSID_len = 582 params_le->ssid_le.SSID_len =
935 cpu_to_le32(request->ssids->ssid_len); 583 cpu_to_le32(request->ssids->ssid_len);
936 memcpy(&params_le->ssid_le.SSID, request->ssids->ssid, 584 memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
@@ -952,7 +600,7 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
952 struct cfg80211_scan_request *scan_request; 600 struct cfg80211_scan_request *scan_request;
953 s32 err = 0; 601 s32 err = 0;
954 602
955 WL_SCAN("Enter\n"); 603 brcmf_dbg(SCAN, "Enter\n");
956 604
957 /* clear scan request, because the FW abort can cause a second call */ 605 /* clear scan request, because the FW abort can cause a second call */
958 /* to this functon and might cause a double cfg80211_scan_done */ 606 /* to this functon and might cause a double cfg80211_scan_done */
@@ -964,9 +612,9 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
964 612
965 if (fw_abort) { 613 if (fw_abort) {
966 /* Do a scan abort to stop the driver's scan engine */ 614 /* Do a scan abort to stop the driver's scan engine */
967 WL_SCAN("ABORT scan in firmware\n"); 615 brcmf_dbg(SCAN, "ABORT scan in firmware\n");
968 memset(&params_le, 0, sizeof(params_le)); 616 memset(&params_le, 0, sizeof(params_le));
969 memcpy(params_le.bssid, ether_bcast, ETH_ALEN); 617 memset(params_le.bssid, 0xFF, ETH_ALEN);
970 params_le.bss_type = DOT11_BSSTYPE_ANY; 618 params_le.bss_type = DOT11_BSSTYPE_ANY;
971 params_le.scan_type = 0; 619 params_le.scan_type = 0;
972 params_le.channel_num = cpu_to_le32(1); 620 params_le.channel_num = cpu_to_le32(1);
@@ -977,29 +625,29 @@ brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
977 /* Scan is aborted by setting channel_list[0] to -1 */ 625 /* Scan is aborted by setting channel_list[0] to -1 */
978 params_le.channel_list[0] = cpu_to_le16(-1); 626 params_le.channel_list[0] = cpu_to_le16(-1);
979 /* E-Scan (or anyother type) can be aborted by SCAN */ 627 /* E-Scan (or anyother type) can be aborted by SCAN */
980 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &params_le, 628 err = brcmf_fil_cmd_data_set(netdev_priv(ndev), BRCMF_C_SCAN,
981 sizeof(params_le)); 629 &params_le, sizeof(params_le));
982 if (err) 630 if (err)
983 WL_ERR("Scan abort failed\n"); 631 brcmf_err("Scan abort failed\n");
984 } 632 }
985 /* 633 /*
986 * e-scan can be initiated by scheduled scan 634 * e-scan can be initiated by scheduled scan
987 * which takes precedence. 635 * which takes precedence.
988 */ 636 */
989 if (cfg->sched_escan) { 637 if (cfg->sched_escan) {
990 WL_SCAN("scheduled scan completed\n"); 638 brcmf_dbg(SCAN, "scheduled scan completed\n");
991 cfg->sched_escan = false; 639 cfg->sched_escan = false;
992 if (!aborted) 640 if (!aborted)
993 cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); 641 cfg80211_sched_scan_results(cfg_to_wiphy(cfg));
994 brcmf_set_mpc(ndev, 1); 642 brcmf_set_mpc(ndev, 1);
995 } else if (scan_request) { 643 } else if (scan_request) {
996 WL_SCAN("ESCAN Completed scan: %s\n", 644 brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n",
997 aborted ? "Aborted" : "Done"); 645 aborted ? "Aborted" : "Done");
998 cfg80211_scan_done(scan_request, aborted); 646 cfg80211_scan_done(scan_request, aborted);
999 brcmf_set_mpc(ndev, 1); 647 brcmf_set_mpc(ndev, 1);
1000 } 648 }
1001 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) { 649 if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
1002 WL_ERR("Scan complete while device not scanning\n"); 650 brcmf_err("Scan complete while device not scanning\n");
1003 return -EPERM; 651 return -EPERM;
1004 } 652 }
1005 653
@@ -1015,7 +663,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
1015 struct brcmf_escan_params_le *params; 663 struct brcmf_escan_params_le *params;
1016 s32 err = 0; 664 s32 err = 0;
1017 665
1018 WL_SCAN("E-SCAN START\n"); 666 brcmf_dbg(SCAN, "E-SCAN START\n");
1019 667
1020 if (request != NULL) { 668 if (request != NULL) {
1021 /* Allocate space for populating ssids in struct */ 669 /* Allocate space for populating ssids in struct */
@@ -1036,13 +684,13 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct net_device *ndev,
1036 params->action = cpu_to_le16(action); 684 params->action = cpu_to_le16(action);
1037 params->sync_id = cpu_to_le16(0x1234); 685 params->sync_id = cpu_to_le16(0x1234);
1038 686
1039 err = brcmf_dev_iovar_setbuf(ndev, "escan", params, params_size, 687 err = brcmf_fil_iovar_data_set(netdev_priv(ndev), "escan",
1040 cfg->escan_ioctl_buf, BRCMF_DCMD_MEDLEN); 688 params, params_size);
1041 if (err) { 689 if (err) {
1042 if (err == -EBUSY) 690 if (err == -EBUSY)
1043 WL_INFO("system busy : escan canceled\n"); 691 brcmf_dbg(INFO, "system busy : escan canceled\n");
1044 else 692 else
1045 WL_ERR("error (%d)\n", err); 693 brcmf_err("error (%d)\n", err);
1046 } 694 }
1047 695
1048 kfree(params); 696 kfree(params);
@@ -1055,18 +703,18 @@ brcmf_do_escan(struct brcmf_cfg80211_info *cfg, struct wiphy *wiphy,
1055 struct net_device *ndev, struct cfg80211_scan_request *request) 703 struct net_device *ndev, struct cfg80211_scan_request *request)
1056{ 704{
1057 s32 err; 705 s32 err;
1058 __le32 passive_scan; 706 u32 passive_scan;
1059 struct brcmf_scan_results *results; 707 struct brcmf_scan_results *results;
1060 708
1061 WL_SCAN("Enter\n"); 709 brcmf_dbg(SCAN, "Enter\n");
1062 cfg->escan_info.ndev = ndev; 710 cfg->escan_info.ndev = ndev;
1063 cfg->escan_info.wiphy = wiphy; 711 cfg->escan_info.wiphy = wiphy;
1064 cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING; 712 cfg->escan_info.escan_state = WL_ESCAN_STATE_SCANNING;
1065 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1); 713 passive_scan = cfg->active_scan ? 0 : 1;
1066 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, 714 err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PASSIVE_SCAN,
1067 &passive_scan, sizeof(passive_scan)); 715 passive_scan);
1068 if (err) { 716 if (err) {
1069 WL_ERR("error (%d)\n", err); 717 brcmf_err("error (%d)\n", err);
1070 return err; 718 return err;
1071 } 719 }
1072 brcmf_set_mpc(ndev, 0); 720 brcmf_set_mpc(ndev, 0);
@@ -1086,29 +734,29 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
1086 struct cfg80211_scan_request *request, 734 struct cfg80211_scan_request *request,
1087 struct cfg80211_ssid *this_ssid) 735 struct cfg80211_ssid *this_ssid)
1088{ 736{
737 struct brcmf_if *ifp = netdev_priv(ndev);
1089 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 738 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1090 struct cfg80211_ssid *ssids; 739 struct cfg80211_ssid *ssids;
1091 struct brcmf_cfg80211_scan_req *sr = cfg->scan_req_int; 740 struct brcmf_cfg80211_scan_req *sr = &cfg->scan_req_int;
1092 __le32 passive_scan; 741 u32 passive_scan;
1093 bool escan_req; 742 bool escan_req;
1094 bool spec_scan; 743 bool spec_scan;
1095 s32 err; 744 s32 err;
1096 u32 SSID_len; 745 u32 SSID_len;
1097 746
1098 WL_SCAN("START ESCAN\n"); 747 brcmf_dbg(SCAN, "START ESCAN\n");
1099 748
1100 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) { 749 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
1101 WL_ERR("Scanning already : status (%lu)\n", cfg->status); 750 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
1102 return -EAGAIN; 751 return -EAGAIN;
1103 } 752 }
1104 if (test_bit(WL_STATUS_SCAN_ABORTING, &cfg->status)) { 753 if (test_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status)) {
1105 WL_ERR("Scanning being aborted : status (%lu)\n", 754 brcmf_err("Scanning being aborted: status (%lu)\n",
1106 cfg->status); 755 cfg->scan_status);
1107 return -EAGAIN; 756 return -EAGAIN;
1108 } 757 }
1109 if (test_bit(WL_STATUS_CONNECTING, &cfg->status)) { 758 if (test_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
1110 WL_ERR("Connecting : status (%lu)\n", 759 brcmf_err("Connecting: status (%lu)\n", ifp->vif->sme_state);
1111 cfg->status);
1112 return -EAGAIN; 760 return -EAGAIN;
1113 } 761 }
1114 762
@@ -1128,16 +776,14 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
1128 } 776 }
1129 777
1130 cfg->scan_request = request; 778 cfg->scan_request = request;
1131 set_bit(WL_STATUS_SCANNING, &cfg->status); 779 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
1132 if (escan_req) { 780 if (escan_req) {
1133 err = brcmf_do_escan(cfg, wiphy, ndev, request); 781 err = brcmf_do_escan(cfg, wiphy, ndev, request);
1134 if (!err) 782 if (err)
1135 return err;
1136 else
1137 goto scan_out; 783 goto scan_out;
1138 } else { 784 } else {
1139 WL_SCAN("ssid \"%s\", ssid_len (%d)\n", 785 brcmf_dbg(SCAN, "ssid \"%s\", ssid_len (%d)\n",
1140 ssids->ssid, ssids->ssid_len); 786 ssids->ssid, ssids->ssid_len);
1141 memset(&sr->ssid_le, 0, sizeof(sr->ssid_le)); 787 memset(&sr->ssid_le, 0, sizeof(sr->ssid_le));
1142 SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len); 788 SSID_len = min_t(u8, sizeof(sr->ssid_le.SSID), ssids->ssid_len);
1143 sr->ssid_le.SSID_len = cpu_to_le32(0); 789 sr->ssid_le.SSID_len = cpu_to_le32(0);
@@ -1147,24 +793,24 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
1147 sr->ssid_le.SSID_len = cpu_to_le32(SSID_len); 793 sr->ssid_le.SSID_len = cpu_to_le32(SSID_len);
1148 spec_scan = true; 794 spec_scan = true;
1149 } else 795 } else
1150 WL_SCAN("Broadcast scan\n"); 796 brcmf_dbg(SCAN, "Broadcast scan\n");
1151 797
1152 passive_scan = cfg->active_scan ? 0 : cpu_to_le32(1); 798 passive_scan = cfg->active_scan ? 0 : 1;
1153 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_PASSIVE_SCAN, 799 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PASSIVE_SCAN,
1154 &passive_scan, sizeof(passive_scan)); 800 passive_scan);
1155 if (err) { 801 if (err) {
1156 WL_ERR("WLC_SET_PASSIVE_SCAN error (%d)\n", err); 802 brcmf_err("WLC_SET_PASSIVE_SCAN error (%d)\n", err);
1157 goto scan_out; 803 goto scan_out;
1158 } 804 }
1159 brcmf_set_mpc(ndev, 0); 805 brcmf_set_mpc(ndev, 0);
1160 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN, &sr->ssid_le, 806 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
1161 sizeof(sr->ssid_le)); 807 &sr->ssid_le, sizeof(sr->ssid_le));
1162 if (err) { 808 if (err) {
1163 if (err == -EBUSY) 809 if (err == -EBUSY)
1164 WL_INFO("BUSY: scan for \"%s\" canceled\n", 810 brcmf_dbg(INFO, "BUSY: scan for \"%s\" canceled\n",
1165 sr->ssid_le.SSID); 811 sr->ssid_le.SSID);
1166 else 812 else
1167 WL_ERR("WLC_SCAN error (%d)\n", err); 813 brcmf_err("WLC_SCAN error (%d)\n", err);
1168 814
1169 brcmf_set_mpc(ndev, 1); 815 brcmf_set_mpc(ndev, 1);
1170 goto scan_out; 816 goto scan_out;
@@ -1174,7 +820,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct net_device *ndev,
1174 return 0; 820 return 0;
1175 821
1176scan_out: 822scan_out:
1177 clear_bit(WL_STATUS_SCANNING, &cfg->status); 823 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
1178 if (timer_pending(&cfg->escan_timeout)) 824 if (timer_pending(&cfg->escan_timeout))
1179 del_timer_sync(&cfg->escan_timeout); 825 del_timer_sync(&cfg->escan_timeout);
1180 cfg->scan_request = NULL; 826 cfg->scan_request = NULL;
@@ -1182,27 +828,23 @@ scan_out:
1182} 828}
1183 829
1184static s32 830static s32
1185brcmf_cfg80211_scan(struct wiphy *wiphy, 831brcmf_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
1186 struct cfg80211_scan_request *request)
1187{ 832{
1188 struct net_device *ndev = request->wdev->netdev; 833 struct net_device *ndev = request->wdev->netdev;
1189 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
1190 s32 err = 0; 834 s32 err = 0;
1191 835
1192 WL_TRACE("Enter\n"); 836 brcmf_dbg(TRACE, "Enter\n");
1193 837
1194 if (!check_sys_up(wiphy)) 838 if (!check_vif_up(container_of(request->wdev,
839 struct brcmf_cfg80211_vif, wdev)))
1195 return -EIO; 840 return -EIO;
1196 841
1197 if (cfg->iscan_on) 842 err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
1198 err = brcmf_cfg80211_iscan(wiphy, ndev, request, NULL);
1199 else if (cfg->escan_on)
1200 err = brcmf_cfg80211_escan(wiphy, ndev, request, NULL);
1201 843
1202 if (err) 844 if (err)
1203 WL_ERR("scan error (%d)\n", err); 845 brcmf_err("scan error (%d)\n", err);
1204 846
1205 WL_TRACE("Exit\n"); 847 brcmf_dbg(TRACE, "Exit\n");
1206 return err; 848 return err;
1207} 849}
1208 850
@@ -1210,9 +852,10 @@ static s32 brcmf_set_rts(struct net_device *ndev, u32 rts_threshold)
1210{ 852{
1211 s32 err = 0; 853 s32 err = 0;
1212 854
1213 err = brcmf_dev_intvar_set(ndev, "rtsthresh", rts_threshold); 855 err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "rtsthresh",
856 rts_threshold);
1214 if (err) 857 if (err)
1215 WL_ERR("Error (%d)\n", err); 858 brcmf_err("Error (%d)\n", err);
1216 859
1217 return err; 860 return err;
1218} 861}
@@ -1221,9 +864,10 @@ static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
1221{ 864{
1222 s32 err = 0; 865 s32 err = 0;
1223 866
1224 err = brcmf_dev_intvar_set(ndev, "fragthresh", frag_threshold); 867 err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "fragthresh",
868 frag_threshold);
1225 if (err) 869 if (err)
1226 WL_ERR("Error (%d)\n", err); 870 brcmf_err("Error (%d)\n", err);
1227 871
1228 return err; 872 return err;
1229} 873}
@@ -1231,11 +875,11 @@ static s32 brcmf_set_frag(struct net_device *ndev, u32 frag_threshold)
1231static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l) 875static s32 brcmf_set_retry(struct net_device *ndev, u32 retry, bool l)
1232{ 876{
1233 s32 err = 0; 877 s32 err = 0;
1234 u32 cmd = (l ? BRCM_SET_LRL : BRCM_SET_SRL); 878 u32 cmd = (l ? BRCMF_C_SET_LRL : BRCMF_C_SET_SRL);
1235 879
1236 err = brcmf_exec_dcmd_u32(ndev, cmd, &retry); 880 err = brcmf_fil_cmd_int_set(netdev_priv(ndev), cmd, retry);
1237 if (err) { 881 if (err) {
1238 WL_ERR("cmd (%d) , error (%d)\n", cmd, err); 882 brcmf_err("cmd (%d) , error (%d)\n", cmd, err);
1239 return err; 883 return err;
1240 } 884 }
1241 return err; 885 return err;
@@ -1245,10 +889,11 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1245{ 889{
1246 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 890 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1247 struct net_device *ndev = cfg_to_ndev(cfg); 891 struct net_device *ndev = cfg_to_ndev(cfg);
892 struct brcmf_if *ifp = netdev_priv(ndev);
1248 s32 err = 0; 893 s32 err = 0;
1249 894
1250 WL_TRACE("Enter\n"); 895 brcmf_dbg(TRACE, "Enter\n");
1251 if (!check_sys_up(wiphy)) 896 if (!check_vif_up(ifp->vif))
1252 return -EIO; 897 return -EIO;
1253 898
1254 if (changed & WIPHY_PARAM_RTS_THRESHOLD && 899 if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
@@ -1281,7 +926,7 @@ static s32 brcmf_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1281 } 926 }
1282 927
1283done: 928done:
1284 WL_TRACE("Exit\n"); 929 brcmf_dbg(TRACE, "Exit\n");
1285 return err; 930 return err;
1286} 931}
1287 932
@@ -1311,28 +956,26 @@ static void brcmf_ch_to_chanspec(int ch, struct brcmf_join_params *join_params,
1311 join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec); 956 join_params->params_le.chanspec_list[0] = cpu_to_le16(chanspec);
1312 join_params->params_le.chanspec_num = cpu_to_le32(1); 957 join_params->params_le.chanspec_num = cpu_to_le32(1);
1313 958
1314 WL_CONN("join_params->params.chanspec_list[0]= %#X," 959 brcmf_dbg(CONN, "channel %d, chanspec %#X\n", ch, chanspec);
1315 "channel %d, chanspec %#X\n",
1316 chanspec, ch, chanspec);
1317 } 960 }
1318} 961}
1319 962
1320static void brcmf_link_down(struct brcmf_cfg80211_info *cfg) 963static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
1321{ 964{
1322 struct net_device *ndev = NULL;
1323 s32 err = 0; 965 s32 err = 0;
1324 966
1325 WL_TRACE("Enter\n"); 967 brcmf_dbg(TRACE, "Enter\n");
1326 968
1327 if (cfg->link_up) { 969 if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
1328 ndev = cfg_to_ndev(cfg); 970 brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
1329 WL_INFO("Call WLC_DISASSOC to stop excess roaming\n "); 971 err = brcmf_fil_cmd_data_set(vif->ifp,
1330 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, NULL, 0); 972 BRCMF_C_DISASSOC, NULL, 0);
1331 if (err) 973 if (err)
1332 WL_ERR("WLC_DISASSOC failed (%d)\n", err); 974 brcmf_err("WLC_DISASSOC failed (%d)\n", err);
1333 cfg->link_up = false; 975 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
1334 } 976 }
1335 WL_TRACE("Exit\n"); 977 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
978 brcmf_dbg(TRACE, "Exit\n");
1336} 979}
1337 980
1338static s32 981static s32
@@ -1340,68 +983,71 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1340 struct cfg80211_ibss_params *params) 983 struct cfg80211_ibss_params *params)
1341{ 984{
1342 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 985 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1343 struct brcmf_cfg80211_profile *profile = cfg->profile; 986 struct brcmf_if *ifp = netdev_priv(ndev);
987 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
1344 struct brcmf_join_params join_params; 988 struct brcmf_join_params join_params;
1345 size_t join_params_size = 0; 989 size_t join_params_size = 0;
1346 s32 err = 0; 990 s32 err = 0;
1347 s32 wsec = 0; 991 s32 wsec = 0;
1348 s32 bcnprd; 992 s32 bcnprd;
1349 993
1350 WL_TRACE("Enter\n"); 994 brcmf_dbg(TRACE, "Enter\n");
1351 if (!check_sys_up(wiphy)) 995 if (!check_vif_up(ifp->vif))
1352 return -EIO; 996 return -EIO;
1353 997
1354 if (params->ssid) 998 if (params->ssid)
1355 WL_CONN("SSID: %s\n", params->ssid); 999 brcmf_dbg(CONN, "SSID: %s\n", params->ssid);
1356 else { 1000 else {
1357 WL_CONN("SSID: NULL, Not supported\n"); 1001 brcmf_dbg(CONN, "SSID: NULL, Not supported\n");
1358 return -EOPNOTSUPP; 1002 return -EOPNOTSUPP;
1359 } 1003 }
1360 1004
1361 set_bit(WL_STATUS_CONNECTING, &cfg->status); 1005 set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
1362 1006
1363 if (params->bssid) 1007 if (params->bssid)
1364 WL_CONN("BSSID: %pM\n", params->bssid); 1008 brcmf_dbg(CONN, "BSSID: %pM\n", params->bssid);
1365 else 1009 else
1366 WL_CONN("No BSSID specified\n"); 1010 brcmf_dbg(CONN, "No BSSID specified\n");
1367 1011
1368 if (params->channel) 1012 if (params->chandef.chan)
1369 WL_CONN("channel: %d\n", params->channel->center_freq); 1013 brcmf_dbg(CONN, "channel: %d\n",
1014 params->chandef.chan->center_freq);
1370 else 1015 else
1371 WL_CONN("no channel specified\n"); 1016 brcmf_dbg(CONN, "no channel specified\n");
1372 1017
1373 if (params->channel_fixed) 1018 if (params->channel_fixed)
1374 WL_CONN("fixed channel required\n"); 1019 brcmf_dbg(CONN, "fixed channel required\n");
1375 else 1020 else
1376 WL_CONN("no fixed channel required\n"); 1021 brcmf_dbg(CONN, "no fixed channel required\n");
1377 1022
1378 if (params->ie && params->ie_len) 1023 if (params->ie && params->ie_len)
1379 WL_CONN("ie len: %d\n", params->ie_len); 1024 brcmf_dbg(CONN, "ie len: %d\n", params->ie_len);
1380 else 1025 else
1381 WL_CONN("no ie specified\n"); 1026 brcmf_dbg(CONN, "no ie specified\n");
1382 1027
1383 if (params->beacon_interval) 1028 if (params->beacon_interval)
1384 WL_CONN("beacon interval: %d\n", params->beacon_interval); 1029 brcmf_dbg(CONN, "beacon interval: %d\n",
1030 params->beacon_interval);
1385 else 1031 else
1386 WL_CONN("no beacon interval specified\n"); 1032 brcmf_dbg(CONN, "no beacon interval specified\n");
1387 1033
1388 if (params->basic_rates) 1034 if (params->basic_rates)
1389 WL_CONN("basic rates: %08X\n", params->basic_rates); 1035 brcmf_dbg(CONN, "basic rates: %08X\n", params->basic_rates);
1390 else 1036 else
1391 WL_CONN("no basic rates specified\n"); 1037 brcmf_dbg(CONN, "no basic rates specified\n");
1392 1038
1393 if (params->privacy) 1039 if (params->privacy)
1394 WL_CONN("privacy required\n"); 1040 brcmf_dbg(CONN, "privacy required\n");
1395 else 1041 else
1396 WL_CONN("no privacy required\n"); 1042 brcmf_dbg(CONN, "no privacy required\n");
1397 1043
1398 /* Configure Privacy for starter */ 1044 /* Configure Privacy for starter */
1399 if (params->privacy) 1045 if (params->privacy)
1400 wsec |= WEP_ENABLED; 1046 wsec |= WEP_ENABLED;
1401 1047
1402 err = brcmf_dev_intvar_set(ndev, "wsec", wsec); 1048 err = brcmf_fil_iovar_int_set(ifp, "wsec", wsec);
1403 if (err) { 1049 if (err) {
1404 WL_ERR("wsec failed (%d)\n", err); 1050 brcmf_err("wsec failed (%d)\n", err);
1405 goto done; 1051 goto done;
1406 } 1052 }
1407 1053
@@ -1411,9 +1057,9 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1411 else 1057 else
1412 bcnprd = 100; 1058 bcnprd = 100;
1413 1059
1414 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_BCNPRD, &bcnprd); 1060 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD, bcnprd);
1415 if (err) { 1061 if (err) {
1416 WL_ERR("WLC_SET_BCNPRD failed (%d)\n", err); 1062 brcmf_err("WLC_SET_BCNPRD failed (%d)\n", err);
1417 goto done; 1063 goto done;
1418 } 1064 }
1419 1065
@@ -1434,17 +1080,17 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1434 BRCMF_ASSOC_PARAMS_FIXED_SIZE; 1080 BRCMF_ASSOC_PARAMS_FIXED_SIZE;
1435 memcpy(profile->bssid, params->bssid, ETH_ALEN); 1081 memcpy(profile->bssid, params->bssid, ETH_ALEN);
1436 } else { 1082 } else {
1437 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1083 memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
1438 memset(profile->bssid, 0, ETH_ALEN); 1084 memset(profile->bssid, 0, ETH_ALEN);
1439 } 1085 }
1440 1086
1441 /* Channel */ 1087 /* Channel */
1442 if (params->channel) { 1088 if (params->chandef.chan) {
1443 u32 target_channel; 1089 u32 target_channel;
1444 1090
1445 cfg->channel = 1091 cfg->channel =
1446 ieee80211_frequency_to_channel( 1092 ieee80211_frequency_to_channel(
1447 params->channel->center_freq); 1093 params->chandef.chan->center_freq);
1448 if (params->channel_fixed) { 1094 if (params->channel_fixed) {
1449 /* adding chanspec */ 1095 /* adding chanspec */
1450 brcmf_ch_to_chanspec(cfg->channel, 1096 brcmf_ch_to_chanspec(cfg->channel,
@@ -1453,10 +1099,10 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1453 1099
1454 /* set channel for starter */ 1100 /* set channel for starter */
1455 target_channel = cfg->channel; 1101 target_channel = cfg->channel;
1456 err = brcmf_exec_dcmd_u32(ndev, BRCM_SET_CHANNEL, 1102 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_CHANNEL,
1457 &target_channel); 1103 target_channel);
1458 if (err) { 1104 if (err) {
1459 WL_ERR("WLC_SET_CHANNEL failed (%d)\n", err); 1105 brcmf_err("WLC_SET_CHANNEL failed (%d)\n", err);
1460 goto done; 1106 goto done;
1461 } 1107 }
1462 } else 1108 } else
@@ -1465,33 +1111,33 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
1465 cfg->ibss_starter = false; 1111 cfg->ibss_starter = false;
1466 1112
1467 1113
1468 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1114 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
1469 &join_params, join_params_size); 1115 &join_params, join_params_size);
1470 if (err) { 1116 if (err) {
1471 WL_ERR("WLC_SET_SSID failed (%d)\n", err); 1117 brcmf_err("WLC_SET_SSID failed (%d)\n", err);
1472 goto done; 1118 goto done;
1473 } 1119 }
1474 1120
1475done: 1121done:
1476 if (err) 1122 if (err)
1477 clear_bit(WL_STATUS_CONNECTING, &cfg->status); 1123 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
1478 WL_TRACE("Exit\n"); 1124 brcmf_dbg(TRACE, "Exit\n");
1479 return err; 1125 return err;
1480} 1126}
1481 1127
1482static s32 1128static s32
1483brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) 1129brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1484{ 1130{
1485 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1131 struct brcmf_if *ifp = netdev_priv(ndev);
1486 s32 err = 0; 1132 s32 err = 0;
1487 1133
1488 WL_TRACE("Enter\n"); 1134 brcmf_dbg(TRACE, "Enter\n");
1489 if (!check_sys_up(wiphy)) 1135 if (!check_vif_up(ifp->vif))
1490 return -EIO; 1136 return -EIO;
1491 1137
1492 brcmf_link_down(cfg); 1138 brcmf_link_down(ifp->vif);
1493 1139
1494 WL_TRACE("Exit\n"); 1140 brcmf_dbg(TRACE, "Exit\n");
1495 1141
1496 return err; 1142 return err;
1497} 1143}
@@ -1499,8 +1145,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
1499static s32 brcmf_set_wpa_version(struct net_device *ndev, 1145static s32 brcmf_set_wpa_version(struct net_device *ndev,
1500 struct cfg80211_connect_params *sme) 1146 struct cfg80211_connect_params *sme)
1501{ 1147{
1502 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 1148 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1503 struct brcmf_cfg80211_profile *profile = cfg->profile;
1504 struct brcmf_cfg80211_security *sec; 1149 struct brcmf_cfg80211_security *sec;
1505 s32 val = 0; 1150 s32 val = 0;
1506 s32 err = 0; 1151 s32 err = 0;
@@ -1511,10 +1156,10 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1511 val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; 1156 val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED;
1512 else 1157 else
1513 val = WPA_AUTH_DISABLED; 1158 val = WPA_AUTH_DISABLED;
1514 WL_CONN("setting wpa_auth to 0x%0x\n", val); 1159 brcmf_dbg(CONN, "setting wpa_auth to 0x%0x\n", val);
1515 err = brcmf_dev_intvar_set(ndev, "wpa_auth", val); 1160 err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wpa_auth", val);
1516 if (err) { 1161 if (err) {
1517 WL_ERR("set wpa_auth failed (%d)\n", err); 1162 brcmf_err("set wpa_auth failed (%d)\n", err);
1518 return err; 1163 return err;
1519 } 1164 }
1520 sec = &profile->sec; 1165 sec = &profile->sec;
@@ -1525,8 +1170,7 @@ static s32 brcmf_set_wpa_version(struct net_device *ndev,
1525static s32 brcmf_set_auth_type(struct net_device *ndev, 1170static s32 brcmf_set_auth_type(struct net_device *ndev,
1526 struct cfg80211_connect_params *sme) 1171 struct cfg80211_connect_params *sme)
1527{ 1172{
1528 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 1173 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1529 struct brcmf_cfg80211_profile *profile = cfg->profile;
1530 struct brcmf_cfg80211_security *sec; 1174 struct brcmf_cfg80211_security *sec;
1531 s32 val = 0; 1175 s32 val = 0;
1532 s32 err = 0; 1176 s32 err = 0;
@@ -1534,27 +1178,27 @@ static s32 brcmf_set_auth_type(struct net_device *ndev,
1534 switch (sme->auth_type) { 1178 switch (sme->auth_type) {
1535 case NL80211_AUTHTYPE_OPEN_SYSTEM: 1179 case NL80211_AUTHTYPE_OPEN_SYSTEM:
1536 val = 0; 1180 val = 0;
1537 WL_CONN("open system\n"); 1181 brcmf_dbg(CONN, "open system\n");
1538 break; 1182 break;
1539 case NL80211_AUTHTYPE_SHARED_KEY: 1183 case NL80211_AUTHTYPE_SHARED_KEY:
1540 val = 1; 1184 val = 1;
1541 WL_CONN("shared key\n"); 1185 brcmf_dbg(CONN, "shared key\n");
1542 break; 1186 break;
1543 case NL80211_AUTHTYPE_AUTOMATIC: 1187 case NL80211_AUTHTYPE_AUTOMATIC:
1544 val = 2; 1188 val = 2;
1545 WL_CONN("automatic\n"); 1189 brcmf_dbg(CONN, "automatic\n");
1546 break; 1190 break;
1547 case NL80211_AUTHTYPE_NETWORK_EAP: 1191 case NL80211_AUTHTYPE_NETWORK_EAP:
1548 WL_CONN("network eap\n"); 1192 brcmf_dbg(CONN, "network eap\n");
1549 default: 1193 default:
1550 val = 2; 1194 val = 2;
1551 WL_ERR("invalid auth type (%d)\n", sme->auth_type); 1195 brcmf_err("invalid auth type (%d)\n", sme->auth_type);
1552 break; 1196 break;
1553 } 1197 }
1554 1198
1555 err = brcmf_dev_intvar_set(ndev, "auth", val); 1199 err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "auth", val);
1556 if (err) { 1200 if (err) {
1557 WL_ERR("set auth failed (%d)\n", err); 1201 brcmf_err("set auth failed (%d)\n", err);
1558 return err; 1202 return err;
1559 } 1203 }
1560 sec = &profile->sec; 1204 sec = &profile->sec;
@@ -1566,8 +1210,7 @@ static s32
1566brcmf_set_set_cipher(struct net_device *ndev, 1210brcmf_set_set_cipher(struct net_device *ndev,
1567 struct cfg80211_connect_params *sme) 1211 struct cfg80211_connect_params *sme)
1568{ 1212{
1569 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 1213 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1570 struct brcmf_cfg80211_profile *profile = cfg->profile;
1571 struct brcmf_cfg80211_security *sec; 1214 struct brcmf_cfg80211_security *sec;
1572 s32 pval = 0; 1215 s32 pval = 0;
1573 s32 gval = 0; 1216 s32 gval = 0;
@@ -1589,8 +1232,8 @@ brcmf_set_set_cipher(struct net_device *ndev,
1589 pval = AES_ENABLED; 1232 pval = AES_ENABLED;
1590 break; 1233 break;
1591 default: 1234 default:
1592 WL_ERR("invalid cipher pairwise (%d)\n", 1235 brcmf_err("invalid cipher pairwise (%d)\n",
1593 sme->crypto.ciphers_pairwise[0]); 1236 sme->crypto.ciphers_pairwise[0]);
1594 return -EINVAL; 1237 return -EINVAL;
1595 } 1238 }
1596 } 1239 }
@@ -1610,16 +1253,16 @@ brcmf_set_set_cipher(struct net_device *ndev,
1610 gval = AES_ENABLED; 1253 gval = AES_ENABLED;
1611 break; 1254 break;
1612 default: 1255 default:
1613 WL_ERR("invalid cipher group (%d)\n", 1256 brcmf_err("invalid cipher group (%d)\n",
1614 sme->crypto.cipher_group); 1257 sme->crypto.cipher_group);
1615 return -EINVAL; 1258 return -EINVAL;
1616 } 1259 }
1617 } 1260 }
1618 1261
1619 WL_CONN("pval (%d) gval (%d)\n", pval, gval); 1262 brcmf_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval);
1620 err = brcmf_dev_intvar_set(ndev, "wsec", pval | gval); 1263 err = brcmf_fil_iovar_int_set(netdev_priv(ndev), "wsec", pval | gval);
1621 if (err) { 1264 if (err) {
1622 WL_ERR("error (%d)\n", err); 1265 brcmf_err("error (%d)\n", err);
1623 return err; 1266 return err;
1624 } 1267 }
1625 1268
@@ -1633,16 +1276,16 @@ brcmf_set_set_cipher(struct net_device *ndev,
1633static s32 1276static s32
1634brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) 1277brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1635{ 1278{
1636 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 1279 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1637 struct brcmf_cfg80211_profile *profile = cfg->profile;
1638 struct brcmf_cfg80211_security *sec; 1280 struct brcmf_cfg80211_security *sec;
1639 s32 val = 0; 1281 s32 val = 0;
1640 s32 err = 0; 1282 s32 err = 0;
1641 1283
1642 if (sme->crypto.n_akm_suites) { 1284 if (sme->crypto.n_akm_suites) {
1643 err = brcmf_dev_intvar_get(ndev, "wpa_auth", &val); 1285 err = brcmf_fil_iovar_int_get(netdev_priv(ndev),
1286 "wpa_auth", &val);
1644 if (err) { 1287 if (err) {
1645 WL_ERR("could not get wpa_auth (%d)\n", err); 1288 brcmf_err("could not get wpa_auth (%d)\n", err);
1646 return err; 1289 return err;
1647 } 1290 }
1648 if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { 1291 if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) {
@@ -1654,8 +1297,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1654 val = WPA_AUTH_PSK; 1297 val = WPA_AUTH_PSK;
1655 break; 1298 break;
1656 default: 1299 default:
1657 WL_ERR("invalid cipher group (%d)\n", 1300 brcmf_err("invalid cipher group (%d)\n",
1658 sme->crypto.cipher_group); 1301 sme->crypto.cipher_group);
1659 return -EINVAL; 1302 return -EINVAL;
1660 } 1303 }
1661 } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { 1304 } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1667,16 +1310,17 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
1667 val = WPA2_AUTH_PSK; 1310 val = WPA2_AUTH_PSK;
1668 break; 1311 break;
1669 default: 1312 default:
1670 WL_ERR("invalid cipher group (%d)\n", 1313 brcmf_err("invalid cipher group (%d)\n",
1671 sme->crypto.cipher_group); 1314 sme->crypto.cipher_group);
1672 return -EINVAL; 1315 return -EINVAL;
1673 } 1316 }
1674 } 1317 }
1675 1318
1676 WL_CONN("setting wpa_auth to %d\n", val); 1319 brcmf_dbg(CONN, "setting wpa_auth to %d\n", val);
1677 err = brcmf_dev_intvar_set(ndev, "wpa_auth", val); 1320 err = brcmf_fil_iovar_int_set(netdev_priv(ndev),
1321 "wpa_auth", val);
1678 if (err) { 1322 if (err) {
1679 WL_ERR("could not set wpa_auth (%d)\n", err); 1323 brcmf_err("could not set wpa_auth (%d)\n", err);
1680 return err; 1324 return err;
1681 } 1325 }
1682 } 1326 }
@@ -1690,22 +1334,20 @@ static s32
1690brcmf_set_sharedkey(struct net_device *ndev, 1334brcmf_set_sharedkey(struct net_device *ndev,
1691 struct cfg80211_connect_params *sme) 1335 struct cfg80211_connect_params *sme)
1692{ 1336{
1693 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev); 1337 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
1694 struct brcmf_cfg80211_profile *profile = cfg->profile;
1695 struct brcmf_cfg80211_security *sec; 1338 struct brcmf_cfg80211_security *sec;
1696 struct brcmf_wsec_key key; 1339 struct brcmf_wsec_key key;
1697 s32 val; 1340 s32 val;
1698 s32 err = 0; 1341 s32 err = 0;
1699 s32 bssidx;
1700 1342
1701 WL_CONN("key len (%d)\n", sme->key_len); 1343 brcmf_dbg(CONN, "key len (%d)\n", sme->key_len);
1702 1344
1703 if (sme->key_len == 0) 1345 if (sme->key_len == 0)
1704 return 0; 1346 return 0;
1705 1347
1706 sec = &profile->sec; 1348 sec = &profile->sec;
1707 WL_CONN("wpa_versions 0x%x cipher_pairwise 0x%x\n", 1349 brcmf_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n",
1708 sec->wpa_versions, sec->cipher_pairwise); 1350 sec->wpa_versions, sec->cipher_pairwise);
1709 1351
1710 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2)) 1352 if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2))
1711 return 0; 1353 return 0;
@@ -1718,7 +1360,7 @@ brcmf_set_sharedkey(struct net_device *ndev,
1718 key.len = (u32) sme->key_len; 1360 key.len = (u32) sme->key_len;
1719 key.index = (u32) sme->key_idx; 1361 key.index = (u32) sme->key_idx;
1720 if (key.len > sizeof(key.data)) { 1362 if (key.len > sizeof(key.data)) {
1721 WL_ERR("Too long key length (%u)\n", key.len); 1363 brcmf_err("Too long key length (%u)\n", key.len);
1722 return -EINVAL; 1364 return -EINVAL;
1723 } 1365 }
1724 memcpy(key.data, sme->key, key.len); 1366 memcpy(key.data, sme->key, key.len);
@@ -1731,25 +1373,24 @@ brcmf_set_sharedkey(struct net_device *ndev,
1731 key.algo = CRYPTO_ALGO_WEP128; 1373 key.algo = CRYPTO_ALGO_WEP128;
1732 break; 1374 break;
1733 default: 1375 default:
1734 WL_ERR("Invalid algorithm (%d)\n", 1376 brcmf_err("Invalid algorithm (%d)\n",
1735 sme->crypto.ciphers_pairwise[0]); 1377 sme->crypto.ciphers_pairwise[0]);
1736 return -EINVAL; 1378 return -EINVAL;
1737 } 1379 }
1738 /* Set the new key/index */ 1380 /* Set the new key/index */
1739 WL_CONN("key length (%d) key index (%d) algo (%d)\n", 1381 brcmf_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n",
1740 key.len, key.index, key.algo); 1382 key.len, key.index, key.algo);
1741 WL_CONN("key \"%s\"\n", key.data); 1383 brcmf_dbg(CONN, "key \"%s\"\n", key.data);
1742 bssidx = brcmf_find_bssidx(cfg, ndev); 1384 err = send_key_to_dongle(ndev, &key);
1743 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
1744 if (err) 1385 if (err)
1745 return err; 1386 return err;
1746 1387
1747 if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { 1388 if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) {
1748 WL_CONN("set auth_type to shared key\n"); 1389 brcmf_dbg(CONN, "set auth_type to shared key\n");
1749 val = WL_AUTH_SHARED_KEY; /* shared key */ 1390 val = WL_AUTH_SHARED_KEY; /* shared key */
1750 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", val, bssidx); 1391 err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "auth", val);
1751 if (err) 1392 if (err)
1752 WL_ERR("set auth failed (%d)\n", err); 1393 brcmf_err("set auth failed (%d)\n", err);
1753 } 1394 }
1754 return err; 1395 return err;
1755} 1396}
@@ -1759,7 +1400,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1759 struct cfg80211_connect_params *sme) 1400 struct cfg80211_connect_params *sme)
1760{ 1401{
1761 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1402 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1762 struct brcmf_cfg80211_profile *profile = cfg->profile; 1403 struct brcmf_if *ifp = netdev_priv(ndev);
1404 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
1763 struct ieee80211_channel *chan = sme->channel; 1405 struct ieee80211_channel *chan = sme->channel;
1764 struct brcmf_join_params join_params; 1406 struct brcmf_join_params join_params;
1765 size_t join_params_size; 1407 size_t join_params_size;
@@ -1767,54 +1409,54 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1767 1409
1768 s32 err = 0; 1410 s32 err = 0;
1769 1411
1770 WL_TRACE("Enter\n"); 1412 brcmf_dbg(TRACE, "Enter\n");
1771 if (!check_sys_up(wiphy)) 1413 if (!check_vif_up(ifp->vif))
1772 return -EIO; 1414 return -EIO;
1773 1415
1774 if (!sme->ssid) { 1416 if (!sme->ssid) {
1775 WL_ERR("Invalid ssid\n"); 1417 brcmf_err("Invalid ssid\n");
1776 return -EOPNOTSUPP; 1418 return -EOPNOTSUPP;
1777 } 1419 }
1778 1420
1779 set_bit(WL_STATUS_CONNECTING, &cfg->status); 1421 set_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
1780 1422
1781 if (chan) { 1423 if (chan) {
1782 cfg->channel = 1424 cfg->channel =
1783 ieee80211_frequency_to_channel(chan->center_freq); 1425 ieee80211_frequency_to_channel(chan->center_freq);
1784 WL_CONN("channel (%d), center_req (%d)\n", 1426 brcmf_dbg(CONN, "channel (%d), center_req (%d)\n",
1785 cfg->channel, chan->center_freq); 1427 cfg->channel, chan->center_freq);
1786 } else 1428 } else
1787 cfg->channel = 0; 1429 cfg->channel = 0;
1788 1430
1789 WL_INFO("ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); 1431 brcmf_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len);
1790 1432
1791 err = brcmf_set_wpa_version(ndev, sme); 1433 err = brcmf_set_wpa_version(ndev, sme);
1792 if (err) { 1434 if (err) {
1793 WL_ERR("wl_set_wpa_version failed (%d)\n", err); 1435 brcmf_err("wl_set_wpa_version failed (%d)\n", err);
1794 goto done; 1436 goto done;
1795 } 1437 }
1796 1438
1797 err = brcmf_set_auth_type(ndev, sme); 1439 err = brcmf_set_auth_type(ndev, sme);
1798 if (err) { 1440 if (err) {
1799 WL_ERR("wl_set_auth_type failed (%d)\n", err); 1441 brcmf_err("wl_set_auth_type failed (%d)\n", err);
1800 goto done; 1442 goto done;
1801 } 1443 }
1802 1444
1803 err = brcmf_set_set_cipher(ndev, sme); 1445 err = brcmf_set_set_cipher(ndev, sme);
1804 if (err) { 1446 if (err) {
1805 WL_ERR("wl_set_set_cipher failed (%d)\n", err); 1447 brcmf_err("wl_set_set_cipher failed (%d)\n", err);
1806 goto done; 1448 goto done;
1807 } 1449 }
1808 1450
1809 err = brcmf_set_key_mgmt(ndev, sme); 1451 err = brcmf_set_key_mgmt(ndev, sme);
1810 if (err) { 1452 if (err) {
1811 WL_ERR("wl_set_key_mgmt failed (%d)\n", err); 1453 brcmf_err("wl_set_key_mgmt failed (%d)\n", err);
1812 goto done; 1454 goto done;
1813 } 1455 }
1814 1456
1815 err = brcmf_set_sharedkey(ndev, sme); 1457 err = brcmf_set_sharedkey(ndev, sme);
1816 if (err) { 1458 if (err) {
1817 WL_ERR("brcmf_set_sharedkey failed (%d)\n", err); 1459 brcmf_err("brcmf_set_sharedkey failed (%d)\n", err);
1818 goto done; 1460 goto done;
1819 } 1461 }
1820 1462
@@ -1827,23 +1469,23 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
1827 memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len); 1469 memcpy(&profile->ssid.SSID, sme->ssid, profile->ssid.SSID_len);
1828 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len); 1470 join_params.ssid_le.SSID_len = cpu_to_le32(profile->ssid.SSID_len);
1829 1471
1830 memcpy(join_params.params_le.bssid, ether_bcast, ETH_ALEN); 1472 memset(join_params.params_le.bssid, 0xFF, ETH_ALEN);
1831 1473
1832 if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN) 1474 if (ssid.SSID_len < IEEE80211_MAX_SSID_LEN)
1833 WL_CONN("ssid \"%s\", len (%d)\n", 1475 brcmf_dbg(CONN, "ssid \"%s\", len (%d)\n",
1834 ssid.SSID, ssid.SSID_len); 1476 ssid.SSID, ssid.SSID_len);
1835 1477
1836 brcmf_ch_to_chanspec(cfg->channel, 1478 brcmf_ch_to_chanspec(cfg->channel,
1837 &join_params, &join_params_size); 1479 &join_params, &join_params_size);
1838 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, 1480 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
1839 &join_params, join_params_size); 1481 &join_params, join_params_size);
1840 if (err) 1482 if (err)
1841 WL_ERR("WLC_SET_SSID failed (%d)\n", err); 1483 brcmf_err("WLC_SET_SSID failed (%d)\n", err);
1842 1484
1843done: 1485done:
1844 if (err) 1486 if (err)
1845 clear_bit(WL_STATUS_CONNECTING, &cfg->status); 1487 clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
1846 WL_TRACE("Exit\n"); 1488 brcmf_dbg(TRACE, "Exit\n");
1847 return err; 1489 return err;
1848} 1490}
1849 1491
@@ -1851,44 +1493,43 @@ static s32
1851brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, 1493brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
1852 u16 reason_code) 1494 u16 reason_code)
1853{ 1495{
1854 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1496 struct brcmf_if *ifp = netdev_priv(ndev);
1855 struct brcmf_cfg80211_profile *profile = cfg->profile; 1497 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
1856 struct brcmf_scb_val_le scbval; 1498 struct brcmf_scb_val_le scbval;
1857 s32 err = 0; 1499 s32 err = 0;
1858 1500
1859 WL_TRACE("Enter. Reason code = %d\n", reason_code); 1501 brcmf_dbg(TRACE, "Enter. Reason code = %d\n", reason_code);
1860 if (!check_sys_up(wiphy)) 1502 if (!check_vif_up(ifp->vif))
1861 return -EIO; 1503 return -EIO;
1862 1504
1863 clear_bit(WL_STATUS_CONNECTED, &cfg->status); 1505 clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
1864 1506
1865 memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); 1507 memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
1866 scbval.val = cpu_to_le32(reason_code); 1508 scbval.val = cpu_to_le32(reason_code);
1867 err = brcmf_exec_dcmd(ndev, BRCMF_C_DISASSOC, &scbval, 1509 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_DISASSOC,
1868 sizeof(struct brcmf_scb_val_le)); 1510 &scbval, sizeof(scbval));
1869 if (err) 1511 if (err)
1870 WL_ERR("error (%d)\n", err); 1512 brcmf_err("error (%d)\n", err);
1871
1872 cfg->link_up = false;
1873 1513
1874 WL_TRACE("Exit\n"); 1514 brcmf_dbg(TRACE, "Exit\n");
1875 return err; 1515 return err;
1876} 1516}
1877 1517
1878static s32 1518static s32
1879brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, 1519brcmf_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
1880 enum nl80211_tx_power_setting type, s32 mbm) 1520 enum nl80211_tx_power_setting type, s32 mbm)
1881{ 1521{
1882 1522
1883 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1523 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1884 struct net_device *ndev = cfg_to_ndev(cfg); 1524 struct net_device *ndev = cfg_to_ndev(cfg);
1525 struct brcmf_if *ifp = netdev_priv(ndev);
1885 u16 txpwrmw; 1526 u16 txpwrmw;
1886 s32 err = 0; 1527 s32 err = 0;
1887 s32 disable = 0; 1528 s32 disable = 0;
1888 s32 dbm = MBM_TO_DBM(mbm); 1529 s32 dbm = MBM_TO_DBM(mbm);
1889 1530
1890 WL_TRACE("Enter\n"); 1531 brcmf_dbg(TRACE, "Enter\n");
1891 if (!check_sys_up(wiphy)) 1532 if (!check_vif_up(ifp->vif))
1892 return -EIO; 1533 return -EIO;
1893 1534
1894 switch (type) { 1535 switch (type) {
@@ -1897,7 +1538,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1897 case NL80211_TX_POWER_LIMITED: 1538 case NL80211_TX_POWER_LIMITED:
1898 case NL80211_TX_POWER_FIXED: 1539 case NL80211_TX_POWER_FIXED:
1899 if (dbm < 0) { 1540 if (dbm < 0) {
1900 WL_ERR("TX_POWER_FIXED - dbm is negative\n"); 1541 brcmf_err("TX_POWER_FIXED - dbm is negative\n");
1901 err = -EINVAL; 1542 err = -EINVAL;
1902 goto done; 1543 goto done;
1903 } 1544 }
@@ -1905,40 +1546,42 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
1905 } 1546 }
1906 /* Make sure radio is off or on as far as software is concerned */ 1547 /* Make sure radio is off or on as far as software is concerned */
1907 disable = WL_RADIO_SW_DISABLE << 16; 1548 disable = WL_RADIO_SW_DISABLE << 16;
1908 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_RADIO, &disable); 1549 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_RADIO, disable);
1909 if (err) 1550 if (err)
1910 WL_ERR("WLC_SET_RADIO error (%d)\n", err); 1551 brcmf_err("WLC_SET_RADIO error (%d)\n", err);
1911 1552
1912 if (dbm > 0xffff) 1553 if (dbm > 0xffff)
1913 txpwrmw = 0xffff; 1554 txpwrmw = 0xffff;
1914 else 1555 else
1915 txpwrmw = (u16) dbm; 1556 txpwrmw = (u16) dbm;
1916 err = brcmf_dev_intvar_set(ndev, "qtxpower", 1557 err = brcmf_fil_iovar_int_set(ifp, "qtxpower",
1917 (s32) (brcmf_mw_to_qdbm(txpwrmw))); 1558 (s32)brcmf_mw_to_qdbm(txpwrmw));
1918 if (err) 1559 if (err)
1919 WL_ERR("qtxpower error (%d)\n", err); 1560 brcmf_err("qtxpower error (%d)\n", err);
1920 cfg->conf->tx_power = dbm; 1561 cfg->conf->tx_power = dbm;
1921 1562
1922done: 1563done:
1923 WL_TRACE("Exit\n"); 1564 brcmf_dbg(TRACE, "Exit\n");
1924 return err; 1565 return err;
1925} 1566}
1926 1567
1927static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm) 1568static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy,
1569 struct wireless_dev *wdev,
1570 s32 *dbm)
1928{ 1571{
1929 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1572 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1930 struct net_device *ndev = cfg_to_ndev(cfg); 1573 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
1931 s32 txpwrdbm; 1574 s32 txpwrdbm;
1932 u8 result; 1575 u8 result;
1933 s32 err = 0; 1576 s32 err = 0;
1934 1577
1935 WL_TRACE("Enter\n"); 1578 brcmf_dbg(TRACE, "Enter\n");
1936 if (!check_sys_up(wiphy)) 1579 if (!check_vif_up(ifp->vif))
1937 return -EIO; 1580 return -EIO;
1938 1581
1939 err = brcmf_dev_intvar_get(ndev, "qtxpower", &txpwrdbm); 1582 err = brcmf_fil_iovar_int_get(ifp, "qtxpower", &txpwrdbm);
1940 if (err) { 1583 if (err) {
1941 WL_ERR("error (%d)\n", err); 1584 brcmf_err("error (%d)\n", err);
1942 goto done; 1585 goto done;
1943 } 1586 }
1944 1587
@@ -1946,7 +1589,7 @@ static s32 brcmf_cfg80211_get_tx_power(struct wiphy *wiphy, s32 *dbm)
1946 *dbm = (s32) brcmf_qdbm_to_mw(result); 1589 *dbm = (s32) brcmf_qdbm_to_mw(result);
1947 1590
1948done: 1591done:
1949 WL_TRACE("Exit\n"); 1592 brcmf_dbg(TRACE, "Exit\n");
1950 return err; 1593 return err;
1951} 1594}
1952 1595
@@ -1954,34 +1597,32 @@ static s32
1954brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, 1597brcmf_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev,
1955 u8 key_idx, bool unicast, bool multicast) 1598 u8 key_idx, bool unicast, bool multicast)
1956{ 1599{
1957 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1600 struct brcmf_if *ifp = netdev_priv(ndev);
1958 u32 index; 1601 u32 index;
1959 u32 wsec; 1602 u32 wsec;
1960 s32 err = 0; 1603 s32 err = 0;
1961 s32 bssidx;
1962 1604
1963 WL_TRACE("Enter\n"); 1605 brcmf_dbg(TRACE, "Enter\n");
1964 WL_CONN("key index (%d)\n", key_idx); 1606 brcmf_dbg(CONN, "key index (%d)\n", key_idx);
1965 if (!check_sys_up(wiphy)) 1607 if (!check_vif_up(ifp->vif))
1966 return -EIO; 1608 return -EIO;
1967 1609
1968 bssidx = brcmf_find_bssidx(cfg, ndev); 1610 err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
1969 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
1970 if (err) { 1611 if (err) {
1971 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 1612 brcmf_err("WLC_GET_WSEC error (%d)\n", err);
1972 goto done; 1613 goto done;
1973 } 1614 }
1974 1615
1975 if (wsec & WEP_ENABLED) { 1616 if (wsec & WEP_ENABLED) {
1976 /* Just select a new current key */ 1617 /* Just select a new current key */
1977 index = key_idx; 1618 index = key_idx;
1978 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_KEY_PRIMARY, 1619 err = brcmf_fil_cmd_int_set(ifp,
1979 &index); 1620 BRCMF_C_SET_KEY_PRIMARY, index);
1980 if (err) 1621 if (err)
1981 WL_ERR("error (%d)\n", err); 1622 brcmf_err("error (%d)\n", err);
1982 } 1623 }
1983done: 1624done:
1984 WL_TRACE("Exit\n"); 1625 brcmf_dbg(TRACE, "Exit\n");
1985 return err; 1626 return err;
1986} 1627}
1987 1628
@@ -1989,11 +1630,8 @@ static s32
1989brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, 1630brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
1990 u8 key_idx, const u8 *mac_addr, struct key_params *params) 1631 u8 key_idx, const u8 *mac_addr, struct key_params *params)
1991{ 1632{
1992 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1993 struct brcmf_wsec_key key; 1633 struct brcmf_wsec_key key;
1994 struct brcmf_wsec_key_le key_le;
1995 s32 err = 0; 1634 s32 err = 0;
1996 s32 bssidx;
1997 1635
1998 memset(&key, 0, sizeof(key)); 1636 memset(&key, 0, sizeof(key));
1999 key.index = (u32) key_idx; 1637 key.index = (u32) key_idx;
@@ -2002,20 +1640,19 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
2002 if (!is_multicast_ether_addr(mac_addr)) 1640 if (!is_multicast_ether_addr(mac_addr))
2003 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN); 1641 memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN);
2004 key.len = (u32) params->key_len; 1642 key.len = (u32) params->key_len;
2005 bssidx = brcmf_find_bssidx(cfg, ndev);
2006 /* check for key index change */ 1643 /* check for key index change */
2007 if (key.len == 0) { 1644 if (key.len == 0) {
2008 /* key delete */ 1645 /* key delete */
2009 err = send_key_to_dongle(cfg, bssidx, ndev, &key); 1646 err = send_key_to_dongle(ndev, &key);
2010 if (err) 1647 if (err)
2011 WL_ERR("key delete error (%d)\n", err); 1648 brcmf_err("key delete error (%d)\n", err);
2012 } else { 1649 } else {
2013 if (key.len > sizeof(key.data)) { 1650 if (key.len > sizeof(key.data)) {
2014 WL_ERR("Invalid key length (%d)\n", key.len); 1651 brcmf_err("Invalid key length (%d)\n", key.len);
2015 return -EINVAL; 1652 return -EINVAL;
2016 } 1653 }
2017 1654
2018 WL_CONN("Setting the key index %d\n", key.index); 1655 brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
2019 memcpy(key.data, params->key, key.len); 1656 memcpy(key.data, params->key, key.len);
2020 1657
2021 if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { 1658 if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
@@ -2039,37 +1676,31 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
2039 switch (params->cipher) { 1676 switch (params->cipher) {
2040 case WLAN_CIPHER_SUITE_WEP40: 1677 case WLAN_CIPHER_SUITE_WEP40:
2041 key.algo = CRYPTO_ALGO_WEP1; 1678 key.algo = CRYPTO_ALGO_WEP1;
2042 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 1679 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
2043 break; 1680 break;
2044 case WLAN_CIPHER_SUITE_WEP104: 1681 case WLAN_CIPHER_SUITE_WEP104:
2045 key.algo = CRYPTO_ALGO_WEP128; 1682 key.algo = CRYPTO_ALGO_WEP128;
2046 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); 1683 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2047 break; 1684 break;
2048 case WLAN_CIPHER_SUITE_TKIP: 1685 case WLAN_CIPHER_SUITE_TKIP:
2049 key.algo = CRYPTO_ALGO_TKIP; 1686 key.algo = CRYPTO_ALGO_TKIP;
2050 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); 1687 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
2051 break; 1688 break;
2052 case WLAN_CIPHER_SUITE_AES_CMAC: 1689 case WLAN_CIPHER_SUITE_AES_CMAC:
2053 key.algo = CRYPTO_ALGO_AES_CCM; 1690 key.algo = CRYPTO_ALGO_AES_CCM;
2054 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); 1691 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
2055 break; 1692 break;
2056 case WLAN_CIPHER_SUITE_CCMP: 1693 case WLAN_CIPHER_SUITE_CCMP:
2057 key.algo = CRYPTO_ALGO_AES_CCM; 1694 key.algo = CRYPTO_ALGO_AES_CCM;
2058 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); 1695 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
2059 break; 1696 break;
2060 default: 1697 default:
2061 WL_ERR("Invalid cipher (0x%x)\n", params->cipher); 1698 brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
2062 return -EINVAL; 1699 return -EINVAL;
2063 } 1700 }
2064 convert_key_from_CPU(&key, &key_le); 1701 err = send_key_to_dongle(ndev, &key);
2065
2066 brcmf_netdev_wait_pend8021x(ndev);
2067 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "wsec_key", &key_le,
2068 sizeof(key_le),
2069 cfg->extra_buf,
2070 WL_EXTRA_BUF_MAX, bssidx);
2071 if (err) 1702 if (err)
2072 WL_ERR("wsec_key error (%d)\n", err); 1703 brcmf_err("wsec_key error (%d)\n", err);
2073 } 1704 }
2074 return err; 1705 return err;
2075} 1706}
@@ -2079,21 +1710,20 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2079 u8 key_idx, bool pairwise, const u8 *mac_addr, 1710 u8 key_idx, bool pairwise, const u8 *mac_addr,
2080 struct key_params *params) 1711 struct key_params *params)
2081{ 1712{
2082 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1713 struct brcmf_if *ifp = netdev_priv(ndev);
2083 struct brcmf_wsec_key key; 1714 struct brcmf_wsec_key key;
2084 s32 val; 1715 s32 val;
2085 s32 wsec; 1716 s32 wsec;
2086 s32 err = 0; 1717 s32 err = 0;
2087 u8 keybuf[8]; 1718 u8 keybuf[8];
2088 s32 bssidx;
2089 1719
2090 WL_TRACE("Enter\n"); 1720 brcmf_dbg(TRACE, "Enter\n");
2091 WL_CONN("key index (%d)\n", key_idx); 1721 brcmf_dbg(CONN, "key index (%d)\n", key_idx);
2092 if (!check_sys_up(wiphy)) 1722 if (!check_vif_up(ifp->vif))
2093 return -EIO; 1723 return -EIO;
2094 1724
2095 if (mac_addr) { 1725 if (mac_addr) {
2096 WL_TRACE("Exit"); 1726 brcmf_dbg(TRACE, "Exit");
2097 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params); 1727 return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
2098 } 1728 }
2099 memset(&key, 0, sizeof(key)); 1729 memset(&key, 0, sizeof(key));
@@ -2102,7 +1732,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2102 key.index = (u32) key_idx; 1732 key.index = (u32) key_idx;
2103 1733
2104 if (key.len > sizeof(key.data)) { 1734 if (key.len > sizeof(key.data)) {
2105 WL_ERR("Too long key length (%u)\n", key.len); 1735 brcmf_err("Too long key length (%u)\n", key.len);
2106 err = -EINVAL; 1736 err = -EINVAL;
2107 goto done; 1737 goto done;
2108 } 1738 }
@@ -2113,59 +1743,58 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
2113 case WLAN_CIPHER_SUITE_WEP40: 1743 case WLAN_CIPHER_SUITE_WEP40:
2114 key.algo = CRYPTO_ALGO_WEP1; 1744 key.algo = CRYPTO_ALGO_WEP1;
2115 val = WEP_ENABLED; 1745 val = WEP_ENABLED;
2116 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 1746 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
2117 break; 1747 break;
2118 case WLAN_CIPHER_SUITE_WEP104: 1748 case WLAN_CIPHER_SUITE_WEP104:
2119 key.algo = CRYPTO_ALGO_WEP128; 1749 key.algo = CRYPTO_ALGO_WEP128;
2120 val = WEP_ENABLED; 1750 val = WEP_ENABLED;
2121 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); 1751 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2122 break; 1752 break;
2123 case WLAN_CIPHER_SUITE_TKIP: 1753 case WLAN_CIPHER_SUITE_TKIP:
2124 if (cfg->conf->mode != WL_MODE_AP) { 1754 if (ifp->vif->mode != WL_MODE_AP) {
2125 WL_CONN("Swapping key\n"); 1755 brcmf_dbg(CONN, "Swapping key\n");
2126 memcpy(keybuf, &key.data[24], sizeof(keybuf)); 1756 memcpy(keybuf, &key.data[24], sizeof(keybuf));
2127 memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); 1757 memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
2128 memcpy(&key.data[16], keybuf, sizeof(keybuf)); 1758 memcpy(&key.data[16], keybuf, sizeof(keybuf));
2129 } 1759 }
2130 key.algo = CRYPTO_ALGO_TKIP; 1760 key.algo = CRYPTO_ALGO_TKIP;
2131 val = TKIP_ENABLED; 1761 val = TKIP_ENABLED;
2132 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); 1762 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
2133 break; 1763 break;
2134 case WLAN_CIPHER_SUITE_AES_CMAC: 1764 case WLAN_CIPHER_SUITE_AES_CMAC:
2135 key.algo = CRYPTO_ALGO_AES_CCM; 1765 key.algo = CRYPTO_ALGO_AES_CCM;
2136 val = AES_ENABLED; 1766 val = AES_ENABLED;
2137 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); 1767 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
2138 break; 1768 break;
2139 case WLAN_CIPHER_SUITE_CCMP: 1769 case WLAN_CIPHER_SUITE_CCMP:
2140 key.algo = CRYPTO_ALGO_AES_CCM; 1770 key.algo = CRYPTO_ALGO_AES_CCM;
2141 val = AES_ENABLED; 1771 val = AES_ENABLED;
2142 WL_CONN("WLAN_CIPHER_SUITE_CCMP\n"); 1772 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n");
2143 break; 1773 break;
2144 default: 1774 default:
2145 WL_ERR("Invalid cipher (0x%x)\n", params->cipher); 1775 brcmf_err("Invalid cipher (0x%x)\n", params->cipher);
2146 err = -EINVAL; 1776 err = -EINVAL;
2147 goto done; 1777 goto done;
2148 } 1778 }
2149 1779
2150 bssidx = brcmf_find_bssidx(cfg, ndev); 1780 err = send_key_to_dongle(ndev, &key);
2151 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
2152 if (err) 1781 if (err)
2153 goto done; 1782 goto done;
2154 1783
2155 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx); 1784 err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
2156 if (err) { 1785 if (err) {
2157 WL_ERR("get wsec error (%d)\n", err); 1786 brcmf_err("get wsec error (%d)\n", err);
2158 goto done; 1787 goto done;
2159 } 1788 }
2160 wsec |= val; 1789 wsec |= val;
2161 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx); 1790 err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
2162 if (err) { 1791 if (err) {
2163 WL_ERR("set wsec error (%d)\n", err); 1792 brcmf_err("set wsec error (%d)\n", err);
2164 goto done; 1793 goto done;
2165 } 1794 }
2166 1795
2167done: 1796done:
2168 WL_TRACE("Exit\n"); 1797 brcmf_dbg(TRACE, "Exit\n");
2169 return err; 1798 return err;
2170} 1799}
2171 1800
@@ -2173,37 +1802,32 @@ static s32
2173brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, 1802brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
2174 u8 key_idx, bool pairwise, const u8 *mac_addr) 1803 u8 key_idx, bool pairwise, const u8 *mac_addr)
2175{ 1804{
2176 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1805 struct brcmf_if *ifp = netdev_priv(ndev);
2177 struct brcmf_wsec_key key; 1806 struct brcmf_wsec_key key;
2178 s32 err = 0; 1807 s32 err = 0;
2179 s32 bssidx;
2180 1808
2181 WL_TRACE("Enter\n"); 1809 brcmf_dbg(TRACE, "Enter\n");
2182 if (!check_sys_up(wiphy)) 1810 if (!check_vif_up(ifp->vif))
2183 return -EIO; 1811 return -EIO;
2184 1812
1813 if (key_idx >= DOT11_MAX_DEFAULT_KEYS) {
1814 /* we ignore this key index in this case */
1815 brcmf_err("invalid key index (%d)\n", key_idx);
1816 return -EINVAL;
1817 }
1818
2185 memset(&key, 0, sizeof(key)); 1819 memset(&key, 0, sizeof(key));
2186 1820
2187 key.index = (u32) key_idx; 1821 key.index = (u32) key_idx;
2188 key.flags = BRCMF_PRIMARY_KEY; 1822 key.flags = BRCMF_PRIMARY_KEY;
2189 key.algo = CRYPTO_ALGO_OFF; 1823 key.algo = CRYPTO_ALGO_OFF;
2190 1824
2191 WL_CONN("key index (%d)\n", key_idx); 1825 brcmf_dbg(CONN, "key index (%d)\n", key_idx);
2192 1826
2193 /* Set the new key/index */ 1827 /* Set the new key/index */
2194 bssidx = brcmf_find_bssidx(cfg, ndev); 1828 err = send_key_to_dongle(ndev, &key);
2195 err = send_key_to_dongle(cfg, bssidx, ndev, &key);
2196 if (err) {
2197 if (err == -EINVAL) {
2198 if (key.index >= DOT11_MAX_DEFAULT_KEYS)
2199 /* we ignore this key index in this case */
2200 WL_ERR("invalid key index (%d)\n", key_idx);
2201 }
2202 /* Ignore this error, may happen during DISASSOC */
2203 err = -EAGAIN;
2204 }
2205 1829
2206 WL_TRACE("Exit\n"); 1830 brcmf_dbg(TRACE, "Exit\n");
2207 return err; 1831 return err;
2208} 1832}
2209 1833
@@ -2213,24 +1837,22 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
2213 void (*callback) (void *cookie, struct key_params * params)) 1837 void (*callback) (void *cookie, struct key_params * params))
2214{ 1838{
2215 struct key_params params; 1839 struct key_params params;
2216 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1840 struct brcmf_if *ifp = netdev_priv(ndev);
2217 struct brcmf_cfg80211_profile *profile = cfg->profile; 1841 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
2218 struct brcmf_cfg80211_security *sec; 1842 struct brcmf_cfg80211_security *sec;
2219 s32 wsec; 1843 s32 wsec;
2220 s32 err = 0; 1844 s32 err = 0;
2221 s32 bssidx;
2222 1845
2223 WL_TRACE("Enter\n"); 1846 brcmf_dbg(TRACE, "Enter\n");
2224 WL_CONN("key index (%d)\n", key_idx); 1847 brcmf_dbg(CONN, "key index (%d)\n", key_idx);
2225 if (!check_sys_up(wiphy)) 1848 if (!check_vif_up(ifp->vif))
2226 return -EIO; 1849 return -EIO;
2227 1850
2228 memset(&params, 0, sizeof(params)); 1851 memset(&params, 0, sizeof(params));
2229 1852
2230 bssidx = brcmf_find_bssidx(cfg, ndev); 1853 err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec);
2231 err = brcmf_dev_intvar_get_bsscfg(ndev, "wsec", &wsec, bssidx);
2232 if (err) { 1854 if (err) {
2233 WL_ERR("WLC_GET_WSEC error (%d)\n", err); 1855 brcmf_err("WLC_GET_WSEC error (%d)\n", err);
2234 /* Ignore this error, may happen during DISASSOC */ 1856 /* Ignore this error, may happen during DISASSOC */
2235 err = -EAGAIN; 1857 err = -EAGAIN;
2236 goto done; 1858 goto done;
@@ -2240,29 +1862,29 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
2240 sec = &profile->sec; 1862 sec = &profile->sec;
2241 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { 1863 if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
2242 params.cipher = WLAN_CIPHER_SUITE_WEP40; 1864 params.cipher = WLAN_CIPHER_SUITE_WEP40;
2243 WL_CONN("WLAN_CIPHER_SUITE_WEP40\n"); 1865 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n");
2244 } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) { 1866 } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) {
2245 params.cipher = WLAN_CIPHER_SUITE_WEP104; 1867 params.cipher = WLAN_CIPHER_SUITE_WEP104;
2246 WL_CONN("WLAN_CIPHER_SUITE_WEP104\n"); 1868 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
2247 } 1869 }
2248 break; 1870 break;
2249 case TKIP_ENABLED: 1871 case TKIP_ENABLED:
2250 params.cipher = WLAN_CIPHER_SUITE_TKIP; 1872 params.cipher = WLAN_CIPHER_SUITE_TKIP;
2251 WL_CONN("WLAN_CIPHER_SUITE_TKIP\n"); 1873 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
2252 break; 1874 break;
2253 case AES_ENABLED: 1875 case AES_ENABLED:
2254 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; 1876 params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
2255 WL_CONN("WLAN_CIPHER_SUITE_AES_CMAC\n"); 1877 brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
2256 break; 1878 break;
2257 default: 1879 default:
2258 WL_ERR("Invalid algo (0x%x)\n", wsec); 1880 brcmf_err("Invalid algo (0x%x)\n", wsec);
2259 err = -EINVAL; 1881 err = -EINVAL;
2260 goto done; 1882 goto done;
2261 } 1883 }
2262 callback(cookie, &params); 1884 callback(cookie, &params);
2263 1885
2264done: 1886done:
2265 WL_TRACE("Exit\n"); 1887 brcmf_dbg(TRACE, "Exit\n");
2266 return err; 1888 return err;
2267} 1889}
2268 1890
@@ -2270,7 +1892,7 @@ static s32
2270brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, 1892brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy,
2271 struct net_device *ndev, u8 key_idx) 1893 struct net_device *ndev, u8 key_idx)
2272{ 1894{
2273 WL_INFO("Not supported\n"); 1895 brcmf_dbg(INFO, "Not supported\n");
2274 1896
2275 return -EOPNOTSUPP; 1897 return -EOPNOTSUPP;
2276} 1898}
@@ -2279,73 +1901,73 @@ static s32
2279brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, 1901brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
2280 u8 *mac, struct station_info *sinfo) 1902 u8 *mac, struct station_info *sinfo)
2281{ 1903{
2282 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1904 struct brcmf_if *ifp = netdev_priv(ndev);
2283 struct brcmf_cfg80211_profile *profile = cfg->profile; 1905 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
2284 struct brcmf_scb_val_le scb_val; 1906 struct brcmf_scb_val_le scb_val;
2285 int rssi; 1907 int rssi;
2286 s32 rate; 1908 s32 rate;
2287 s32 err = 0; 1909 s32 err = 0;
2288 u8 *bssid = profile->bssid; 1910 u8 *bssid = profile->bssid;
2289 struct brcmf_sta_info_le *sta_info_le; 1911 struct brcmf_sta_info_le sta_info_le;
2290 1912
2291 WL_TRACE("Enter, MAC %pM\n", mac); 1913 brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
2292 if (!check_sys_up(wiphy)) 1914 if (!check_vif_up(ifp->vif))
2293 return -EIO; 1915 return -EIO;
2294 1916
2295 if (cfg->conf->mode == WL_MODE_AP) { 1917 if (ifp->vif->mode == WL_MODE_AP) {
2296 err = brcmf_dev_iovar_getbuf(ndev, "sta_info", mac, ETH_ALEN, 1918 memcpy(&sta_info_le, mac, ETH_ALEN);
2297 cfg->dcmd_buf, 1919 err = brcmf_fil_iovar_data_get(ifp, "sta_info",
2298 WL_DCMD_LEN_MAX); 1920 &sta_info_le,
1921 sizeof(sta_info_le));
2299 if (err < 0) { 1922 if (err < 0) {
2300 WL_ERR("GET STA INFO failed, %d\n", err); 1923 brcmf_err("GET STA INFO failed, %d\n", err);
2301 goto done; 1924 goto done;
2302 } 1925 }
2303 sta_info_le = (struct brcmf_sta_info_le *)cfg->dcmd_buf;
2304
2305 sinfo->filled = STATION_INFO_INACTIVE_TIME; 1926 sinfo->filled = STATION_INFO_INACTIVE_TIME;
2306 sinfo->inactive_time = le32_to_cpu(sta_info_le->idle) * 1000; 1927 sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000;
2307 if (le32_to_cpu(sta_info_le->flags) & BRCMF_STA_ASSOC) { 1928 if (le32_to_cpu(sta_info_le.flags) & BRCMF_STA_ASSOC) {
2308 sinfo->filled |= STATION_INFO_CONNECTED_TIME; 1929 sinfo->filled |= STATION_INFO_CONNECTED_TIME;
2309 sinfo->connected_time = le32_to_cpu(sta_info_le->in); 1930 sinfo->connected_time = le32_to_cpu(sta_info_le.in);
2310 } 1931 }
2311 WL_TRACE("STA idle time : %d ms, connected time :%d sec\n", 1932 brcmf_dbg(TRACE, "STA idle time : %d ms, connected time :%d sec\n",
2312 sinfo->inactive_time, sinfo->connected_time); 1933 sinfo->inactive_time, sinfo->connected_time);
2313 } else if (cfg->conf->mode == WL_MODE_BSS) { 1934 } else if (ifp->vif->mode == WL_MODE_BSS) {
2314 if (memcmp(mac, bssid, ETH_ALEN)) { 1935 if (memcmp(mac, bssid, ETH_ALEN)) {
2315 WL_ERR("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n", 1936 brcmf_err("Wrong Mac address cfg_mac-%pM wl_bssid-%pM\n",
2316 mac, bssid); 1937 mac, bssid);
2317 err = -ENOENT; 1938 err = -ENOENT;
2318 goto done; 1939 goto done;
2319 } 1940 }
2320 /* Report the current tx rate */ 1941 /* Report the current tx rate */
2321 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_GET_RATE, &rate); 1942 err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_RATE, &rate);
2322 if (err) { 1943 if (err) {
2323 WL_ERR("Could not get rate (%d)\n", err); 1944 brcmf_err("Could not get rate (%d)\n", err);
2324 goto done; 1945 goto done;
2325 } else { 1946 } else {
2326 sinfo->filled |= STATION_INFO_TX_BITRATE; 1947 sinfo->filled |= STATION_INFO_TX_BITRATE;
2327 sinfo->txrate.legacy = rate * 5; 1948 sinfo->txrate.legacy = rate * 5;
2328 WL_CONN("Rate %d Mbps\n", rate / 2); 1949 brcmf_dbg(CONN, "Rate %d Mbps\n", rate / 2);
2329 } 1950 }
2330 1951
2331 if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) { 1952 if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
1953 &ifp->vif->sme_state)) {
2332 memset(&scb_val, 0, sizeof(scb_val)); 1954 memset(&scb_val, 0, sizeof(scb_val));
2333 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val, 1955 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
2334 sizeof(scb_val)); 1956 &scb_val, sizeof(scb_val));
2335 if (err) { 1957 if (err) {
2336 WL_ERR("Could not get rssi (%d)\n", err); 1958 brcmf_err("Could not get rssi (%d)\n", err);
2337 goto done; 1959 goto done;
2338 } else { 1960 } else {
2339 rssi = le32_to_cpu(scb_val.val); 1961 rssi = le32_to_cpu(scb_val.val);
2340 sinfo->filled |= STATION_INFO_SIGNAL; 1962 sinfo->filled |= STATION_INFO_SIGNAL;
2341 sinfo->signal = rssi; 1963 sinfo->signal = rssi;
2342 WL_CONN("RSSI %d dBm\n", rssi); 1964 brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
2343 } 1965 }
2344 } 1966 }
2345 } else 1967 } else
2346 err = -EPERM; 1968 err = -EPERM;
2347done: 1969done:
2348 WL_TRACE("Exit\n"); 1970 brcmf_dbg(TRACE, "Exit\n");
2349 return err; 1971 return err;
2350} 1972}
2351 1973
@@ -2356,8 +1978,9 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
2356 s32 pm; 1978 s32 pm;
2357 s32 err = 0; 1979 s32 err = 0;
2358 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 1980 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
1981 struct brcmf_if *ifp = netdev_priv(ndev);
2359 1982
2360 WL_TRACE("Enter\n"); 1983 brcmf_dbg(TRACE, "Enter\n");
2361 1984
2362 /* 1985 /*
2363 * Powersave enable/disable request is coming from the 1986 * Powersave enable/disable request is coming from the
@@ -2367,24 +1990,24 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
2367 * FW later while initializing the dongle 1990 * FW later while initializing the dongle
2368 */ 1991 */
2369 cfg->pwr_save = enabled; 1992 cfg->pwr_save = enabled;
2370 if (!test_bit(WL_STATUS_READY, &cfg->status)) { 1993 if (!check_vif_up(ifp->vif)) {
2371 1994
2372 WL_INFO("Device is not ready, storing the value in cfg_info struct\n"); 1995 brcmf_dbg(INFO, "Device is not ready, storing the value in cfg_info struct\n");
2373 goto done; 1996 goto done;
2374 } 1997 }
2375 1998
2376 pm = enabled ? PM_FAST : PM_OFF; 1999 pm = enabled ? PM_FAST : PM_OFF;
2377 WL_INFO("power save %s\n", (pm ? "enabled" : "disabled")); 2000 brcmf_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled"));
2378 2001
2379 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &pm); 2002 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, pm);
2380 if (err) { 2003 if (err) {
2381 if (err == -ENODEV) 2004 if (err == -ENODEV)
2382 WL_ERR("net_device is not ready yet\n"); 2005 brcmf_err("net_device is not ready yet\n");
2383 else 2006 else
2384 WL_ERR("error (%d)\n", err); 2007 brcmf_err("error (%d)\n", err);
2385 } 2008 }
2386done: 2009done:
2387 WL_TRACE("Exit\n"); 2010 brcmf_dbg(TRACE, "Exit\n");
2388 return err; 2011 return err;
2389} 2012}
2390 2013
@@ -2393,6 +2016,7 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
2393 const u8 *addr, 2016 const u8 *addr,
2394 const struct cfg80211_bitrate_mask *mask) 2017 const struct cfg80211_bitrate_mask *mask)
2395{ 2018{
2019 struct brcmf_if *ifp = netdev_priv(ndev);
2396 struct brcm_rateset_le rateset_le; 2020 struct brcm_rateset_le rateset_le;
2397 s32 rate; 2021 s32 rate;
2398 s32 val; 2022 s32 val;
@@ -2401,16 +2025,16 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
2401 u32 legacy; 2025 u32 legacy;
2402 s32 err = 0; 2026 s32 err = 0;
2403 2027
2404 WL_TRACE("Enter\n"); 2028 brcmf_dbg(TRACE, "Enter\n");
2405 if (!check_sys_up(wiphy)) 2029 if (!check_vif_up(ifp->vif))
2406 return -EIO; 2030 return -EIO;
2407 2031
2408 /* addr param is always NULL. ignore it */ 2032 /* addr param is always NULL. ignore it */
2409 /* Get current rateset */ 2033 /* Get current rateset */
2410 err = brcmf_exec_dcmd(ndev, BRCM_GET_CURR_RATESET, &rateset_le, 2034 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_CURR_RATESET,
2411 sizeof(rateset_le)); 2035 &rateset_le, sizeof(rateset_le));
2412 if (err) { 2036 if (err) {
2413 WL_ERR("could not get current rateset (%d)\n", err); 2037 brcmf_err("could not get current rateset (%d)\n", err);
2414 goto done; 2038 goto done;
2415 } 2039 }
2416 2040
@@ -2428,22 +2052,23 @@ brcmf_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *ndev,
2428 /* Specified rate in bps */ 2052 /* Specified rate in bps */
2429 rate = val / 500000; 2053 rate = val / 500000;
2430 2054
2431 WL_CONN("rate %d mbps\n", rate / 2); 2055 brcmf_dbg(CONN, "rate %d mbps\n", rate / 2);
2432 2056
2433 /* 2057 /*
2434 * 2058 *
2435 * Set rate override, 2059 * Set rate override,
2436 * Since the is a/b/g-blind, both a/bg_rate are enforced. 2060 * Since the is a/b/g-blind, both a/bg_rate are enforced.
2437 */ 2061 */
2438 err_bg = brcmf_dev_intvar_set(ndev, "bg_rate", rate); 2062 err_bg = brcmf_fil_iovar_int_set(ifp, "bg_rate", rate);
2439 err_a = brcmf_dev_intvar_set(ndev, "a_rate", rate); 2063 err_a = brcmf_fil_iovar_int_set(ifp, "a_rate", rate);
2440 if (err_bg && err_a) { 2064 if (err_bg && err_a) {
2441 WL_ERR("could not set fixed rate (%d) (%d)\n", err_bg, err_a); 2065 brcmf_err("could not set fixed rate (%d) (%d)\n", err_bg,
2066 err_a);
2442 err = err_bg | err_a; 2067 err = err_bg | err_a;
2443 } 2068 }
2444 2069
2445done: 2070done:
2446 WL_TRACE("Exit\n"); 2071 brcmf_dbg(TRACE, "Exit\n");
2447 return err; 2072 return err;
2448} 2073}
2449 2074
@@ -2464,7 +2089,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2464 s32 notify_signal; 2089 s32 notify_signal;
2465 2090
2466 if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { 2091 if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) {
2467 WL_ERR("Bss info is larger than buffer. Discarding\n"); 2092 brcmf_err("Bss info is larger than buffer. Discarding\n");
2468 return 0; 2093 return 0;
2469 } 2094 }
2470 2095
@@ -2485,13 +2110,11 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
2485 notify_ielen = le32_to_cpu(bi->ie_length); 2110 notify_ielen = le32_to_cpu(bi->ie_length);
2486 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; 2111 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
2487 2112
2488 WL_CONN("bssid: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", 2113 brcmf_dbg(CONN, "bssid: %pM\n", bi->BSSID);
2489 bi->BSSID[0], bi->BSSID[1], bi->BSSID[2], 2114 brcmf_dbg(CONN, "Channel: %d(%d)\n", channel, freq);
2490 bi->BSSID[3], bi->BSSID[4], bi->BSSID[5]); 2115 brcmf_dbg(CONN, "Capability: %X\n", notify_capability);
2491 WL_CONN("Channel: %d(%d)\n", channel, freq); 2116 brcmf_dbg(CONN, "Beacon interval: %d\n", notify_interval);
2492 WL_CONN("Capability: %X\n", notify_capability); 2117 brcmf_dbg(CONN, "Signal: %d\n", notify_signal);
2493 WL_CONN("Beacon interval: %d\n", notify_interval);
2494 WL_CONN("Signal: %d\n", notify_signal);
2495 2118
2496 bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID, 2119 bss = cfg80211_inform_bss(wiphy, notify_channel, (const u8 *)bi->BSSID,
2497 0, notify_capability, notify_interval, notify_ie, 2120 0, notify_capability, notify_interval, notify_ie,
@@ -2522,13 +2145,14 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_info *cfg)
2522 int i; 2145 int i;
2523 2146
2524 bss_list = cfg->bss_list; 2147 bss_list = cfg->bss_list;
2525 if (bss_list->version != BRCMF_BSS_INFO_VERSION) { 2148 if (bss_list->count != 0 &&
2526 WL_ERR("Version %d != WL_BSS_INFO_VERSION\n", 2149 bss_list->version != BRCMF_BSS_INFO_VERSION) {
2527 bss_list->version); 2150 brcmf_err("Version %d != WL_BSS_INFO_VERSION\n",
2151 bss_list->version);
2528 return -EOPNOTSUPP; 2152 return -EOPNOTSUPP;
2529 } 2153 }
2530 WL_SCAN("scanned AP count (%d)\n", bss_list->count); 2154 brcmf_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count);
2531 for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) { 2155 for (i = 0; i < bss_list->count; i++) {
2532 bi = next_bss_le(bss_list, bi); 2156 bi = next_bss_le(bss_list, bi);
2533 err = brcmf_inform_single_bss(cfg, bi); 2157 err = brcmf_inform_single_bss(cfg, bi);
2534 if (err) 2158 if (err)
@@ -2555,7 +2179,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2555 size_t notify_ielen; 2179 size_t notify_ielen;
2556 s32 notify_signal; 2180 s32 notify_signal;
2557 2181
2558 WL_TRACE("Enter\n"); 2182 brcmf_dbg(TRACE, "Enter\n");
2559 2183
2560 buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); 2184 buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
2561 if (buf == NULL) { 2185 if (buf == NULL) {
@@ -2565,9 +2189,10 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2565 2189
2566 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); 2190 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
2567 2191
2568 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); 2192 err = brcmf_fil_cmd_data_get(netdev_priv(ndev), BRCMF_C_GET_BSS_INFO,
2193 buf, WL_BSS_INFO_MAX);
2569 if (err) { 2194 if (err) {
2570 WL_ERR("WLC_GET_BSS_INFO failed: %d\n", err); 2195 brcmf_err("WLC_GET_BSS_INFO failed: %d\n", err);
2571 goto CleanUp; 2196 goto CleanUp;
2572 } 2197 }
2573 2198
@@ -2590,10 +2215,10 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_info *cfg,
2590 notify_ielen = le32_to_cpu(bi->ie_length); 2215 notify_ielen = le32_to_cpu(bi->ie_length);
2591 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; 2216 notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100;
2592 2217
2593 WL_CONN("channel: %d(%d)\n", channel, freq); 2218 brcmf_dbg(CONN, "channel: %d(%d)\n", channel, freq);
2594 WL_CONN("capability: %X\n", notify_capability); 2219 brcmf_dbg(CONN, "capability: %X\n", notify_capability);
2595 WL_CONN("beacon interval: %d\n", notify_interval); 2220 brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval);
2596 WL_CONN("signal: %d\n", notify_signal); 2221 brcmf_dbg(CONN, "signal: %d\n", notify_signal);
2597 2222
2598 bss = cfg80211_inform_bss(wiphy, notify_channel, bssid, 2223 bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
2599 0, notify_capability, notify_interval, 2224 0, notify_capability, notify_interval,
@@ -2610,14 +2235,14 @@ CleanUp:
2610 2235
2611 kfree(buf); 2236 kfree(buf);
2612 2237
2613 WL_TRACE("Exit\n"); 2238 brcmf_dbg(TRACE, "Exit\n");
2614 2239
2615 return err; 2240 return err;
2616} 2241}
2617 2242
2618static bool brcmf_is_ibssmode(struct brcmf_cfg80211_info *cfg) 2243static bool brcmf_is_ibssmode(struct brcmf_cfg80211_vif *vif)
2619{ 2244{
2620 return cfg->conf->mode == WL_MODE_IBSS; 2245 return vif->mode == WL_MODE_IBSS;
2621} 2246}
2622 2247
2623/* 2248/*
@@ -2674,12 +2299,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
2674 return false; 2299 return false;
2675} 2300}
2676 2301
2677struct brcmf_vs_tlv * 2302static struct brcmf_vs_tlv *
2678brcmf_find_wpaie(u8 *parse, u32 len) 2303brcmf_find_wpaie(u8 *parse, u32 len)
2679{ 2304{
2680 struct brcmf_tlv *ie; 2305 struct brcmf_tlv *ie;
2681 2306
2682 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_WPA))) { 2307 while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
2683 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len, 2308 if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
2684 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE)) 2309 WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
2685 return (struct brcmf_vs_tlv *)ie; 2310 return (struct brcmf_vs_tlv *)ie;
@@ -2689,7 +2314,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
2689 2314
2690static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg) 2315static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
2691{ 2316{
2692 struct brcmf_cfg80211_profile *profile = cfg->profile; 2317 struct net_device *ndev = cfg_to_ndev(cfg);
2318 struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev);
2319 struct brcmf_if *ifp = netdev_priv(ndev);
2693 struct brcmf_bss_info_le *bi; 2320 struct brcmf_bss_info_le *bi;
2694 struct brcmf_ssid *ssid; 2321 struct brcmf_ssid *ssid;
2695 struct brcmf_tlv *tim; 2322 struct brcmf_tlv *tim;
@@ -2699,17 +2326,17 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
2699 u8 *ie; 2326 u8 *ie;
2700 s32 err = 0; 2327 s32 err = 0;
2701 2328
2702 WL_TRACE("Enter\n"); 2329 brcmf_dbg(TRACE, "Enter\n");
2703 if (brcmf_is_ibssmode(cfg)) 2330 if (brcmf_is_ibssmode(ifp->vif))
2704 return err; 2331 return err;
2705 2332
2706 ssid = &profile->ssid; 2333 ssid = &profile->ssid;
2707 2334
2708 *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); 2335 *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX);
2709 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCMF_C_GET_BSS_INFO, 2336 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
2710 cfg->extra_buf, WL_EXTRA_BUF_MAX); 2337 cfg->extra_buf, WL_EXTRA_BUF_MAX);
2711 if (err) { 2338 if (err) {
2712 WL_ERR("Could not get bss info %d\n", err); 2339 brcmf_err("Could not get bss info %d\n", err);
2713 goto update_bss_info_out; 2340 goto update_bss_info_out;
2714 } 2341 }
2715 2342
@@ -2732,252 +2359,30 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg)
2732 * so we speficially query dtim information to dongle. 2359 * so we speficially query dtim information to dongle.
2733 */ 2360 */
2734 u32 var; 2361 u32 var;
2735 err = brcmf_dev_intvar_get(cfg_to_ndev(cfg), 2362 err = brcmf_fil_iovar_int_get(ifp, "dtim_assoc", &var);
2736 "dtim_assoc", &var);
2737 if (err) { 2363 if (err) {
2738 WL_ERR("wl dtim_assoc failed (%d)\n", err); 2364 brcmf_err("wl dtim_assoc failed (%d)\n", err);
2739 goto update_bss_info_out; 2365 goto update_bss_info_out;
2740 } 2366 }
2741 dtim_period = (u8)var; 2367 dtim_period = (u8)var;
2742 } 2368 }
2743 2369
2744 profile->beacon_interval = beacon_interval;
2745 profile->dtim_period = dtim_period;
2746
2747update_bss_info_out: 2370update_bss_info_out:
2748 WL_TRACE("Exit"); 2371 brcmf_dbg(TRACE, "Exit");
2749 return err; 2372 return err;
2750} 2373}
2751 2374
2752static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg) 2375static void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
2753{ 2376{
2754 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2755 struct escan_info *escan = &cfg->escan_info; 2377 struct escan_info *escan = &cfg->escan_info;
2756 struct brcmf_ssid ssid;
2757
2758 set_bit(WL_STATUS_SCAN_ABORTING, &cfg->status);
2759 if (cfg->iscan_on) {
2760 iscan->state = WL_ISCAN_STATE_IDLE;
2761
2762 if (iscan->timer_on) {
2763 del_timer_sync(&iscan->timer);
2764 iscan->timer_on = 0;
2765 }
2766
2767 cancel_work_sync(&iscan->work);
2768 2378
2769 /* Abort iscan running in FW */ 2379 set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
2770 memset(&ssid, 0, sizeof(ssid)); 2380 if (cfg->scan_request) {
2771 brcmf_run_iscan(iscan, &ssid, WL_SCAN_ACTION_ABORT);
2772
2773 if (cfg->scan_request) {
2774 /* Indidate scan abort to cfg80211 layer */
2775 WL_INFO("Terminating scan in progress\n");
2776 cfg80211_scan_done(cfg->scan_request, true);
2777 cfg->scan_request = NULL;
2778 }
2779 }
2780 if (cfg->escan_on && cfg->scan_request) {
2781 escan->escan_state = WL_ESCAN_STATE_IDLE; 2381 escan->escan_state = WL_ESCAN_STATE_IDLE;
2782 brcmf_notify_escan_complete(cfg, escan->ndev, true, true); 2382 brcmf_notify_escan_complete(cfg, escan->ndev, true, true);
2783 } 2383 }
2784 clear_bit(WL_STATUS_SCANNING, &cfg->status); 2384 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
2785 clear_bit(WL_STATUS_SCAN_ABORTING, &cfg->status); 2385 clear_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
2786}
2787
2788static void brcmf_notify_iscan_complete(struct brcmf_cfg80211_iscan_ctrl *iscan,
2789 bool aborted)
2790{
2791 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2792 struct net_device *ndev = cfg_to_ndev(cfg);
2793
2794 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
2795 WL_ERR("Scan complete while device not scanning\n");
2796 return;
2797 }
2798 if (cfg->scan_request) {
2799 WL_SCAN("ISCAN Completed scan: %s\n",
2800 aborted ? "Aborted" : "Done");
2801 cfg80211_scan_done(cfg->scan_request, aborted);
2802 brcmf_set_mpc(ndev, 1);
2803 cfg->scan_request = NULL;
2804 }
2805 cfg->iscan_kickstart = false;
2806}
2807
2808static s32 brcmf_wakeup_iscan(struct brcmf_cfg80211_iscan_ctrl *iscan)
2809{
2810 if (iscan->state != WL_ISCAN_STATE_IDLE) {
2811 WL_SCAN("wake up iscan\n");
2812 schedule_work(&iscan->work);
2813 return 0;
2814 }
2815
2816 return -EIO;
2817}
2818
2819static s32
2820brcmf_get_iscan_results(struct brcmf_cfg80211_iscan_ctrl *iscan, u32 *status,
2821 struct brcmf_scan_results **bss_list)
2822{
2823 struct brcmf_iscan_results list;
2824 struct brcmf_scan_results *results;
2825 struct brcmf_scan_results_le *results_le;
2826 struct brcmf_iscan_results *list_buf;
2827 s32 err = 0;
2828
2829 memset(iscan->scan_buf, 0, WL_ISCAN_BUF_MAX);
2830 list_buf = (struct brcmf_iscan_results *)iscan->scan_buf;
2831 results = &list_buf->results;
2832 results_le = &list_buf->results_le;
2833 results->buflen = BRCMF_ISCAN_RESULTS_FIXED_SIZE;
2834 results->version = 0;
2835 results->count = 0;
2836
2837 memset(&list, 0, sizeof(list));
2838 list.results_le.buflen = cpu_to_le32(WL_ISCAN_BUF_MAX);
2839 err = brcmf_dev_iovar_getbuf(iscan->ndev, "iscanresults", &list,
2840 BRCMF_ISCAN_RESULTS_FIXED_SIZE,
2841 iscan->scan_buf, WL_ISCAN_BUF_MAX);
2842 if (err) {
2843 WL_ERR("error (%d)\n", err);
2844 return err;
2845 }
2846 results->buflen = le32_to_cpu(results_le->buflen);
2847 results->version = le32_to_cpu(results_le->version);
2848 results->count = le32_to_cpu(results_le->count);
2849 WL_SCAN("results->count = %d\n", results_le->count);
2850 WL_SCAN("results->buflen = %d\n", results_le->buflen);
2851 *status = le32_to_cpu(list_buf->status_le);
2852 WL_SCAN("status = %d\n", *status);
2853 *bss_list = results;
2854
2855 return err;
2856}
2857
2858static s32 brcmf_iscan_done(struct brcmf_cfg80211_info *cfg)
2859{
2860 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2861 s32 err = 0;
2862
2863 iscan->state = WL_ISCAN_STATE_IDLE;
2864 brcmf_inform_bss(cfg);
2865 brcmf_notify_iscan_complete(iscan, false);
2866
2867 return err;
2868}
2869
2870static s32 brcmf_iscan_pending(struct brcmf_cfg80211_info *cfg)
2871{
2872 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2873 s32 err = 0;
2874
2875 /* Reschedule the timer */
2876 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
2877 iscan->timer_on = 1;
2878
2879 return err;
2880}
2881
2882static s32 brcmf_iscan_inprogress(struct brcmf_cfg80211_info *cfg)
2883{
2884 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2885 s32 err = 0;
2886
2887 brcmf_inform_bss(cfg);
2888 brcmf_run_iscan(iscan, NULL, BRCMF_SCAN_ACTION_CONTINUE);
2889 /* Reschedule the timer */
2890 mod_timer(&iscan->timer, jiffies + iscan->timer_ms * HZ / 1000);
2891 iscan->timer_on = 1;
2892
2893 return err;
2894}
2895
2896static s32 brcmf_iscan_aborted(struct brcmf_cfg80211_info *cfg)
2897{
2898 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg->iscan;
2899 s32 err = 0;
2900
2901 iscan->state = WL_ISCAN_STATE_IDLE;
2902 brcmf_notify_iscan_complete(iscan, true);
2903
2904 return err;
2905}
2906
2907static void brcmf_cfg80211_iscan_handler(struct work_struct *work)
2908{
2909 struct brcmf_cfg80211_iscan_ctrl *iscan =
2910 container_of(work, struct brcmf_cfg80211_iscan_ctrl,
2911 work);
2912 struct brcmf_cfg80211_info *cfg = iscan_to_cfg(iscan);
2913 struct brcmf_cfg80211_iscan_eloop *el = &iscan->el;
2914 u32 status = BRCMF_SCAN_RESULTS_PARTIAL;
2915
2916 if (iscan->timer_on) {
2917 del_timer_sync(&iscan->timer);
2918 iscan->timer_on = 0;
2919 }
2920
2921 if (brcmf_get_iscan_results(iscan, &status, &cfg->bss_list)) {
2922 status = BRCMF_SCAN_RESULTS_ABORTED;
2923 WL_ERR("Abort iscan\n");
2924 }
2925
2926 el->handler[status](cfg);
2927}
2928
2929static void brcmf_iscan_timer(unsigned long data)
2930{
2931 struct brcmf_cfg80211_iscan_ctrl *iscan =
2932 (struct brcmf_cfg80211_iscan_ctrl *)data;
2933
2934 if (iscan) {
2935 iscan->timer_on = 0;
2936 WL_SCAN("timer expired\n");
2937 brcmf_wakeup_iscan(iscan);
2938 }
2939}
2940
2941static s32 brcmf_invoke_iscan(struct brcmf_cfg80211_info *cfg)
2942{
2943 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2944
2945 if (cfg->iscan_on) {
2946 iscan->state = WL_ISCAN_STATE_IDLE;
2947 INIT_WORK(&iscan->work, brcmf_cfg80211_iscan_handler);
2948 }
2949
2950 return 0;
2951}
2952
2953static void brcmf_init_iscan_eloop(struct brcmf_cfg80211_iscan_eloop *el)
2954{
2955 memset(el, 0, sizeof(*el));
2956 el->handler[BRCMF_SCAN_RESULTS_SUCCESS] = brcmf_iscan_done;
2957 el->handler[BRCMF_SCAN_RESULTS_PARTIAL] = brcmf_iscan_inprogress;
2958 el->handler[BRCMF_SCAN_RESULTS_PENDING] = brcmf_iscan_pending;
2959 el->handler[BRCMF_SCAN_RESULTS_ABORTED] = brcmf_iscan_aborted;
2960 el->handler[BRCMF_SCAN_RESULTS_NO_MEM] = brcmf_iscan_aborted;
2961}
2962
2963static s32 brcmf_init_iscan(struct brcmf_cfg80211_info *cfg)
2964{
2965 struct brcmf_cfg80211_iscan_ctrl *iscan = cfg_to_iscan(cfg);
2966 int err = 0;
2967
2968 if (cfg->iscan_on) {
2969 iscan->ndev = cfg_to_ndev(cfg);
2970 brcmf_init_iscan_eloop(&iscan->el);
2971 iscan->timer_ms = WL_ISCAN_TIMER_INTERVAL_MS;
2972 init_timer(&iscan->timer);
2973 iscan->timer.data = (unsigned long) iscan;
2974 iscan->timer.function = brcmf_iscan_timer;
2975 err = brcmf_invoke_iscan(cfg);
2976 if (!err)
2977 iscan->data = cfg;
2978 }
2979
2980 return err;
2981} 2386}
2982 2387
2983static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work) 2388static void brcmf_cfg80211_escan_timeout_worker(struct work_struct *work)
@@ -2996,9 +2401,8 @@ static void brcmf_escan_timeout(unsigned long data)
2996 (struct brcmf_cfg80211_info *)data; 2401 (struct brcmf_cfg80211_info *)data;
2997 2402
2998 if (cfg->scan_request) { 2403 if (cfg->scan_request) {
2999 WL_ERR("timer expired\n"); 2404 brcmf_err("timer expired\n");
3000 if (cfg->escan_on) 2405 schedule_work(&cfg->escan_timeout_work);
3001 schedule_work(&cfg->escan_timeout_work);
3002 } 2406 }
3003} 2407}
3004 2408
@@ -3035,10 +2439,11 @@ brcmf_compare_update_same_bss(struct brcmf_bss_info_le *bss,
3035} 2439}
3036 2440
3037static s32 2441static s32
3038brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg, 2442brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
3039 struct net_device *ndev,
3040 const struct brcmf_event_msg *e, void *data) 2443 const struct brcmf_event_msg *e, void *data)
3041{ 2444{
2445 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
2446 struct net_device *ndev = ifp->ndev;
3042 s32 status; 2447 s32 status;
3043 s32 err = 0; 2448 s32 err = 0;
3044 struct brcmf_escan_result_le *escan_result_le; 2449 struct brcmf_escan_result_le *escan_result_le;
@@ -3049,31 +2454,29 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3049 u32 i; 2454 u32 i;
3050 bool aborted; 2455 bool aborted;
3051 2456
3052 status = be32_to_cpu(e->status); 2457 status = e->status;
3053 2458
3054 if (!ndev || !cfg->escan_on || 2459 if (!ndev || !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
3055 !test_bit(WL_STATUS_SCANNING, &cfg->status)) { 2460 brcmf_err("scan not ready ndev %p drv_status %x\n", ndev,
3056 WL_ERR("scan not ready ndev %p wl->escan_on %d drv_status %x\n", 2461 !test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status));
3057 ndev, cfg->escan_on,
3058 !test_bit(WL_STATUS_SCANNING, &cfg->status));
3059 return -EPERM; 2462 return -EPERM;
3060 } 2463 }
3061 2464
3062 if (status == BRCMF_E_STATUS_PARTIAL) { 2465 if (status == BRCMF_E_STATUS_PARTIAL) {
3063 WL_SCAN("ESCAN Partial result\n"); 2466 brcmf_dbg(SCAN, "ESCAN Partial result\n");
3064 escan_result_le = (struct brcmf_escan_result_le *) data; 2467 escan_result_le = (struct brcmf_escan_result_le *) data;
3065 if (!escan_result_le) { 2468 if (!escan_result_le) {
3066 WL_ERR("Invalid escan result (NULL pointer)\n"); 2469 brcmf_err("Invalid escan result (NULL pointer)\n");
3067 goto exit; 2470 goto exit;
3068 } 2471 }
3069 if (!cfg->scan_request) { 2472 if (!cfg->scan_request) {
3070 WL_SCAN("result without cfg80211 request\n"); 2473 brcmf_dbg(SCAN, "result without cfg80211 request\n");
3071 goto exit; 2474 goto exit;
3072 } 2475 }
3073 2476
3074 if (le16_to_cpu(escan_result_le->bss_count) != 1) { 2477 if (le16_to_cpu(escan_result_le->bss_count) != 1) {
3075 WL_ERR("Invalid bss_count %d: ignoring\n", 2478 brcmf_err("Invalid bss_count %d: ignoring\n",
3076 escan_result_le->bss_count); 2479 escan_result_le->bss_count);
3077 goto exit; 2480 goto exit;
3078 } 2481 }
3079 bss_info_le = &escan_result_le->bss_info_le; 2482 bss_info_le = &escan_result_le->bss_info_le;
@@ -3081,8 +2484,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3081 bi_length = le32_to_cpu(bss_info_le->length); 2484 bi_length = le32_to_cpu(bss_info_le->length);
3082 if (bi_length != (le32_to_cpu(escan_result_le->buflen) - 2485 if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
3083 WL_ESCAN_RESULTS_FIXED_SIZE)) { 2486 WL_ESCAN_RESULTS_FIXED_SIZE)) {
3084 WL_ERR("Invalid bss_info length %d: ignoring\n", 2487 brcmf_err("Invalid bss_info length %d: ignoring\n",
3085 bi_length); 2488 bi_length);
3086 goto exit; 2489 goto exit;
3087 } 2490 }
3088 2491
@@ -3090,7 +2493,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3090 BIT(NL80211_IFTYPE_ADHOC))) { 2493 BIT(NL80211_IFTYPE_ADHOC))) {
3091 if (le16_to_cpu(bss_info_le->capability) & 2494 if (le16_to_cpu(bss_info_le->capability) &
3092 WLAN_CAPABILITY_IBSS) { 2495 WLAN_CAPABILITY_IBSS) {
3093 WL_ERR("Ignoring IBSS result\n"); 2496 brcmf_err("Ignoring IBSS result\n");
3094 goto exit; 2497 goto exit;
3095 } 2498 }
3096 } 2499 }
@@ -3098,7 +2501,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3098 list = (struct brcmf_scan_results *) 2501 list = (struct brcmf_scan_results *)
3099 cfg->escan_info.escan_buf; 2502 cfg->escan_info.escan_buf;
3100 if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) { 2503 if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) {
3101 WL_ERR("Buffer is too small: ignoring\n"); 2504 brcmf_err("Buffer is too small: ignoring\n");
3102 goto exit; 2505 goto exit;
3103 } 2506 }
3104 2507
@@ -3124,7 +2527,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_cfg80211_info *cfg,
3124 brcmf_notify_escan_complete(cfg, ndev, aborted, 2527 brcmf_notify_escan_complete(cfg, ndev, aborted,
3125 false); 2528 false);
3126 } else 2529 } else
3127 WL_ERR("Unexpected scan result 0x%x\n", status); 2530 brcmf_err("Unexpected scan result 0x%x\n", status);
3128 } 2531 }
3129exit: 2532exit:
3130 return err; 2533 return err;
@@ -3132,18 +2535,15 @@ exit:
3132 2535
3133static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg) 2536static void brcmf_init_escan(struct brcmf_cfg80211_info *cfg)
3134{ 2537{
3135 2538 brcmf_fweh_register(cfg->pub, BRCMF_E_ESCAN_RESULT,
3136 if (cfg->escan_on) { 2539 brcmf_cfg80211_escan_handler);
3137 cfg->el.handler[BRCMF_E_ESCAN_RESULT] = 2540 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
3138 brcmf_cfg80211_escan_handler; 2541 /* Init scan_timeout timer */
3139 cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; 2542 init_timer(&cfg->escan_timeout);
3140 /* Init scan_timeout timer */ 2543 cfg->escan_timeout.data = (unsigned long) cfg;
3141 init_timer(&cfg->escan_timeout); 2544 cfg->escan_timeout.function = brcmf_escan_timeout;
3142 cfg->escan_timeout.data = (unsigned long) cfg; 2545 INIT_WORK(&cfg->escan_timeout_work,
3143 cfg->escan_timeout.function = brcmf_escan_timeout; 2546 brcmf_cfg80211_escan_timeout_worker);
3144 INIT_WORK(&cfg->escan_timeout_work,
3145 brcmf_cfg80211_escan_timeout_worker);
3146 }
3147} 2547}
3148 2548
3149static __always_inline void brcmf_delay(u32 ms) 2549static __always_inline void brcmf_delay(u32 ms)
@@ -3158,19 +2558,8 @@ static __always_inline void brcmf_delay(u32 ms)
3158 2558
3159static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) 2559static s32 brcmf_cfg80211_resume(struct wiphy *wiphy)
3160{ 2560{
3161 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2561 brcmf_dbg(TRACE, "Enter\n");
3162
3163 /*
3164 * Check for WL_STATUS_READY before any function call which
3165 * could result is bus access. Don't block the resume for
3166 * any driver error conditions
3167 */
3168 WL_TRACE("Enter\n");
3169 2562
3170 if (test_bit(WL_STATUS_READY, &cfg->status))
3171 brcmf_invoke_iscan(wiphy_to_cfg(wiphy));
3172
3173 WL_TRACE("Exit\n");
3174 return 0; 2563 return 0;
3175} 2564}
3176 2565
@@ -3179,85 +2568,49 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
3179{ 2568{
3180 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2569 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3181 struct net_device *ndev = cfg_to_ndev(cfg); 2570 struct net_device *ndev = cfg_to_ndev(cfg);
2571 struct brcmf_cfg80211_vif *vif;
3182 2572
3183 WL_TRACE("Enter\n"); 2573 brcmf_dbg(TRACE, "Enter\n");
3184 2574
3185 /* 2575 /*
3186 * Check for WL_STATUS_READY before any function call which 2576 * if the primary net_device is not READY there is nothing
3187 * could result is bus access. Don't block the suspend for 2577 * we can do but pray resume goes smoothly.
3188 * any driver error conditions
3189 */ 2578 */
2579 vif = ((struct brcmf_if *)netdev_priv(ndev))->vif;
2580 if (!check_vif_up(vif))
2581 goto exit;
3190 2582
3191 /* 2583 list_for_each_entry(vif, &cfg->vif_list, list) {
3192 * While going to suspend if associated with AP disassociate 2584 if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state))
3193 * from AP to save power while system is in suspended state 2585 continue;
3194 */
3195 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) ||
3196 test_bit(WL_STATUS_CONNECTING, &cfg->status)) &&
3197 test_bit(WL_STATUS_READY, &cfg->status)) {
3198 WL_INFO("Disassociating from AP"
3199 " while entering suspend state\n");
3200 brcmf_link_down(cfg);
3201
3202 /* 2586 /*
3203 * Make sure WPA_Supplicant receives all the event 2587 * While going to suspend if associated with AP disassociate
2588 * from AP to save power while system is in suspended state
2589 */
2590 brcmf_link_down(vif);
2591
2592 /* Make sure WPA_Supplicant receives all the event
3204 * generated due to DISASSOC call to the fw to keep 2593 * generated due to DISASSOC call to the fw to keep
3205 * the state fw and WPA_Supplicant state consistent 2594 * the state fw and WPA_Supplicant state consistent
3206 */ 2595 */
3207 brcmf_delay(500); 2596 brcmf_delay(500);
3208 } 2597 }
3209 2598
3210 if (test_bit(WL_STATUS_READY, &cfg->status)) 2599 /* end any scanning */
2600 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status))
3211 brcmf_abort_scanning(cfg); 2601 brcmf_abort_scanning(cfg);
3212 else
3213 clear_bit(WL_STATUS_SCANNING, &cfg->status);
3214 2602
3215 /* Turn off watchdog timer */ 2603 /* Turn off watchdog timer */
3216 if (test_bit(WL_STATUS_READY, &cfg->status)) 2604 brcmf_set_mpc(ndev, 1);
3217 brcmf_set_mpc(ndev, 1);
3218
3219 WL_TRACE("Exit\n");
3220 2605
2606exit:
2607 brcmf_dbg(TRACE, "Exit\n");
2608 /* clear any scanning activity */
2609 cfg->scan_status = 0;
3221 return 0; 2610 return 0;
3222} 2611}
3223 2612
3224static __used s32 2613static __used s32
3225brcmf_dev_bufvar_set(struct net_device *ndev, s8 *name, s8 *buf, s32 len)
3226{
3227 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
3228 u32 buflen;
3229
3230 buflen = brcmf_c_mkiovar(name, buf, len, cfg->dcmd_buf,
3231 WL_DCMD_LEN_MAX);
3232 BUG_ON(!buflen);
3233
3234 return brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, cfg->dcmd_buf,
3235 buflen);
3236}
3237
3238static s32
3239brcmf_dev_bufvar_get(struct net_device *ndev, s8 *name, s8 *buf,
3240 s32 buf_len)
3241{
3242 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
3243 u32 len;
3244 s32 err = 0;
3245
3246 len = brcmf_c_mkiovar(name, NULL, 0, cfg->dcmd_buf,
3247 WL_DCMD_LEN_MAX);
3248 BUG_ON(!len);
3249 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, cfg->dcmd_buf,
3250 WL_DCMD_LEN_MAX);
3251 if (err) {
3252 WL_ERR("error (%d)\n", err);
3253 return err;
3254 }
3255 memcpy(buf, cfg->dcmd_buf, buf_len);
3256
3257 return err;
3258}
3259
3260static __used s32
3261brcmf_update_pmklist(struct net_device *ndev, 2614brcmf_update_pmklist(struct net_device *ndev,
3262 struct brcmf_cfg80211_pmk_list *pmk_list, s32 err) 2615 struct brcmf_cfg80211_pmk_list *pmk_list, s32 err)
3263{ 2616{
@@ -3266,17 +2619,18 @@ brcmf_update_pmklist(struct net_device *ndev,
3266 2619
3267 pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid); 2620 pmkid_len = le32_to_cpu(pmk_list->pmkids.npmkid);
3268 2621
3269 WL_CONN("No of elements %d\n", pmkid_len); 2622 brcmf_dbg(CONN, "No of elements %d\n", pmkid_len);
3270 for (i = 0; i < pmkid_len; i++) { 2623 for (i = 0; i < pmkid_len; i++) {
3271 WL_CONN("PMKID[%d]: %pM =\n", i, 2624 brcmf_dbg(CONN, "PMKID[%d]: %pM =\n", i,
3272 &pmk_list->pmkids.pmkid[i].BSSID); 2625 &pmk_list->pmkids.pmkid[i].BSSID);
3273 for (j = 0; j < WLAN_PMKID_LEN; j++) 2626 for (j = 0; j < WLAN_PMKID_LEN; j++)
3274 WL_CONN("%02x\n", pmk_list->pmkids.pmkid[i].PMKID[j]); 2627 brcmf_dbg(CONN, "%02x\n",
2628 pmk_list->pmkids.pmkid[i].PMKID[j]);
3275 } 2629 }
3276 2630
3277 if (!err) 2631 if (!err)
3278 brcmf_dev_bufvar_set(ndev, "pmkid_info", (char *)pmk_list, 2632 brcmf_fil_iovar_data_set(netdev_priv(ndev), "pmkid_info",
3279 sizeof(*pmk_list)); 2633 (char *)pmk_list, sizeof(*pmk_list));
3280 2634
3281 return err; 2635 return err;
3282} 2636}
@@ -3286,13 +2640,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
3286 struct cfg80211_pmksa *pmksa) 2640 struct cfg80211_pmksa *pmksa)
3287{ 2641{
3288 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2642 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2643 struct brcmf_if *ifp = netdev_priv(ndev);
3289 struct pmkid_list *pmkids = &cfg->pmk_list->pmkids; 2644 struct pmkid_list *pmkids = &cfg->pmk_list->pmkids;
3290 s32 err = 0; 2645 s32 err = 0;
3291 int i; 2646 int i;
3292 int pmkid_len; 2647 int pmkid_len;
3293 2648
3294 WL_TRACE("Enter\n"); 2649 brcmf_dbg(TRACE, "Enter\n");
3295 if (!check_sys_up(wiphy)) 2650 if (!check_vif_up(ifp->vif))
3296 return -EIO; 2651 return -EIO;
3297 2652
3298 pmkid_len = le32_to_cpu(pmkids->npmkid); 2653 pmkid_len = le32_to_cpu(pmkids->npmkid);
@@ -3309,14 +2664,14 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
3309 } else 2664 } else
3310 err = -EINVAL; 2665 err = -EINVAL;
3311 2666
3312 WL_CONN("set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n", 2667 brcmf_dbg(CONN, "set_pmksa,IW_PMKSA_ADD - PMKID: %pM =\n",
3313 pmkids->pmkid[pmkid_len].BSSID); 2668 pmkids->pmkid[pmkid_len].BSSID);
3314 for (i = 0; i < WLAN_PMKID_LEN; i++) 2669 for (i = 0; i < WLAN_PMKID_LEN; i++)
3315 WL_CONN("%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]); 2670 brcmf_dbg(CONN, "%02x\n", pmkids->pmkid[pmkid_len].PMKID[i]);
3316 2671
3317 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); 2672 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
3318 2673
3319 WL_TRACE("Exit\n"); 2674 brcmf_dbg(TRACE, "Exit\n");
3320 return err; 2675 return err;
3321} 2676}
3322 2677
@@ -3325,21 +2680,22 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
3325 struct cfg80211_pmksa *pmksa) 2680 struct cfg80211_pmksa *pmksa)
3326{ 2681{
3327 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2682 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2683 struct brcmf_if *ifp = netdev_priv(ndev);
3328 struct pmkid_list pmkid; 2684 struct pmkid_list pmkid;
3329 s32 err = 0; 2685 s32 err = 0;
3330 int i, pmkid_len; 2686 int i, pmkid_len;
3331 2687
3332 WL_TRACE("Enter\n"); 2688 brcmf_dbg(TRACE, "Enter\n");
3333 if (!check_sys_up(wiphy)) 2689 if (!check_vif_up(ifp->vif))
3334 return -EIO; 2690 return -EIO;
3335 2691
3336 memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN); 2692 memcpy(&pmkid.pmkid[0].BSSID, pmksa->bssid, ETH_ALEN);
3337 memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN); 2693 memcpy(&pmkid.pmkid[0].PMKID, pmksa->pmkid, WLAN_PMKID_LEN);
3338 2694
3339 WL_CONN("del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n", 2695 brcmf_dbg(CONN, "del_pmksa,IW_PMKSA_REMOVE - PMKID: %pM =\n",
3340 &pmkid.pmkid[0].BSSID); 2696 &pmkid.pmkid[0].BSSID);
3341 for (i = 0; i < WLAN_PMKID_LEN; i++) 2697 for (i = 0; i < WLAN_PMKID_LEN; i++)
3342 WL_CONN("%02x\n", pmkid.pmkid[0].PMKID[i]); 2698 brcmf_dbg(CONN, "%02x\n", pmkid.pmkid[0].PMKID[i]);
3343 2699
3344 pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid); 2700 pmkid_len = le32_to_cpu(cfg->pmk_list->pmkids.npmkid);
3345 for (i = 0; i < pmkid_len; i++) 2701 for (i = 0; i < pmkid_len; i++)
@@ -3366,7 +2722,7 @@ brcmf_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev,
3366 2722
3367 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); 2723 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
3368 2724
3369 WL_TRACE("Exit\n"); 2725 brcmf_dbg(TRACE, "Exit\n");
3370 return err; 2726 return err;
3371 2727
3372} 2728}
@@ -3375,16 +2731,17 @@ static s32
3375brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) 2731brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
3376{ 2732{
3377 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2733 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
2734 struct brcmf_if *ifp = netdev_priv(ndev);
3378 s32 err = 0; 2735 s32 err = 0;
3379 2736
3380 WL_TRACE("Enter\n"); 2737 brcmf_dbg(TRACE, "Enter\n");
3381 if (!check_sys_up(wiphy)) 2738 if (!check_vif_up(ifp->vif))
3382 return -EIO; 2739 return -EIO;
3383 2740
3384 memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list)); 2741 memset(cfg->pmk_list, 0, sizeof(*cfg->pmk_list));
3385 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err); 2742 err = brcmf_update_pmklist(ndev, cfg->pmk_list, err);
3386 2743
3387 WL_TRACE("Exit\n"); 2744 brcmf_dbg(TRACE, "Exit\n");
3388 return err; 2745 return err;
3389 2746
3390} 2747}
@@ -3398,10 +2755,11 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev)
3398 * cfg80211_scan_request one out of the received PNO event. 2755 * cfg80211_scan_request one out of the received PNO event.
3399 */ 2756 */
3400static s32 2757static s32
3401brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg, 2758brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
3402 struct net_device *ndev,
3403 const struct brcmf_event_msg *e, void *data) 2759 const struct brcmf_event_msg *e, void *data)
3404{ 2760{
2761 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
2762 struct net_device *ndev = ifp->ndev;
3405 struct brcmf_pno_net_info_le *netinfo, *netinfo_start; 2763 struct brcmf_pno_net_info_le *netinfo, *netinfo_start;
3406 struct cfg80211_scan_request *request = NULL; 2764 struct cfg80211_scan_request *request = NULL;
3407 struct cfg80211_ssid *ssid = NULL; 2765 struct cfg80211_ssid *ssid = NULL;
@@ -3414,10 +2772,10 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3414 u32 result_count; 2772 u32 result_count;
3415 u32 status; 2773 u32 status;
3416 2774
3417 WL_SCAN("Enter\n"); 2775 brcmf_dbg(SCAN, "Enter\n");
3418 2776
3419 if (e->event_type == cpu_to_be32(BRCMF_E_PFN_NET_LOST)) { 2777 if (e->event_code == BRCMF_E_PFN_NET_LOST) {
3420 WL_SCAN("PFN NET LOST event. Do Nothing\n"); 2778 brcmf_dbg(SCAN, "PFN NET LOST event. Do Nothing\n");
3421 return 0; 2779 return 0;
3422 } 2780 }
3423 2781
@@ -3430,7 +2788,7 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3430 * multiple NET_FOUND events. For now place a warning here. 2788 * multiple NET_FOUND events. For now place a warning here.
3431 */ 2789 */
3432 WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE); 2790 WARN_ON(status != BRCMF_PNO_SCAN_COMPLETE);
3433 WL_SCAN("PFN NET FOUND event. count: %d\n", result_count); 2791 brcmf_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count);
3434 if (result_count > 0) { 2792 if (result_count > 0) {
3435 int i; 2793 int i;
3436 2794
@@ -3449,13 +2807,14 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3449 for (i = 0; i < result_count; i++) { 2807 for (i = 0; i < result_count; i++) {
3450 netinfo = &netinfo_start[i]; 2808 netinfo = &netinfo_start[i];
3451 if (!netinfo) { 2809 if (!netinfo) {
3452 WL_ERR("Invalid netinfo ptr. index: %d\n", i); 2810 brcmf_err("Invalid netinfo ptr. index: %d\n",
2811 i);
3453 err = -EINVAL; 2812 err = -EINVAL;
3454 goto out_err; 2813 goto out_err;
3455 } 2814 }
3456 2815
3457 WL_SCAN("SSID:%s Channel:%d\n", 2816 brcmf_dbg(SCAN, "SSID:%s Channel:%d\n",
3458 netinfo->SSID, netinfo->channel); 2817 netinfo->SSID, netinfo->channel);
3459 memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len); 2818 memcpy(ssid[i].ssid, netinfo->SSID, netinfo->SSID_len);
3460 ssid[i].ssid_len = netinfo->SSID_len; 2819 ssid[i].ssid_len = netinfo->SSID_len;
3461 request->n_ssids++; 2820 request->n_ssids++;
@@ -3478,21 +2837,21 @@ brcmf_notify_sched_scan_results(struct brcmf_cfg80211_info *cfg,
3478 if (request->n_ssids) 2837 if (request->n_ssids)
3479 request->ssids = &ssid[0]; 2838 request->ssids = &ssid[0];
3480 2839
3481 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) { 2840 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
3482 /* Abort any on-going scan */ 2841 /* Abort any on-going scan */
3483 brcmf_abort_scanning(cfg); 2842 brcmf_abort_scanning(cfg);
3484 } 2843 }
3485 2844
3486 set_bit(WL_STATUS_SCANNING, &cfg->status); 2845 set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
3487 err = brcmf_do_escan(cfg, wiphy, ndev, request); 2846 err = brcmf_do_escan(cfg, wiphy, ndev, request);
3488 if (err) { 2847 if (err) {
3489 clear_bit(WL_STATUS_SCANNING, &cfg->status); 2848 clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
3490 goto out_err; 2849 goto out_err;
3491 } 2850 }
3492 cfg->sched_escan = true; 2851 cfg->sched_escan = true;
3493 cfg->scan_request = request; 2852 cfg->scan_request = request;
3494 } else { 2853 } else {
3495 WL_ERR("FALSE PNO Event. (pfn_count == 0)\n"); 2854 brcmf_err("FALSE PNO Event. (pfn_count == 0)\n");
3496 goto out_err; 2855 goto out_err;
3497 } 2856 }
3498 2857
@@ -3509,21 +2868,19 @@ out_err:
3509 return err; 2868 return err;
3510} 2869}
3511 2870
3512#ifndef CONFIG_BRCMISCAN
3513static int brcmf_dev_pno_clean(struct net_device *ndev) 2871static int brcmf_dev_pno_clean(struct net_device *ndev)
3514{ 2872{
3515 char iovbuf[128];
3516 int ret; 2873 int ret;
3517 2874
3518 /* Disable pfn */ 2875 /* Disable pfn */
3519 ret = brcmf_dev_intvar_set(ndev, "pfn", 0); 2876 ret = brcmf_fil_iovar_int_set(netdev_priv(ndev), "pfn", 0);
3520 if (ret == 0) { 2877 if (ret == 0) {
3521 /* clear pfn */ 2878 /* clear pfn */
3522 ret = brcmf_dev_iovar_setbuf(ndev, "pfnclear", NULL, 0, 2879 ret = brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfnclear",
3523 iovbuf, sizeof(iovbuf)); 2880 NULL, 0);
3524 } 2881 }
3525 if (ret < 0) 2882 if (ret < 0)
3526 WL_ERR("failed code %d\n", ret); 2883 brcmf_err("failed code %d\n", ret);
3527 2884
3528 return ret; 2885 return ret;
3529} 2886}
@@ -3531,7 +2888,6 @@ static int brcmf_dev_pno_clean(struct net_device *ndev)
3531static int brcmf_dev_pno_config(struct net_device *ndev) 2888static int brcmf_dev_pno_config(struct net_device *ndev)
3532{ 2889{
3533 struct brcmf_pno_param_le pfn_param; 2890 struct brcmf_pno_param_le pfn_param;
3534 char iovbuf[128];
3535 2891
3536 memset(&pfn_param, 0, sizeof(pfn_param)); 2892 memset(&pfn_param, 0, sizeof(pfn_param));
3537 pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION); 2893 pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);
@@ -3544,9 +2900,8 @@ static int brcmf_dev_pno_config(struct net_device *ndev)
3544 /* set up pno scan fr */ 2900 /* set up pno scan fr */
3545 pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME); 2901 pfn_param.scan_freq = cpu_to_le32(BRCMF_PNO_TIME);
3546 2902
3547 return brcmf_dev_iovar_setbuf(ndev, "pfn_set", 2903 return brcmf_fil_iovar_data_set(netdev_priv(ndev), "pfn_set",
3548 &pfn_param, sizeof(pfn_param), 2904 &pfn_param, sizeof(pfn_param));
3549 iovbuf, sizeof(iovbuf));
3550} 2905}
3551 2906
3552static int 2907static int
@@ -3554,30 +2909,30 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3554 struct net_device *ndev, 2909 struct net_device *ndev,
3555 struct cfg80211_sched_scan_request *request) 2910 struct cfg80211_sched_scan_request *request)
3556{ 2911{
3557 char iovbuf[128]; 2912 struct brcmf_if *ifp = netdev_priv(ndev);
3558 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); 2913 struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
3559 struct brcmf_pno_net_param_le pfn; 2914 struct brcmf_pno_net_param_le pfn;
3560 int i; 2915 int i;
3561 int ret = 0; 2916 int ret = 0;
3562 2917
3563 WL_SCAN("Enter n_match_sets:%d n_ssids:%d\n", 2918 brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
3564 request->n_match_sets, request->n_ssids); 2919 request->n_match_sets, request->n_ssids);
3565 if (test_bit(WL_STATUS_SCANNING, &cfg->status)) { 2920 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
3566 WL_ERR("Scanning already : status (%lu)\n", cfg->status); 2921 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
3567 return -EAGAIN; 2922 return -EAGAIN;
3568 } 2923 }
3569 2924
3570 if (!request || !request->n_ssids || !request->n_match_sets) { 2925 if (!request || !request->n_ssids || !request->n_match_sets) {
3571 WL_ERR("Invalid sched scan req!! n_ssids:%d\n", 2926 brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
3572 request ? request->n_ssids : 0); 2927 request ? request->n_ssids : 0);
3573 return -EINVAL; 2928 return -EINVAL;
3574 } 2929 }
3575 2930
3576 if (request->n_ssids > 0) { 2931 if (request->n_ssids > 0) {
3577 for (i = 0; i < request->n_ssids; i++) { 2932 for (i = 0; i < request->n_ssids; i++) {
3578 /* Active scan req for ssids */ 2933 /* Active scan req for ssids */
3579 WL_SCAN(">>> Active scan req for ssid (%s)\n", 2934 brcmf_dbg(SCAN, ">>> Active scan req for ssid (%s)\n",
3580 request->ssids[i].ssid); 2935 request->ssids[i].ssid);
3581 2936
3582 /* 2937 /*
3583 * match_set ssids is a supert set of n_ssid list, 2938 * match_set ssids is a supert set of n_ssid list,
@@ -3590,14 +2945,14 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3590 /* clean up everything */ 2945 /* clean up everything */
3591 ret = brcmf_dev_pno_clean(ndev); 2946 ret = brcmf_dev_pno_clean(ndev);
3592 if (ret < 0) { 2947 if (ret < 0) {
3593 WL_ERR("failed error=%d\n", ret); 2948 brcmf_err("failed error=%d\n", ret);
3594 return ret; 2949 return ret;
3595 } 2950 }
3596 2951
3597 /* configure pno */ 2952 /* configure pno */
3598 ret = brcmf_dev_pno_config(ndev); 2953 ret = brcmf_dev_pno_config(ndev);
3599 if (ret < 0) { 2954 if (ret < 0) {
3600 WL_ERR("PNO setup failed!! ret=%d\n", ret); 2955 brcmf_err("PNO setup failed!! ret=%d\n", ret);
3601 return -EINVAL; 2956 return -EINVAL;
3602 } 2957 }
3603 2958
@@ -3610,7 +2965,7 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3610 ssid_len = ssid->ssid_len; 2965 ssid_len = ssid->ssid_len;
3611 2966
3612 if (!ssid_len) { 2967 if (!ssid_len) {
3613 WL_ERR("skip broadcast ssid\n"); 2968 brcmf_err("skip broadcast ssid\n");
3614 continue; 2969 continue;
3615 } 2970 }
3616 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN); 2971 pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
@@ -3620,16 +2975,14 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3620 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT); 2975 pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
3621 pfn.ssid.SSID_len = cpu_to_le32(ssid_len); 2976 pfn.ssid.SSID_len = cpu_to_le32(ssid_len);
3622 memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len); 2977 memcpy(pfn.ssid.SSID, ssid->ssid, ssid_len);
3623 ret = brcmf_dev_iovar_setbuf(ndev, "pfn_add", 2978 ret = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn,
3624 &pfn, sizeof(pfn), 2979 sizeof(pfn));
3625 iovbuf, sizeof(iovbuf)); 2980 brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
3626 WL_SCAN(">>> PNO filter %s for ssid (%s)\n", 2981 ret == 0 ? "set" : "failed", ssid->ssid);
3627 ret == 0 ? "set" : "failed",
3628 ssid->ssid);
3629 } 2982 }
3630 /* Enable the PNO */ 2983 /* Enable the PNO */
3631 if (brcmf_dev_intvar_set(ndev, "pfn", 1) < 0) { 2984 if (brcmf_fil_iovar_int_set(ifp, "pfn", 1) < 0) {
3632 WL_ERR("PNO enable failed!! ret=%d\n", ret); 2985 brcmf_err("PNO enable failed!! ret=%d\n", ret);
3633 return -EINVAL; 2986 return -EINVAL;
3634 } 2987 }
3635 } else { 2988 } else {
@@ -3644,24 +2997,31 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
3644{ 2997{
3645 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 2998 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3646 2999
3647 WL_SCAN("enter\n"); 3000 brcmf_dbg(SCAN, "enter\n");
3648 brcmf_dev_pno_clean(ndev); 3001 brcmf_dev_pno_clean(ndev);
3649 if (cfg->sched_escan) 3002 if (cfg->sched_escan)
3650 brcmf_notify_escan_complete(cfg, ndev, true, true); 3003 brcmf_notify_escan_complete(cfg, ndev, true, true);
3651 return 0; 3004 return 0;
3652} 3005}
3653#endif /* CONFIG_BRCMISCAN */
3654 3006
3655#ifdef CONFIG_NL80211_TESTMODE 3007#ifdef CONFIG_NL80211_TESTMODE
3656static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len) 3008static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
3657{ 3009{
3658 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3010 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
3659 struct net_device *ndev = cfg->wdev->netdev; 3011 struct net_device *ndev = cfg_to_ndev(cfg);
3660 struct brcmf_dcmd *dcmd = data; 3012 struct brcmf_dcmd *dcmd = data;
3661 struct sk_buff *reply; 3013 struct sk_buff *reply;
3662 int ret; 3014 int ret;
3663 3015
3664 ret = brcmf_netlink_dcmd(ndev, dcmd); 3016 brcmf_dbg(TRACE, "cmd %x set %d buf %p len %d\n", dcmd->cmd, dcmd->set,
3017 dcmd->buf, dcmd->len);
3018
3019 if (dcmd->set)
3020 ret = brcmf_fil_cmd_data_set(netdev_priv(ndev), dcmd->cmd,
3021 dcmd->buf, dcmd->len);
3022 else
3023 ret = brcmf_fil_cmd_data_get(netdev_priv(ndev), dcmd->cmd,
3024 dcmd->buf, dcmd->len);
3665 if (ret == 0) { 3025 if (ret == 0) {
3666 reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd)); 3026 reply = cfg80211_testmode_alloc_reply_skb(wiphy, sizeof(*dcmd));
3667 nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd); 3027 nla_put(reply, NL80211_ATTR_TESTDATA, sizeof(*dcmd), dcmd);
@@ -3673,25 +3033,25 @@ static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
3673 3033
3674static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx) 3034static s32 brcmf_configure_opensecurity(struct net_device *ndev, s32 bssidx)
3675{ 3035{
3036 struct brcmf_if *ifp = netdev_priv(ndev);
3676 s32 err; 3037 s32 err;
3677 3038
3678 /* set auth */ 3039 /* set auth */
3679 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", 0, bssidx); 3040 err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0);
3680 if (err < 0) { 3041 if (err < 0) {
3681 WL_ERR("auth error %d\n", err); 3042 brcmf_err("auth error %d\n", err);
3682 return err; 3043 return err;
3683 } 3044 }
3684 /* set wsec */ 3045 /* set wsec */
3685 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", 0, bssidx); 3046 err = brcmf_fil_bsscfg_int_set(ifp, "wsec", 0);
3686 if (err < 0) { 3047 if (err < 0) {
3687 WL_ERR("wsec error %d\n", err); 3048 brcmf_err("wsec error %d\n", err);
3688 return err; 3049 return err;
3689 } 3050 }
3690 /* set upper-layer auth */ 3051 /* set upper-layer auth */
3691 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", 3052 err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE);
3692 WPA_AUTH_NONE, bssidx);
3693 if (err < 0) { 3053 if (err < 0) {
3694 WL_ERR("wpa_auth error %d\n", err); 3054 brcmf_err("wpa_auth error %d\n", err);
3695 return err; 3055 return err;
3696 } 3056 }
3697 3057
@@ -3708,8 +3068,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
3708 3068
3709static s32 3069static s32
3710brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie, 3070brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3711 bool is_rsn_ie, s32 bssidx) 3071 bool is_rsn_ie)
3712{ 3072{
3073 struct brcmf_if *ifp = netdev_priv(ndev);
3713 u32 auth = 0; /* d11 open authentication */ 3074 u32 auth = 0; /* d11 open authentication */
3714 u16 count; 3075 u16 count;
3715 s32 err = 0; 3076 s32 err = 0;
@@ -3724,7 +3085,7 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3724 u16 rsn_cap; 3085 u16 rsn_cap;
3725 u32 wme_bss_disable; 3086 u32 wme_bss_disable;
3726 3087
3727 WL_TRACE("Enter\n"); 3088 brcmf_dbg(TRACE, "Enter\n");
3728 if (wpa_ie == NULL) 3089 if (wpa_ie == NULL)
3729 goto exit; 3090 goto exit;
3730 3091
@@ -3738,13 +3099,13 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3738 /* check for multicast cipher suite */ 3099 /* check for multicast cipher suite */
3739 if (offset + WPA_IE_MIN_OUI_LEN > len) { 3100 if (offset + WPA_IE_MIN_OUI_LEN > len) {
3740 err = -EINVAL; 3101 err = -EINVAL;
3741 WL_ERR("no multicast cipher suite\n"); 3102 brcmf_err("no multicast cipher suite\n");
3742 goto exit; 3103 goto exit;
3743 } 3104 }
3744 3105
3745 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { 3106 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3746 err = -EINVAL; 3107 err = -EINVAL;
3747 WL_ERR("ivalid OUI\n"); 3108 brcmf_err("ivalid OUI\n");
3748 goto exit; 3109 goto exit;
3749 } 3110 }
3750 offset += TLV_OUI_LEN; 3111 offset += TLV_OUI_LEN;
@@ -3766,7 +3127,7 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3766 break; 3127 break;
3767 default: 3128 default:
3768 err = -EINVAL; 3129 err = -EINVAL;
3769 WL_ERR("Invalid multi cast cipher info\n"); 3130 brcmf_err("Invalid multi cast cipher info\n");
3770 goto exit; 3131 goto exit;
3771 } 3132 }
3772 3133
@@ -3777,13 +3138,13 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3777 /* Check for unicast suite(s) */ 3138 /* Check for unicast suite(s) */
3778 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { 3139 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3779 err = -EINVAL; 3140 err = -EINVAL;
3780 WL_ERR("no unicast cipher suite\n"); 3141 brcmf_err("no unicast cipher suite\n");
3781 goto exit; 3142 goto exit;
3782 } 3143 }
3783 for (i = 0; i < count; i++) { 3144 for (i = 0; i < count; i++) {
3784 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { 3145 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3785 err = -EINVAL; 3146 err = -EINVAL;
3786 WL_ERR("ivalid OUI\n"); 3147 brcmf_err("ivalid OUI\n");
3787 goto exit; 3148 goto exit;
3788 } 3149 }
3789 offset += TLV_OUI_LEN; 3150 offset += TLV_OUI_LEN;
@@ -3801,7 +3162,7 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3801 pval |= AES_ENABLED; 3162 pval |= AES_ENABLED;
3802 break; 3163 break;
3803 default: 3164 default:
3804 WL_ERR("Ivalid unicast security info\n"); 3165 brcmf_err("Ivalid unicast security info\n");
3805 } 3166 }
3806 offset++; 3167 offset++;
3807 } 3168 }
@@ -3811,33 +3172,33 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3811 /* Check for auth key management suite(s) */ 3172 /* Check for auth key management suite(s) */
3812 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { 3173 if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) {
3813 err = -EINVAL; 3174 err = -EINVAL;
3814 WL_ERR("no auth key mgmt suite\n"); 3175 brcmf_err("no auth key mgmt suite\n");
3815 goto exit; 3176 goto exit;
3816 } 3177 }
3817 for (i = 0; i < count; i++) { 3178 for (i = 0; i < count; i++) {
3818 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) { 3179 if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
3819 err = -EINVAL; 3180 err = -EINVAL;
3820 WL_ERR("ivalid OUI\n"); 3181 brcmf_err("ivalid OUI\n");
3821 goto exit; 3182 goto exit;
3822 } 3183 }
3823 offset += TLV_OUI_LEN; 3184 offset += TLV_OUI_LEN;
3824 switch (data[offset]) { 3185 switch (data[offset]) {
3825 case RSN_AKM_NONE: 3186 case RSN_AKM_NONE:
3826 WL_TRACE("RSN_AKM_NONE\n"); 3187 brcmf_dbg(TRACE, "RSN_AKM_NONE\n");
3827 wpa_auth |= WPA_AUTH_NONE; 3188 wpa_auth |= WPA_AUTH_NONE;
3828 break; 3189 break;
3829 case RSN_AKM_UNSPECIFIED: 3190 case RSN_AKM_UNSPECIFIED:
3830 WL_TRACE("RSN_AKM_UNSPECIFIED\n"); 3191 brcmf_dbg(TRACE, "RSN_AKM_UNSPECIFIED\n");
3831 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) : 3192 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) :
3832 (wpa_auth |= WPA_AUTH_UNSPECIFIED); 3193 (wpa_auth |= WPA_AUTH_UNSPECIFIED);
3833 break; 3194 break;
3834 case RSN_AKM_PSK: 3195 case RSN_AKM_PSK:
3835 WL_TRACE("RSN_AKM_PSK\n"); 3196 brcmf_dbg(TRACE, "RSN_AKM_PSK\n");
3836 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) : 3197 is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) :
3837 (wpa_auth |= WPA_AUTH_PSK); 3198 (wpa_auth |= WPA_AUTH_PSK);
3838 break; 3199 break;
3839 default: 3200 default:
3840 WL_ERR("Ivalid key mgmt info\n"); 3201 brcmf_err("Ivalid key mgmt info\n");
3841 } 3202 }
3842 offset++; 3203 offset++;
3843 } 3204 }
@@ -3850,10 +3211,10 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3850 wme_bss_disable = 0; 3211 wme_bss_disable = 0;
3851 } 3212 }
3852 /* set wme_bss_disable to sync RSN Capabilities */ 3213 /* set wme_bss_disable to sync RSN Capabilities */
3853 err = brcmf_dev_intvar_set_bsscfg(ndev, "wme_bss_disable", 3214 err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable",
3854 wme_bss_disable, bssidx); 3215 wme_bss_disable);
3855 if (err < 0) { 3216 if (err < 0) {
3856 WL_ERR("wme_bss_disable error %d\n", err); 3217 brcmf_err("wme_bss_disable error %d\n", err);
3857 goto exit; 3218 goto exit;
3858 } 3219 }
3859 } 3220 }
@@ -3861,21 +3222,21 @@ brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
3861 wsec = (pval | gval | SES_OW_ENABLED); 3222 wsec = (pval | gval | SES_OW_ENABLED);
3862 3223
3863 /* set auth */ 3224 /* set auth */
3864 err = brcmf_dev_intvar_set_bsscfg(ndev, "auth", auth, bssidx); 3225 err = brcmf_fil_bsscfg_int_set(ifp, "auth", auth);
3865 if (err < 0) { 3226 if (err < 0) {
3866 WL_ERR("auth error %d\n", err); 3227 brcmf_err("auth error %d\n", err);
3867 goto exit; 3228 goto exit;
3868 } 3229 }
3869 /* set wsec */ 3230 /* set wsec */
3870 err = brcmf_dev_intvar_set_bsscfg(ndev, "wsec", wsec, bssidx); 3231 err = brcmf_fil_bsscfg_int_set(ifp, "wsec", wsec);
3871 if (err < 0) { 3232 if (err < 0) {
3872 WL_ERR("wsec error %d\n", err); 3233 brcmf_err("wsec error %d\n", err);
3873 goto exit; 3234 goto exit;
3874 } 3235 }
3875 /* set upper-layer auth */ 3236 /* set upper-layer auth */
3876 err = brcmf_dev_intvar_set_bsscfg(ndev, "wpa_auth", wpa_auth, bssidx); 3237 err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth);
3877 if (err < 0) { 3238 if (err < 0) {
3878 WL_ERR("wpa_auth error %d\n", err); 3239 brcmf_err("wpa_auth error %d\n", err);
3879 goto exit; 3240 goto exit;
3880 } 3241 }
3881 3242
@@ -3884,7 +3245,7 @@ exit:
3884} 3245}
3885 3246
3886static s32 3247static s32
3887brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len, 3248brcmf_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len,
3888 struct parsed_vndr_ies *vndr_ies) 3249 struct parsed_vndr_ies *vndr_ies)
3889{ 3250{
3890 s32 err = 0; 3251 s32 err = 0;
@@ -3903,15 +3264,15 @@ brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
3903 vndrie = (struct brcmf_vs_tlv *)ie; 3264 vndrie = (struct brcmf_vs_tlv *)ie;
3904 /* len should be bigger than OUI length + one */ 3265 /* len should be bigger than OUI length + one */
3905 if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) { 3266 if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) {
3906 WL_ERR("invalid vndr ie. length is too small %d\n", 3267 brcmf_err("invalid vndr ie. length is too small %d\n",
3907 vndrie->len); 3268 vndrie->len);
3908 goto next; 3269 goto next;
3909 } 3270 }
3910 /* if wpa or wme ie, do not add ie */ 3271 /* if wpa or wme ie, do not add ie */
3911 if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) && 3272 if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) &&
3912 ((vndrie->oui_type == WPA_OUI_TYPE) || 3273 ((vndrie->oui_type == WPA_OUI_TYPE) ||
3913 (vndrie->oui_type == WME_OUI_TYPE))) { 3274 (vndrie->oui_type == WME_OUI_TYPE))) {
3914 WL_TRACE("Found WPA/WME oui. Do not add it\n"); 3275 brcmf_dbg(TRACE, "Found WPA/WME oui. Do not add it\n");
3915 goto next; 3276 goto next;
3916 } 3277 }
3917 3278
@@ -3924,20 +3285,21 @@ brcmf_parse_vndr_ies(u8 *vndr_ie_buf, u32 vndr_ie_len,
3924 3285
3925 vndr_ies->count++; 3286 vndr_ies->count++;
3926 3287
3927 WL_TRACE("** OUI %02x %02x %02x, type 0x%02x\n", 3288 brcmf_dbg(TRACE, "** OUI %02x %02x %02x, type 0x%02x\n",
3928 parsed_info->vndrie.oui[0], 3289 parsed_info->vndrie.oui[0],
3929 parsed_info->vndrie.oui[1], 3290 parsed_info->vndrie.oui[1],
3930 parsed_info->vndrie.oui[2], 3291 parsed_info->vndrie.oui[2],
3931 parsed_info->vndrie.oui_type); 3292 parsed_info->vndrie.oui_type);
3932 3293
3933 if (vndr_ies->count >= MAX_VNDR_IE_NUMBER) 3294 if (vndr_ies->count >= MAX_VNDR_IE_NUMBER)
3934 break; 3295 break;
3935next: 3296next:
3936 remaining_len -= ie->len; 3297 remaining_len -= (ie->len + TLV_HDR_LEN);
3937 if (remaining_len <= 2) 3298 if (remaining_len <= TLV_HDR_LEN)
3938 ie = NULL; 3299 ie = NULL;
3939 else 3300 else
3940 ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len); 3301 ie = (struct brcmf_tlv *)(((u8 *)ie) + ie->len +
3302 TLV_HDR_LEN);
3941 } 3303 }
3942 return err; 3304 return err;
3943} 3305}
@@ -3963,17 +3325,18 @@ brcmf_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd)
3963 return ie_len + VNDR_IE_HDR_SIZE; 3325 return ie_len + VNDR_IE_HDR_SIZE;
3964} 3326}
3965 3327
3966s32 3328static
3967brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg, 3329s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
3968 struct net_device *ndev, s32 bssidx, s32 pktflag, 3330 const u8 *vndr_ie_buf, u32 vndr_ie_len)
3969 u8 *vndr_ie_buf, u32 vndr_ie_len)
3970{ 3331{
3332 struct brcmf_if *ifp;
3333 struct vif_saved_ie *saved_ie;
3971 s32 err = 0; 3334 s32 err = 0;
3972 u8 *iovar_ie_buf; 3335 u8 *iovar_ie_buf;
3973 u8 *curr_ie_buf; 3336 u8 *curr_ie_buf;
3974 u8 *mgmt_ie_buf = NULL; 3337 u8 *mgmt_ie_buf = NULL;
3975 int mgmt_ie_buf_len; 3338 int mgmt_ie_buf_len;
3976 u32 *mgmt_ie_len = 0; 3339 u32 *mgmt_ie_len;
3977 u32 del_add_ie_buf_len = 0; 3340 u32 del_add_ie_buf_len = 0;
3978 u32 total_ie_buf_len = 0; 3341 u32 total_ie_buf_len = 0;
3979 u32 parsed_ie_buf_len = 0; 3342 u32 parsed_ie_buf_len = 0;
@@ -3984,40 +3347,42 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
3984 u8 *ptr; 3347 u8 *ptr;
3985 int remained_buf_len; 3348 int remained_buf_len;
3986 3349
3987 WL_TRACE("bssidx %d, pktflag : 0x%02X\n", bssidx, pktflag); 3350 if (!vif)
3351 return -ENODEV;
3352 ifp = vif->ifp;
3353 saved_ie = &vif->saved_ie;
3354
3355 brcmf_dbg(TRACE, "bssidx %d, pktflag : 0x%02X\n", ifp->bssidx, pktflag);
3988 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 3356 iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
3989 if (!iovar_ie_buf) 3357 if (!iovar_ie_buf)
3990 return -ENOMEM; 3358 return -ENOMEM;
3991 curr_ie_buf = iovar_ie_buf; 3359 curr_ie_buf = iovar_ie_buf;
3992 if (test_bit(WL_STATUS_AP_CREATING, &cfg->status) || 3360 if (ifp->vif->mode == WL_MODE_AP) {
3993 test_bit(WL_STATUS_AP_CREATED, &cfg->status)) {
3994 switch (pktflag) { 3361 switch (pktflag) {
3995 case VNDR_IE_PRBRSP_FLAG: 3362 case VNDR_IE_PRBRSP_FLAG:
3996 mgmt_ie_buf = cfg->ap_info->probe_res_ie; 3363 mgmt_ie_buf = saved_ie->probe_res_ie;
3997 mgmt_ie_len = &cfg->ap_info->probe_res_ie_len; 3364 mgmt_ie_len = &saved_ie->probe_res_ie_len;
3998 mgmt_ie_buf_len = 3365 mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie);
3999 sizeof(cfg->ap_info->probe_res_ie);
4000 break; 3366 break;
4001 case VNDR_IE_BEACON_FLAG: 3367 case VNDR_IE_BEACON_FLAG:
4002 mgmt_ie_buf = cfg->ap_info->beacon_ie; 3368 mgmt_ie_buf = saved_ie->beacon_ie;
4003 mgmt_ie_len = &cfg->ap_info->beacon_ie_len; 3369 mgmt_ie_len = &saved_ie->beacon_ie_len;
4004 mgmt_ie_buf_len = sizeof(cfg->ap_info->beacon_ie); 3370 mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie);
4005 break; 3371 break;
4006 default: 3372 default:
4007 err = -EPERM; 3373 err = -EPERM;
4008 WL_ERR("not suitable type\n"); 3374 brcmf_err("not suitable type\n");
4009 goto exit; 3375 goto exit;
4010 } 3376 }
4011 bssidx = 0;
4012 } else { 3377 } else {
4013 err = -EPERM; 3378 err = -EPERM;
4014 WL_ERR("not suitable type\n"); 3379 brcmf_err("not suitable type\n");
4015 goto exit; 3380 goto exit;
4016 } 3381 }
4017 3382
4018 if (vndr_ie_len > mgmt_ie_buf_len) { 3383 if (vndr_ie_len > mgmt_ie_buf_len) {
4019 err = -ENOMEM; 3384 err = -ENOMEM;
4020 WL_ERR("extra IE size too big\n"); 3385 brcmf_err("extra IE size too big\n");
4021 goto exit; 3386 goto exit;
4022 } 3387 }
4023 3388
@@ -4033,11 +3398,11 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
4033 } 3398 }
4034 } 3399 }
4035 3400
4036 if (mgmt_ie_buf != NULL) { 3401 if (mgmt_ie_buf && *mgmt_ie_len) {
4037 if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) && 3402 if (parsed_ie_buf_len && (parsed_ie_buf_len == *mgmt_ie_len) &&
4038 (memcmp(mgmt_ie_buf, curr_ie_buf, 3403 (memcmp(mgmt_ie_buf, curr_ie_buf,
4039 parsed_ie_buf_len) == 0)) { 3404 parsed_ie_buf_len) == 0)) {
4040 WL_TRACE("Previous mgmt IE is equals to current IE"); 3405 brcmf_dbg(TRACE, "Previous mgmt IE equals to current IE\n");
4041 goto exit; 3406 goto exit;
4042 } 3407 }
4043 3408
@@ -4048,12 +3413,12 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
4048 for (i = 0; i < old_vndr_ies.count; i++) { 3413 for (i = 0; i < old_vndr_ies.count; i++) {
4049 vndrie_info = &old_vndr_ies.ie_info[i]; 3414 vndrie_info = &old_vndr_ies.ie_info[i];
4050 3415
4051 WL_TRACE("DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n", 3416 brcmf_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%02x:%02x:%02x\n",
4052 vndrie_info->vndrie.id, 3417 vndrie_info->vndrie.id,
4053 vndrie_info->vndrie.len, 3418 vndrie_info->vndrie.len,
4054 vndrie_info->vndrie.oui[0], 3419 vndrie_info->vndrie.oui[0],
4055 vndrie_info->vndrie.oui[1], 3420 vndrie_info->vndrie.oui[1],
4056 vndrie_info->vndrie.oui[2]); 3421 vndrie_info->vndrie.oui[2]);
4057 3422
4058 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag, 3423 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4059 vndrie_info->ie_ptr, 3424 vndrie_info->ie_ptr,
@@ -4075,24 +3440,27 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
4075 for (i = 0; i < new_vndr_ies.count; i++) { 3440 for (i = 0; i < new_vndr_ies.count; i++) {
4076 vndrie_info = &new_vndr_ies.ie_info[i]; 3441 vndrie_info = &new_vndr_ies.ie_info[i];
4077 3442
4078 WL_TRACE("ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n", 3443 /* verify remained buf size before copy data */
4079 vndrie_info->vndrie.id, 3444 if (remained_buf_len < (vndrie_info->vndrie.len +
4080 vndrie_info->vndrie.len, 3445 VNDR_IE_VSIE_OFFSET)) {
4081 vndrie_info->vndrie.oui[0], 3446 brcmf_err("no space in mgmt_ie_buf: len left %d",
4082 vndrie_info->vndrie.oui[1], 3447 remained_buf_len);
4083 vndrie_info->vndrie.oui[2]); 3448 break;
3449 }
3450 remained_buf_len -= (vndrie_info->ie_len +
3451 VNDR_IE_VSIE_OFFSET);
3452
3453 brcmf_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%02x:%02x:%02x\n",
3454 vndrie_info->vndrie.id,
3455 vndrie_info->vndrie.len,
3456 vndrie_info->vndrie.oui[0],
3457 vndrie_info->vndrie.oui[1],
3458 vndrie_info->vndrie.oui[2]);
4084 3459
4085 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag, 3460 del_add_ie_buf_len = brcmf_vndr_ie(curr_ie_buf, pktflag,
4086 vndrie_info->ie_ptr, 3461 vndrie_info->ie_ptr,
4087 vndrie_info->ie_len, 3462 vndrie_info->ie_len,
4088 "add"); 3463 "add");
4089 /* verify remained buf size before copy data */
4090 remained_buf_len -= vndrie_info->ie_len;
4091 if (remained_buf_len < 0) {
4092 WL_ERR("no space in mgmt_ie_buf: len left %d",
4093 remained_buf_len);
4094 break;
4095 }
4096 3464
4097 /* save the parsed IE in wl struct */ 3465 /* save the parsed IE in wl struct */
4098 memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, 3466 memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr,
@@ -4104,13 +3472,10 @@ brcmf_set_management_ie(struct brcmf_cfg80211_info *cfg,
4104 } 3472 }
4105 } 3473 }
4106 if (total_ie_buf_len) { 3474 if (total_ie_buf_len) {
4107 err = brcmf_dev_iovar_setbuf_bsscfg(ndev, "vndr_ie", 3475 err = brcmf_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf,
4108 iovar_ie_buf, 3476 total_ie_buf_len);
4109 total_ie_buf_len,
4110 cfg->extra_buf,
4111 WL_EXTRA_BUF_MAX, bssidx);
4112 if (err) 3477 if (err)
4113 WL_ERR("vndr ie set error : %d\n", err); 3478 brcmf_err("vndr ie set error : %d\n", err);
4114 } 3479 }
4115 3480
4116exit: 3481exit:
@@ -4123,25 +3488,25 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4123 struct cfg80211_ap_settings *settings) 3488 struct cfg80211_ap_settings *settings)
4124{ 3489{
4125 s32 ie_offset; 3490 s32 ie_offset;
3491 struct brcmf_if *ifp = netdev_priv(ndev);
4126 struct brcmf_tlv *ssid_ie; 3492 struct brcmf_tlv *ssid_ie;
4127 struct brcmf_ssid_le ssid_le; 3493 struct brcmf_ssid_le ssid_le;
4128 s32 ioctl_value;
4129 s32 err = -EPERM; 3494 s32 err = -EPERM;
4130 struct brcmf_tlv *rsn_ie; 3495 struct brcmf_tlv *rsn_ie;
4131 struct brcmf_vs_tlv *wpa_ie; 3496 struct brcmf_vs_tlv *wpa_ie;
4132 struct brcmf_join_params join_params; 3497 struct brcmf_join_params join_params;
4133 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
4134 s32 bssidx = 0; 3498 s32 bssidx = 0;
4135 3499
4136 WL_TRACE("channel_type=%d, beacon_interval=%d, dtim_period=%d,\n", 3500 brcmf_dbg(TRACE, "channel_type=%d, beacon_interval=%d, dtim_period=%d,\n",
4137 settings->channel_type, settings->beacon_interval, 3501 cfg80211_get_chandef_type(&settings->chandef),
4138 settings->dtim_period); 3502 settings->beacon_interval,
4139 WL_TRACE("ssid=%s(%d), auth_type=%d, inactivity_timeout=%d\n", 3503 settings->dtim_period);
4140 settings->ssid, settings->ssid_len, settings->auth_type, 3504 brcmf_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n",
4141 settings->inactivity_timeout); 3505 settings->ssid, settings->ssid_len, settings->auth_type,
3506 settings->inactivity_timeout);
4142 3507
4143 if (!test_bit(WL_STATUS_AP_CREATING, &cfg->status)) { 3508 if (!test_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state)) {
4144 WL_ERR("Not in AP creation mode\n"); 3509 brcmf_err("Not in AP creation mode\n");
4145 return -EPERM; 3510 return -EPERM;
4146 } 3511 }
4147 3512
@@ -4157,29 +3522,26 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4157 3522
4158 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); 3523 memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len);
4159 ssid_le.SSID_len = cpu_to_le32(ssid_ie->len); 3524 ssid_le.SSID_len = cpu_to_le32(ssid_ie->len);
4160 WL_TRACE("SSID is (%s) in Head\n", ssid_le.SSID); 3525 brcmf_dbg(TRACE, "SSID is (%s) in Head\n", ssid_le.SSID);
4161 } else { 3526 } else {
4162 memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len); 3527 memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len);
4163 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len); 3528 ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len);
4164 } 3529 }
4165 3530
4166 brcmf_set_mpc(ndev, 0); 3531 brcmf_set_mpc(ndev, 0);
4167 ioctl_value = 1; 3532 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
4168 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_DOWN, &ioctl_value);
4169 if (err < 0) { 3533 if (err < 0) {
4170 WL_ERR("BRCMF_C_DOWN error %d\n", err); 3534 brcmf_err("BRCMF_C_DOWN error %d\n", err);
4171 goto exit; 3535 goto exit;
4172 } 3536 }
4173 ioctl_value = 1; 3537 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 1);
4174 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_INFRA, &ioctl_value);
4175 if (err < 0) { 3538 if (err < 0) {
4176 WL_ERR("SET INFRA error %d\n", err); 3539 brcmf_err("SET INFRA error %d\n", err);
4177 goto exit; 3540 goto exit;
4178 } 3541 }
4179 ioctl_value = 1; 3542 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 1);
4180 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4181 if (err < 0) { 3543 if (err < 0) {
4182 WL_ERR("setting AP mode failed %d\n", err); 3544 brcmf_err("setting AP mode failed %d\n", err);
4183 goto exit; 3545 goto exit;
4184 } 3546 }
4185 3547
@@ -4191,82 +3553,63 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4191 wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail, 3553 wpa_ie = brcmf_find_wpaie((u8 *)settings->beacon.tail,
4192 settings->beacon.tail_len); 3554 settings->beacon.tail_len);
4193 3555
4194 kfree(cfg->ap_info->rsn_ie);
4195 cfg->ap_info->rsn_ie = NULL;
4196 kfree(cfg->ap_info->wpa_ie);
4197 cfg->ap_info->wpa_ie = NULL;
4198
4199 if ((wpa_ie != NULL || rsn_ie != NULL)) { 3556 if ((wpa_ie != NULL || rsn_ie != NULL)) {
4200 WL_TRACE("WPA(2) IE is found\n"); 3557 brcmf_dbg(TRACE, "WPA(2) IE is found\n");
4201 if (wpa_ie != NULL) { 3558 if (wpa_ie != NULL) {
4202 /* WPA IE */ 3559 /* WPA IE */
4203 err = brcmf_configure_wpaie(ndev, wpa_ie, false, 3560 err = brcmf_configure_wpaie(ndev, wpa_ie, false);
4204 bssidx);
4205 if (err < 0) 3561 if (err < 0)
4206 goto exit; 3562 goto exit;
4207 cfg->ap_info->wpa_ie = kmemdup(wpa_ie,
4208 wpa_ie->len +
4209 TLV_HDR_LEN,
4210 GFP_KERNEL);
4211 } else { 3563 } else {
4212 /* RSN IE */ 3564 /* RSN IE */
4213 err = brcmf_configure_wpaie(ndev, 3565 err = brcmf_configure_wpaie(ndev,
4214 (struct brcmf_vs_tlv *)rsn_ie, true, bssidx); 3566 (struct brcmf_vs_tlv *)rsn_ie, true);
4215 if (err < 0) 3567 if (err < 0)
4216 goto exit; 3568 goto exit;
4217 cfg->ap_info->rsn_ie = kmemdup(rsn_ie,
4218 rsn_ie->len +
4219 TLV_HDR_LEN,
4220 GFP_KERNEL);
4221 } 3569 }
4222 cfg->ap_info->security_mode = true;
4223 } else { 3570 } else {
4224 WL_TRACE("No WPA(2) IEs found\n"); 3571 brcmf_dbg(TRACE, "No WPA(2) IEs found\n");
4225 brcmf_configure_opensecurity(ndev, bssidx); 3572 brcmf_configure_opensecurity(ndev, bssidx);
4226 cfg->ap_info->security_mode = false;
4227 } 3573 }
4228 /* Set Beacon IEs to FW */ 3574 /* Set Beacon IEs to FW */
4229 err = brcmf_set_management_ie(cfg, ndev, bssidx, 3575 err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
4230 VNDR_IE_BEACON_FLAG, 3576 VNDR_IE_BEACON_FLAG,
4231 (u8 *)settings->beacon.tail, 3577 settings->beacon.tail,
4232 settings->beacon.tail_len); 3578 settings->beacon.tail_len);
4233 if (err) 3579 if (err)
4234 WL_ERR("Set Beacon IE Failed\n"); 3580 brcmf_err("Set Beacon IE Failed\n");
4235 else 3581 else
4236 WL_TRACE("Applied Vndr IEs for Beacon\n"); 3582 brcmf_dbg(TRACE, "Applied Vndr IEs for Beacon\n");
4237 3583
4238 /* Set Probe Response IEs to FW */ 3584 /* Set Probe Response IEs to FW */
4239 err = brcmf_set_management_ie(cfg, ndev, bssidx, 3585 err = brcmf_vif_set_mgmt_ie(ndev_to_vif(ndev),
4240 VNDR_IE_PRBRSP_FLAG, 3586 VNDR_IE_PRBRSP_FLAG,
4241 (u8 *)settings->beacon.proberesp_ies, 3587 settings->beacon.proberesp_ies,
4242 settings->beacon.proberesp_ies_len); 3588 settings->beacon.proberesp_ies_len);
4243 if (err) 3589 if (err)
4244 WL_ERR("Set Probe Resp IE Failed\n"); 3590 brcmf_err("Set Probe Resp IE Failed\n");
4245 else 3591 else
4246 WL_TRACE("Applied Vndr IEs for Probe Resp\n"); 3592 brcmf_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n");
4247 3593
4248 if (settings->beacon_interval) { 3594 if (settings->beacon_interval) {
4249 ioctl_value = settings->beacon_interval; 3595 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_BCNPRD,
4250 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_BCNPRD, 3596 settings->beacon_interval);
4251 &ioctl_value);
4252 if (err < 0) { 3597 if (err < 0) {
4253 WL_ERR("Beacon Interval Set Error, %d\n", err); 3598 brcmf_err("Beacon Interval Set Error, %d\n", err);
4254 goto exit; 3599 goto exit;
4255 } 3600 }
4256 } 3601 }
4257 if (settings->dtim_period) { 3602 if (settings->dtim_period) {
4258 ioctl_value = settings->dtim_period; 3603 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_DTIMPRD,
4259 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_DTIMPRD, 3604 settings->dtim_period);
4260 &ioctl_value);
4261 if (err < 0) { 3605 if (err < 0) {
4262 WL_ERR("DTIM Interval Set Error, %d\n", err); 3606 brcmf_err("DTIM Interval Set Error, %d\n", err);
4263 goto exit; 3607 goto exit;
4264 } 3608 }
4265 } 3609 }
4266 ioctl_value = 1; 3610 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1);
4267 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4268 if (err < 0) { 3611 if (err < 0) {
4269 WL_ERR("BRCMF_C_UP error (%d)\n", err); 3612 brcmf_err("BRCMF_C_UP error (%d)\n", err);
4270 goto exit; 3613 goto exit;
4271 } 3614 }
4272 3615
@@ -4274,14 +3617,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
4274 /* join parameters starts with ssid */ 3617 /* join parameters starts with ssid */
4275 memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le)); 3618 memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le));
4276 /* create softap */ 3619 /* create softap */
4277 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SSID, &join_params, 3620 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
4278 sizeof(join_params)); 3621 &join_params, sizeof(join_params));
4279 if (err < 0) { 3622 if (err < 0) {
4280 WL_ERR("SET SSID error (%d)\n", err); 3623 brcmf_err("SET SSID error (%d)\n", err);
4281 goto exit; 3624 goto exit;
4282 } 3625 }
4283 clear_bit(WL_STATUS_AP_CREATING, &cfg->status); 3626 clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
4284 set_bit(WL_STATUS_AP_CREATED, &cfg->status); 3627 set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
4285 3628
4286exit: 3629exit:
4287 if (err) 3630 if (err)
@@ -4291,31 +3634,28 @@ exit:
4291 3634
4292static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) 3635static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
4293{ 3636{
4294 struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); 3637 struct brcmf_if *ifp = netdev_priv(ndev);
4295 s32 ioctl_value;
4296 s32 err = -EPERM; 3638 s32 err = -EPERM;
4297 3639
4298 WL_TRACE("Enter\n"); 3640 brcmf_dbg(TRACE, "Enter\n");
4299 3641
4300 if (cfg->conf->mode == WL_MODE_AP) { 3642 if (ifp->vif->mode == WL_MODE_AP) {
4301 /* Due to most likely deauths outstanding we sleep */ 3643 /* Due to most likely deauths outstanding we sleep */
4302 /* first to make sure they get processed by fw. */ 3644 /* first to make sure they get processed by fw. */
4303 msleep(400); 3645 msleep(400);
4304 ioctl_value = 0; 3646 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
4305 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_AP, &ioctl_value);
4306 if (err < 0) { 3647 if (err < 0) {
4307 WL_ERR("setting AP mode failed %d\n", err); 3648 brcmf_err("setting AP mode failed %d\n", err);
4308 goto exit; 3649 goto exit;
4309 } 3650 }
4310 ioctl_value = 0; 3651 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
4311 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_UP, &ioctl_value);
4312 if (err < 0) { 3652 if (err < 0) {
4313 WL_ERR("BRCMF_C_UP error %d\n", err); 3653 brcmf_err("BRCMF_C_UP error %d\n", err);
4314 goto exit; 3654 goto exit;
4315 } 3655 }
4316 brcmf_set_mpc(ndev, 1); 3656 brcmf_set_mpc(ndev, 1);
4317 clear_bit(WL_STATUS_AP_CREATING, &cfg->status); 3657 clear_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
4318 clear_bit(WL_STATUS_AP_CREATED, &cfg->status); 3658 clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
4319 } 3659 }
4320exit: 3660exit:
4321 return err; 3661 return err;
@@ -4326,24 +3666,25 @@ brcmf_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev,
4326 u8 *mac) 3666 u8 *mac)
4327{ 3667{
4328 struct brcmf_scb_val_le scbval; 3668 struct brcmf_scb_val_le scbval;
3669 struct brcmf_if *ifp = netdev_priv(ndev);
4329 s32 err; 3670 s32 err;
4330 3671
4331 if (!mac) 3672 if (!mac)
4332 return -EFAULT; 3673 return -EFAULT;
4333 3674
4334 WL_TRACE("Enter %pM\n", mac); 3675 brcmf_dbg(TRACE, "Enter %pM\n", mac);
4335 3676
4336 if (!check_sys_up(wiphy)) 3677 if (!check_vif_up(ifp->vif))
4337 return -EIO; 3678 return -EIO;
4338 3679
4339 memcpy(&scbval.ea, mac, ETH_ALEN); 3680 memcpy(&scbval.ea, mac, ETH_ALEN);
4340 scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING); 3681 scbval.val = cpu_to_le32(WLAN_REASON_DEAUTH_LEAVING);
4341 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON, 3682 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON,
4342 &scbval, sizeof(scbval)); 3683 &scbval, sizeof(scbval));
4343 if (err) 3684 if (err)
4344 WL_ERR("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err); 3685 brcmf_err("SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", err);
4345 3686
4346 WL_TRACE("Exit\n"); 3687 brcmf_dbg(TRACE, "Exit\n");
4347 return err; 3688 return err;
4348} 3689}
4349 3690
@@ -4373,11 +3714,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
4373 .start_ap = brcmf_cfg80211_start_ap, 3714 .start_ap = brcmf_cfg80211_start_ap,
4374 .stop_ap = brcmf_cfg80211_stop_ap, 3715 .stop_ap = brcmf_cfg80211_stop_ap,
4375 .del_station = brcmf_cfg80211_del_station, 3716 .del_station = brcmf_cfg80211_del_station,
4376#ifndef CONFIG_BRCMISCAN
4377 /* scheduled scan need e-scan, which is mutual exclusive with i-scan */
4378 .sched_scan_start = brcmf_cfg80211_sched_scan_start, 3717 .sched_scan_start = brcmf_cfg80211_sched_scan_start,
4379 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop, 3718 .sched_scan_stop = brcmf_cfg80211_sched_scan_stop,
4380#endif
4381#ifdef CONFIG_NL80211_TESTMODE 3719#ifdef CONFIG_NL80211_TESTMODE
4382 .testmode_cmd = brcmf_cfg80211_testmode 3720 .testmode_cmd = brcmf_cfg80211_testmode
4383#endif 3721#endif
@@ -4401,106 +3739,126 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
4401 3739
4402static void brcmf_wiphy_pno_params(struct wiphy *wiphy) 3740static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
4403{ 3741{
4404#ifndef CONFIG_BRCMISCAN
4405 /* scheduled scan settings */ 3742 /* scheduled scan settings */
4406 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; 3743 wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
4407 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; 3744 wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
4408 wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; 3745 wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
4409 wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 3746 wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
4410#endif
4411} 3747}
4412 3748
4413static struct wireless_dev *brcmf_alloc_wdev(struct device *ndev) 3749static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4414{ 3750{
4415 struct wireless_dev *wdev; 3751 struct wiphy *wiphy;
4416 s32 err = 0; 3752 s32 err = 0;
4417 3753
4418 wdev = kzalloc(sizeof(*wdev), GFP_KERNEL); 3754 wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info));
4419 if (!wdev) 3755 if (!wiphy) {
3756 brcmf_err("Could not allocate wiphy device\n");
4420 return ERR_PTR(-ENOMEM); 3757 return ERR_PTR(-ENOMEM);
4421 3758 }
4422 wdev->wiphy = wiphy_new(&wl_cfg80211_ops, 3759 set_wiphy_dev(wiphy, phydev);
4423 sizeof(struct brcmf_cfg80211_info)); 3760 wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
4424 if (!wdev->wiphy) { 3761 wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
4425 WL_ERR("Could not allocate wiphy device\n"); 3762 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4426 err = -ENOMEM; 3763 BIT(NL80211_IFTYPE_ADHOC) |
4427 goto wiphy_new_out; 3764 BIT(NL80211_IFTYPE_AP);
4428 } 3765 wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
4429 set_wiphy_dev(wdev->wiphy, ndev); 3766 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
4430 wdev->wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
4431 wdev->wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
4432 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4433 BIT(NL80211_IFTYPE_ADHOC) |
4434 BIT(NL80211_IFTYPE_AP);
4435 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
4436 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_a; /* Set
4437 * it as 11a by default. 3767 * it as 11a by default.
4438 * This will be updated with 3768 * This will be updated with
4439 * 11n phy tables in 3769 * 11n phy tables in
4440 * "ifconfig up" 3770 * "ifconfig up"
4441 * if phy has 11n capability 3771 * if phy has 11n capability
4442 */ 3772 */
4443 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 3773 wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
4444 wdev->wiphy->cipher_suites = __wl_cipher_suites; 3774 wiphy->cipher_suites = __wl_cipher_suites;
4445 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); 3775 wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
4446 wdev->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power 3776 wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; /* enable power
4447 * save mode 3777 * save mode
4448 * by default 3778 * by default
4449 */ 3779 */
4450 brcmf_wiphy_pno_params(wdev->wiphy); 3780 brcmf_wiphy_pno_params(wiphy);
4451 err = wiphy_register(wdev->wiphy); 3781 err = wiphy_register(wiphy);
4452 if (err < 0) { 3782 if (err < 0) {
4453 WL_ERR("Could not register wiphy device (%d)\n", err); 3783 brcmf_err("Could not register wiphy device (%d)\n", err);
4454 goto wiphy_register_out; 3784 wiphy_free(wiphy);
3785 return ERR_PTR(err);
4455 } 3786 }
4456 return wdev; 3787 return wiphy;
3788}
3789
3790static
3791struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
3792 struct net_device *netdev,
3793 s32 mode, bool pm_block)
3794{
3795 struct brcmf_cfg80211_vif *vif;
4457 3796
4458wiphy_register_out: 3797 if (cfg->vif_cnt == BRCMF_IFACE_MAX_CNT)
4459 wiphy_free(wdev->wiphy); 3798 return ERR_PTR(-ENOSPC);
4460 3799
4461wiphy_new_out: 3800 vif = kzalloc(sizeof(*vif), GFP_KERNEL);
4462 kfree(wdev); 3801 if (!vif)
3802 return ERR_PTR(-ENOMEM);
3803
3804 vif->wdev.wiphy = cfg->wiphy;
3805 vif->wdev.netdev = netdev;
3806 vif->wdev.iftype = brcmf_mode_to_nl80211_iftype(mode);
3807
3808 if (netdev) {
3809 vif->ifp = netdev_priv(netdev);
3810 netdev->ieee80211_ptr = &vif->wdev;
3811 SET_NETDEV_DEV(netdev, wiphy_dev(cfg->wiphy));
3812 }
4463 3813
4464 return ERR_PTR(err); 3814 vif->mode = mode;
3815 vif->pm_block = pm_block;
3816 vif->roam_off = -1;
3817
3818 brcmf_init_prof(&vif->profile);
3819
3820 list_add_tail(&vif->list, &cfg->vif_list);
3821 cfg->vif_cnt++;
3822 return vif;
4465} 3823}
4466 3824
4467static void brcmf_free_wdev(struct brcmf_cfg80211_info *cfg) 3825static void brcmf_free_vif(struct brcmf_cfg80211_vif *vif)
4468{ 3826{
4469 struct wireless_dev *wdev = cfg->wdev; 3827 struct brcmf_cfg80211_info *cfg;
3828 struct wiphy *wiphy;
4470 3829
4471 if (!wdev) { 3830 wiphy = vif->wdev.wiphy;
4472 WL_ERR("wdev is invalid\n"); 3831 cfg = wiphy_priv(wiphy);
4473 return; 3832 list_del(&vif->list);
3833 cfg->vif_cnt--;
3834
3835 kfree(vif);
3836 if (!cfg->vif_cnt) {
3837 wiphy_unregister(wiphy);
3838 wiphy_free(wiphy);
4474 } 3839 }
4475 wiphy_unregister(wdev->wiphy);
4476 wiphy_free(wdev->wiphy);
4477 kfree(wdev);
4478 cfg->wdev = NULL;
4479} 3840}
4480 3841
4481static bool brcmf_is_linkup(struct brcmf_cfg80211_info *cfg, 3842static bool brcmf_is_linkup(const struct brcmf_event_msg *e)
4482 const struct brcmf_event_msg *e)
4483{ 3843{
4484 u32 event = be32_to_cpu(e->event_type); 3844 u32 event = e->event_code;
4485 u32 status = be32_to_cpu(e->status); 3845 u32 status = e->status;
4486 3846
4487 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) { 3847 if (event == BRCMF_E_SET_SSID && status == BRCMF_E_STATUS_SUCCESS) {
4488 WL_CONN("Processing set ssid\n"); 3848 brcmf_dbg(CONN, "Processing set ssid\n");
4489 cfg->link_up = true;
4490 return true; 3849 return true;
4491 } 3850 }
4492 3851
4493 return false; 3852 return false;
4494} 3853}
4495 3854
4496static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg, 3855static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
4497 const struct brcmf_event_msg *e)
4498{ 3856{
4499 u32 event = be32_to_cpu(e->event_type); 3857 u32 event = e->event_code;
4500 u16 flags = be16_to_cpu(e->flags); 3858 u16 flags = e->flags;
4501 3859
4502 if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) { 3860 if (event == BRCMF_E_LINK && (!(flags & BRCMF_EVENT_MSG_LINK))) {
4503 WL_CONN("Processing link down\n"); 3861 brcmf_dbg(CONN, "Processing link down\n");
4504 return true; 3862 return true;
4505 } 3863 }
4506 return false; 3864 return false;
@@ -4509,18 +3867,17 @@ static bool brcmf_is_linkdown(struct brcmf_cfg80211_info *cfg,
4509static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg, 3867static bool brcmf_is_nonetwork(struct brcmf_cfg80211_info *cfg,
4510 const struct brcmf_event_msg *e) 3868 const struct brcmf_event_msg *e)
4511{ 3869{
4512 u32 event = be32_to_cpu(e->event_type); 3870 u32 event = e->event_code;
4513 u32 status = be32_to_cpu(e->status); 3871 u32 status = e->status;
4514 3872
4515 if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) { 3873 if (event == BRCMF_E_LINK && status == BRCMF_E_STATUS_NO_NETWORKS) {
4516 WL_CONN("Processing Link %s & no network found\n", 3874 brcmf_dbg(CONN, "Processing Link %s & no network found\n",
4517 be16_to_cpu(e->flags) & BRCMF_EVENT_MSG_LINK ? 3875 e->flags & BRCMF_EVENT_MSG_LINK ? "up" : "down");
4518 "up" : "down");
4519 return true; 3876 return true;
4520 } 3877 }
4521 3878
4522 if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) { 3879 if (event == BRCMF_E_SET_SSID && status != BRCMF_E_STATUS_SUCCESS) {
4523 WL_CONN("Processing connecting & no network found\n"); 3880 brcmf_dbg(CONN, "Processing connecting & no network found\n");
4524 return true; 3881 return true;
4525 } 3882 }
4526 3883
@@ -4541,7 +3898,7 @@ static void brcmf_clear_assoc_ies(struct brcmf_cfg80211_info *cfg)
4541 3898
4542static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg) 3899static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
4543{ 3900{
4544 struct net_device *ndev = cfg_to_ndev(cfg); 3901 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
4545 struct brcmf_cfg80211_assoc_ielen_le *assoc_info; 3902 struct brcmf_cfg80211_assoc_ielen_le *assoc_info;
4546 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); 3903 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4547 u32 req_len; 3904 u32 req_len;
@@ -4550,10 +3907,10 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
4550 3907
4551 brcmf_clear_assoc_ies(cfg); 3908 brcmf_clear_assoc_ies(cfg);
4552 3909
4553 err = brcmf_dev_bufvar_get(ndev, "assoc_info", cfg->extra_buf, 3910 err = brcmf_fil_iovar_data_get(ifp, "assoc_info",
4554 WL_ASSOC_INFO_MAX); 3911 cfg->extra_buf, WL_ASSOC_INFO_MAX);
4555 if (err) { 3912 if (err) {
4556 WL_ERR("could not get assoc info (%d)\n", err); 3913 brcmf_err("could not get assoc info (%d)\n", err);
4557 return err; 3914 return err;
4558 } 3915 }
4559 assoc_info = 3916 assoc_info =
@@ -4561,11 +3918,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
4561 req_len = le32_to_cpu(assoc_info->req_len); 3918 req_len = le32_to_cpu(assoc_info->req_len);
4562 resp_len = le32_to_cpu(assoc_info->resp_len); 3919 resp_len = le32_to_cpu(assoc_info->resp_len);
4563 if (req_len) { 3920 if (req_len) {
4564 err = brcmf_dev_bufvar_get(ndev, "assoc_req_ies", 3921 err = brcmf_fil_iovar_data_get(ifp, "assoc_req_ies",
4565 cfg->extra_buf, 3922 cfg->extra_buf,
4566 WL_ASSOC_INFO_MAX); 3923 WL_ASSOC_INFO_MAX);
4567 if (err) { 3924 if (err) {
4568 WL_ERR("could not get assoc req (%d)\n", err); 3925 brcmf_err("could not get assoc req (%d)\n", err);
4569 return err; 3926 return err;
4570 } 3927 }
4571 conn_info->req_ie_len = req_len; 3928 conn_info->req_ie_len = req_len;
@@ -4577,11 +3934,11 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
4577 conn_info->req_ie = NULL; 3934 conn_info->req_ie = NULL;
4578 } 3935 }
4579 if (resp_len) { 3936 if (resp_len) {
4580 err = brcmf_dev_bufvar_get(ndev, "assoc_resp_ies", 3937 err = brcmf_fil_iovar_data_get(ifp, "assoc_resp_ies",
4581 cfg->extra_buf, 3938 cfg->extra_buf,
4582 WL_ASSOC_INFO_MAX); 3939 WL_ASSOC_INFO_MAX);
4583 if (err) { 3940 if (err) {
4584 WL_ERR("could not get assoc resp (%d)\n", err); 3941 brcmf_err("could not get assoc resp (%d)\n", err);
4585 return err; 3942 return err;
4586 } 3943 }
4587 conn_info->resp_ie_len = resp_len; 3944 conn_info->resp_ie_len = resp_len;
@@ -4592,8 +3949,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg)
4592 conn_info->resp_ie_len = 0; 3949 conn_info->resp_ie_len = 0;
4593 conn_info->resp_ie = NULL; 3950 conn_info->resp_ie = NULL;
4594 } 3951 }
4595 WL_CONN("req len (%d) resp len (%d)\n", 3952 brcmf_dbg(CONN, "req len (%d) resp len (%d)\n",
4596 conn_info->req_ie_len, conn_info->resp_ie_len); 3953 conn_info->req_ie_len, conn_info->resp_ie_len);
4597 3954
4598 return err; 3955 return err;
4599} 3956}
@@ -4603,7 +3960,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4603 struct net_device *ndev, 3960 struct net_device *ndev,
4604 const struct brcmf_event_msg *e) 3961 const struct brcmf_event_msg *e)
4605{ 3962{
4606 struct brcmf_cfg80211_profile *profile = cfg->profile; 3963 struct brcmf_if *ifp = netdev_priv(ndev);
3964 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4607 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); 3965 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4608 struct wiphy *wiphy = cfg_to_wiphy(cfg); 3966 struct wiphy *wiphy = cfg_to_wiphy(cfg);
4609 struct ieee80211_channel *notify_channel = NULL; 3967 struct ieee80211_channel *notify_channel = NULL;
@@ -4614,7 +3972,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4614 u32 target_channel; 3972 u32 target_channel;
4615 u8 *buf; 3973 u8 *buf;
4616 3974
4617 WL_TRACE("Enter\n"); 3975 brcmf_dbg(TRACE, "Enter\n");
4618 3976
4619 brcmf_get_assoc_ies(cfg); 3977 brcmf_get_assoc_ies(cfg);
4620 memcpy(profile->bssid, e->addr, ETH_ALEN); 3978 memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4628,7 +3986,8 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
4628 3986
4629 /* data sent to dongle has to be little endian */ 3987 /* data sent to dongle has to be little endian */
4630 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); 3988 *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX);
4631 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_BSS_INFO, buf, WL_BSS_INFO_MAX); 3989 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BSS_INFO,
3990 buf, WL_BSS_INFO_MAX);
4632 3991
4633 if (err) 3992 if (err)
4634 goto done; 3993 goto done;
@@ -4650,10 +4009,10 @@ done:
4650 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid, 4009 cfg80211_roamed(ndev, notify_channel, (u8 *)profile->bssid,
4651 conn_info->req_ie, conn_info->req_ie_len, 4010 conn_info->req_ie, conn_info->req_ie_len,
4652 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); 4011 conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL);
4653 WL_CONN("Report roaming result\n"); 4012 brcmf_dbg(CONN, "Report roaming result\n");
4654 4013
4655 set_bit(WL_STATUS_CONNECTED, &cfg->status); 4014 set_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
4656 WL_TRACE("Exit\n"); 4015 brcmf_dbg(TRACE, "Exit\n");
4657 return err; 4016 return err;
4658} 4017}
4659 4018
@@ -4662,13 +4021,15 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
4662 struct net_device *ndev, const struct brcmf_event_msg *e, 4021 struct net_device *ndev, const struct brcmf_event_msg *e,
4663 bool completed) 4022 bool completed)
4664{ 4023{
4665 struct brcmf_cfg80211_profile *profile = cfg->profile; 4024 struct brcmf_if *ifp = netdev_priv(ndev);
4025 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4666 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); 4026 struct brcmf_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
4667 s32 err = 0; 4027 s32 err = 0;
4668 4028
4669 WL_TRACE("Enter\n"); 4029 brcmf_dbg(TRACE, "Enter\n");
4670 4030
4671 if (test_and_clear_bit(WL_STATUS_CONNECTING, &cfg->status)) { 4031 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTING,
4032 &ifp->vif->sme_state)) {
4672 if (completed) { 4033 if (completed) {
4673 brcmf_get_assoc_ies(cfg); 4034 brcmf_get_assoc_ies(cfg);
4674 memcpy(profile->bssid, e->addr, ETH_ALEN); 4035 memcpy(profile->bssid, e->addr, ETH_ALEN);
@@ -4684,11 +4045,12 @@ brcmf_bss_connect_done(struct brcmf_cfg80211_info *cfg,
4684 WLAN_STATUS_AUTH_TIMEOUT, 4045 WLAN_STATUS_AUTH_TIMEOUT,
4685 GFP_KERNEL); 4046 GFP_KERNEL);
4686 if (completed) 4047 if (completed)
4687 set_bit(WL_STATUS_CONNECTED, &cfg->status); 4048 set_bit(BRCMF_VIF_STATUS_CONNECTED,
4688 WL_CONN("Report connect result - connection %s\n", 4049 &ifp->vif->sme_state);
4689 completed ? "succeeded" : "failed"); 4050 brcmf_dbg(CONN, "Report connect result - connection %s\n",
4051 completed ? "succeeded" : "failed");
4690 } 4052 }
4691 WL_TRACE("Exit\n"); 4053 brcmf_dbg(TRACE, "Exit\n");
4692 return err; 4054 return err;
4693} 4055}
4694 4056
@@ -4698,14 +4060,14 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4698 const struct brcmf_event_msg *e, void *data) 4060 const struct brcmf_event_msg *e, void *data)
4699{ 4061{
4700 s32 err = 0; 4062 s32 err = 0;
4701 u32 event = be32_to_cpu(e->event_type); 4063 u32 event = e->event_code;
4702 u32 reason = be32_to_cpu(e->reason); 4064 u32 reason = e->reason;
4703 u32 len = be32_to_cpu(e->datalen); 4065 u32 len = e->datalen;
4704 static int generation; 4066 static int generation;
4705 4067
4706 struct station_info sinfo; 4068 struct station_info sinfo;
4707 4069
4708 WL_CONN("event %d, reason %d\n", event, reason); 4070 brcmf_dbg(CONN, "event %d, reason %d\n", event, reason);
4709 memset(&sinfo, 0, sizeof(sinfo)); 4071 memset(&sinfo, 0, sizeof(sinfo));
4710 4072
4711 sinfo.filled = 0; 4073 sinfo.filled = 0;
@@ -4713,7 +4075,7 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4713 reason == BRCMF_E_STATUS_SUCCESS) { 4075 reason == BRCMF_E_STATUS_SUCCESS) {
4714 sinfo.filled = STATION_INFO_ASSOC_REQ_IES; 4076 sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
4715 if (!data) { 4077 if (!data) {
4716 WL_ERR("No IEs present in ASSOC/REASSOC_IND"); 4078 brcmf_err("No IEs present in ASSOC/REASSOC_IND");
4717 return -EINVAL; 4079 return -EINVAL;
4718 } 4080 }
4719 sinfo.assoc_req_ies = data; 4081 sinfo.assoc_req_ies = data;
@@ -4732,45 +4094,43 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg,
4732} 4094}
4733 4095
4734static s32 4096static s32
4735brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg, 4097brcmf_notify_connect_status(struct brcmf_if *ifp,
4736 struct net_device *ndev,
4737 const struct brcmf_event_msg *e, void *data) 4098 const struct brcmf_event_msg *e, void *data)
4738{ 4099{
4739 struct brcmf_cfg80211_profile *profile = cfg->profile; 4100 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
4101 struct net_device *ndev = ifp->ndev;
4102 struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
4740 s32 err = 0; 4103 s32 err = 0;
4741 4104
4742 if (cfg->conf->mode == WL_MODE_AP) { 4105 if (ifp->vif->mode == WL_MODE_AP) {
4743 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data); 4106 err = brcmf_notify_connect_status_ap(cfg, ndev, e, data);
4744 } else if (brcmf_is_linkup(cfg, e)) { 4107 } else if (brcmf_is_linkup(e)) {
4745 WL_CONN("Linkup\n"); 4108 brcmf_dbg(CONN, "Linkup\n");
4746 if (brcmf_is_ibssmode(cfg)) { 4109 if (brcmf_is_ibssmode(ifp->vif)) {
4747 memcpy(profile->bssid, e->addr, ETH_ALEN); 4110 memcpy(profile->bssid, e->addr, ETH_ALEN);
4748 wl_inform_ibss(cfg, ndev, e->addr); 4111 wl_inform_ibss(cfg, ndev, e->addr);
4749 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL); 4112 cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
4750 clear_bit(WL_STATUS_CONNECTING, &cfg->status); 4113 clear_bit(BRCMF_VIF_STATUS_CONNECTING,
4751 set_bit(WL_STATUS_CONNECTED, &cfg->status); 4114 &ifp->vif->sme_state);
4115 set_bit(BRCMF_VIF_STATUS_CONNECTED,
4116 &ifp->vif->sme_state);
4752 } else 4117 } else
4753 brcmf_bss_connect_done(cfg, ndev, e, true); 4118 brcmf_bss_connect_done(cfg, ndev, e, true);
4754 } else if (brcmf_is_linkdown(cfg, e)) { 4119 } else if (brcmf_is_linkdown(e)) {
4755 WL_CONN("Linkdown\n"); 4120 brcmf_dbg(CONN, "Linkdown\n");
4756 if (brcmf_is_ibssmode(cfg)) { 4121 if (!brcmf_is_ibssmode(ifp->vif)) {
4757 clear_bit(WL_STATUS_CONNECTING, &cfg->status);
4758 if (test_and_clear_bit(WL_STATUS_CONNECTED,
4759 &cfg->status))
4760 brcmf_link_down(cfg);
4761 } else {
4762 brcmf_bss_connect_done(cfg, ndev, e, false); 4122 brcmf_bss_connect_done(cfg, ndev, e, false);
4763 if (test_and_clear_bit(WL_STATUS_CONNECTED, 4123 if (test_and_clear_bit(BRCMF_VIF_STATUS_CONNECTED,
4764 &cfg->status)) { 4124 &ifp->vif->sme_state))
4765 cfg80211_disconnected(ndev, 0, NULL, 0, 4125 cfg80211_disconnected(ndev, 0, NULL, 0,
4766 GFP_KERNEL); 4126 GFP_KERNEL);
4767 brcmf_link_down(cfg);
4768 }
4769 } 4127 }
4770 brcmf_init_prof(cfg->profile); 4128 brcmf_link_down(ifp->vif);
4129 brcmf_init_prof(ndev_to_prof(ndev));
4771 } else if (brcmf_is_nonetwork(cfg, e)) { 4130 } else if (brcmf_is_nonetwork(cfg, e)) {
4772 if (brcmf_is_ibssmode(cfg)) 4131 if (brcmf_is_ibssmode(ifp->vif))
4773 clear_bit(WL_STATUS_CONNECTING, &cfg->status); 4132 clear_bit(BRCMF_VIF_STATUS_CONNECTING,
4133 &ifp->vif->sme_state);
4774 else 4134 else
4775 brcmf_bss_connect_done(cfg, ndev, e, false); 4135 brcmf_bss_connect_done(cfg, ndev, e, false);
4776 } 4136 }
@@ -4779,30 +4139,29 @@ brcmf_notify_connect_status(struct brcmf_cfg80211_info *cfg,
4779} 4139}
4780 4140
4781static s32 4141static s32
4782brcmf_notify_roaming_status(struct brcmf_cfg80211_info *cfg, 4142brcmf_notify_roaming_status(struct brcmf_if *ifp,
4783 struct net_device *ndev,
4784 const struct brcmf_event_msg *e, void *data) 4143 const struct brcmf_event_msg *e, void *data)
4785{ 4144{
4145 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
4786 s32 err = 0; 4146 s32 err = 0;
4787 u32 event = be32_to_cpu(e->event_type); 4147 u32 event = e->event_code;
4788 u32 status = be32_to_cpu(e->status); 4148 u32 status = e->status;
4789 4149
4790 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { 4150 if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) {
4791 if (test_bit(WL_STATUS_CONNECTED, &cfg->status)) 4151 if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
4792 brcmf_bss_roaming_done(cfg, ndev, e); 4152 brcmf_bss_roaming_done(cfg, ifp->ndev, e);
4793 else 4153 else
4794 brcmf_bss_connect_done(cfg, ndev, e, true); 4154 brcmf_bss_connect_done(cfg, ifp->ndev, e, true);
4795 } 4155 }
4796 4156
4797 return err; 4157 return err;
4798} 4158}
4799 4159
4800static s32 4160static s32
4801brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg, 4161brcmf_notify_mic_status(struct brcmf_if *ifp,
4802 struct net_device *ndev,
4803 const struct brcmf_event_msg *e, void *data) 4162 const struct brcmf_event_msg *e, void *data)
4804{ 4163{
4805 u16 flags = be16_to_cpu(e->flags); 4164 u16 flags = e->flags;
4806 enum nl80211_key_type key_type; 4165 enum nl80211_key_type key_type;
4807 4166
4808 if (flags & BRCMF_EVENT_MSG_GROUP) 4167 if (flags & BRCMF_EVENT_MSG_GROUP)
@@ -4810,85 +4169,14 @@ brcmf_notify_mic_status(struct brcmf_cfg80211_info *cfg,
4810 else 4169 else
4811 key_type = NL80211_KEYTYPE_PAIRWISE; 4170 key_type = NL80211_KEYTYPE_PAIRWISE;
4812 4171
4813 cfg80211_michael_mic_failure(ndev, (u8 *)&e->addr, key_type, -1, 4172 cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1,
4814 NULL, GFP_KERNEL); 4173 NULL, GFP_KERNEL);
4815 4174
4816 return 0; 4175 return 0;
4817} 4176}
4818 4177
4819static s32
4820brcmf_notify_scan_status(struct brcmf_cfg80211_info *cfg,
4821 struct net_device *ndev,
4822 const struct brcmf_event_msg *e, void *data)
4823{
4824 struct brcmf_channel_info_le channel_inform_le;
4825 struct brcmf_scan_results_le *bss_list_le;
4826 u32 len = WL_SCAN_BUF_MAX;
4827 s32 err = 0;
4828 bool scan_abort = false;
4829 u32 scan_channel;
4830
4831 WL_TRACE("Enter\n");
4832
4833 if (cfg->iscan_on && cfg->iscan_kickstart) {
4834 WL_TRACE("Exit\n");
4835 return brcmf_wakeup_iscan(cfg_to_iscan(cfg));
4836 }
4837
4838 if (!test_and_clear_bit(WL_STATUS_SCANNING, &cfg->status)) {
4839 WL_ERR("Scan complete while device not scanning\n");
4840 scan_abort = true;
4841 err = -EINVAL;
4842 goto scan_done_out;
4843 }
4844
4845 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_CHANNEL, &channel_inform_le,
4846 sizeof(channel_inform_le));
4847 if (err) {
4848 WL_ERR("scan busy (%d)\n", err);
4849 scan_abort = true;
4850 goto scan_done_out;
4851 }
4852 scan_channel = le32_to_cpu(channel_inform_le.scan_channel);
4853 if (scan_channel)
4854 WL_CONN("channel_inform.scan_channel (%d)\n", scan_channel);
4855 cfg->bss_list = cfg->scan_results;
4856 bss_list_le = (struct brcmf_scan_results_le *) cfg->bss_list;
4857
4858 memset(cfg->scan_results, 0, len);
4859 bss_list_le->buflen = cpu_to_le32(len);
4860 err = brcmf_exec_dcmd(ndev, BRCMF_C_SCAN_RESULTS,
4861 cfg->scan_results, len);
4862 if (err) {
4863 WL_ERR("%s Scan_results error (%d)\n", ndev->name, err);
4864 err = -EINVAL;
4865 scan_abort = true;
4866 goto scan_done_out;
4867 }
4868 cfg->scan_results->buflen = le32_to_cpu(bss_list_le->buflen);
4869 cfg->scan_results->version = le32_to_cpu(bss_list_le->version);
4870 cfg->scan_results->count = le32_to_cpu(bss_list_le->count);
4871
4872 err = brcmf_inform_bss(cfg);
4873 if (err)
4874 scan_abort = true;
4875
4876scan_done_out:
4877 if (cfg->scan_request) {
4878 WL_SCAN("calling cfg80211_scan_done\n");
4879 cfg80211_scan_done(cfg->scan_request, scan_abort);
4880 brcmf_set_mpc(ndev, 1);
4881 cfg->scan_request = NULL;
4882 }
4883
4884 WL_TRACE("Exit\n");
4885
4886 return err;
4887}
4888
4889static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf) 4178static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
4890{ 4179{
4891 conf->mode = (u32)-1;
4892 conf->frag_threshold = (u32)-1; 4180 conf->frag_threshold = (u32)-1;
4893 conf->rts_threshold = (u32)-1; 4181 conf->rts_threshold = (u32)-1;
4894 conf->retry_short = (u32)-1; 4182 conf->retry_short = (u32)-1;
@@ -4896,82 +4184,53 @@ static void brcmf_init_conf(struct brcmf_cfg80211_conf *conf)
4896 conf->tx_power = -1; 4184 conf->tx_power = -1;
4897} 4185}
4898 4186
4899static void brcmf_init_eloop_handler(struct brcmf_cfg80211_event_loop *el) 4187static void brcmf_register_event_handlers(struct brcmf_cfg80211_info *cfg)
4900{ 4188{
4901 memset(el, 0, sizeof(*el)); 4189 brcmf_fweh_register(cfg->pub, BRCMF_E_LINK,
4902 el->handler[BRCMF_E_SCAN_COMPLETE] = brcmf_notify_scan_status; 4190 brcmf_notify_connect_status);
4903 el->handler[BRCMF_E_LINK] = brcmf_notify_connect_status; 4191 brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH_IND,
4904 el->handler[BRCMF_E_DEAUTH_IND] = brcmf_notify_connect_status; 4192 brcmf_notify_connect_status);
4905 el->handler[BRCMF_E_DEAUTH] = brcmf_notify_connect_status; 4193 brcmf_fweh_register(cfg->pub, BRCMF_E_DEAUTH,
4906 el->handler[BRCMF_E_DISASSOC_IND] = brcmf_notify_connect_status; 4194 brcmf_notify_connect_status);
4907 el->handler[BRCMF_E_ASSOC_IND] = brcmf_notify_connect_status; 4195 brcmf_fweh_register(cfg->pub, BRCMF_E_DISASSOC_IND,
4908 el->handler[BRCMF_E_REASSOC_IND] = brcmf_notify_connect_status; 4196 brcmf_notify_connect_status);
4909 el->handler[BRCMF_E_ROAM] = brcmf_notify_roaming_status; 4197 brcmf_fweh_register(cfg->pub, BRCMF_E_ASSOC_IND,
4910 el->handler[BRCMF_E_MIC_ERROR] = brcmf_notify_mic_status; 4198 brcmf_notify_connect_status);
4911 el->handler[BRCMF_E_SET_SSID] = brcmf_notify_connect_status; 4199 brcmf_fweh_register(cfg->pub, BRCMF_E_REASSOC_IND,
4912 el->handler[BRCMF_E_PFN_NET_FOUND] = brcmf_notify_sched_scan_results; 4200 brcmf_notify_connect_status);
4201 brcmf_fweh_register(cfg->pub, BRCMF_E_ROAM,
4202 brcmf_notify_roaming_status);
4203 brcmf_fweh_register(cfg->pub, BRCMF_E_MIC_ERROR,
4204 brcmf_notify_mic_status);
4205 brcmf_fweh_register(cfg->pub, BRCMF_E_SET_SSID,
4206 brcmf_notify_connect_status);
4207 brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND,
4208 brcmf_notify_sched_scan_results);
4913} 4209}
4914 4210
4915static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) 4211static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg)
4916{ 4212{
4917 kfree(cfg->scan_results);
4918 cfg->scan_results = NULL;
4919 kfree(cfg->bss_info);
4920 cfg->bss_info = NULL;
4921 kfree(cfg->conf); 4213 kfree(cfg->conf);
4922 cfg->conf = NULL; 4214 cfg->conf = NULL;
4923 kfree(cfg->profile);
4924 cfg->profile = NULL;
4925 kfree(cfg->scan_req_int);
4926 cfg->scan_req_int = NULL;
4927 kfree(cfg->escan_ioctl_buf); 4215 kfree(cfg->escan_ioctl_buf);
4928 cfg->escan_ioctl_buf = NULL; 4216 cfg->escan_ioctl_buf = NULL;
4929 kfree(cfg->dcmd_buf);
4930 cfg->dcmd_buf = NULL;
4931 kfree(cfg->extra_buf); 4217 kfree(cfg->extra_buf);
4932 cfg->extra_buf = NULL; 4218 cfg->extra_buf = NULL;
4933 kfree(cfg->iscan);
4934 cfg->iscan = NULL;
4935 kfree(cfg->pmk_list); 4219 kfree(cfg->pmk_list);
4936 cfg->pmk_list = NULL; 4220 cfg->pmk_list = NULL;
4937 if (cfg->ap_info) {
4938 kfree(cfg->ap_info->wpa_ie);
4939 kfree(cfg->ap_info->rsn_ie);
4940 kfree(cfg->ap_info);
4941 cfg->ap_info = NULL;
4942 }
4943} 4221}
4944 4222
4945static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) 4223static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg)
4946{ 4224{
4947 cfg->scan_results = kzalloc(WL_SCAN_BUF_MAX, GFP_KERNEL);
4948 if (!cfg->scan_results)
4949 goto init_priv_mem_out;
4950 cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL); 4225 cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL);
4951 if (!cfg->conf) 4226 if (!cfg->conf)
4952 goto init_priv_mem_out; 4227 goto init_priv_mem_out;
4953 cfg->profile = kzalloc(sizeof(*cfg->profile), GFP_KERNEL);
4954 if (!cfg->profile)
4955 goto init_priv_mem_out;
4956 cfg->bss_info = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL);
4957 if (!cfg->bss_info)
4958 goto init_priv_mem_out;
4959 cfg->scan_req_int = kzalloc(sizeof(*cfg->scan_req_int),
4960 GFP_KERNEL);
4961 if (!cfg->scan_req_int)
4962 goto init_priv_mem_out;
4963 cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); 4228 cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
4964 if (!cfg->escan_ioctl_buf) 4229 if (!cfg->escan_ioctl_buf)
4965 goto init_priv_mem_out; 4230 goto init_priv_mem_out;
4966 cfg->dcmd_buf = kzalloc(WL_DCMD_LEN_MAX, GFP_KERNEL);
4967 if (!cfg->dcmd_buf)
4968 goto init_priv_mem_out;
4969 cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); 4231 cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL);
4970 if (!cfg->extra_buf) 4232 if (!cfg->extra_buf)
4971 goto init_priv_mem_out; 4233 goto init_priv_mem_out;
4972 cfg->iscan = kzalloc(sizeof(*cfg->iscan), GFP_KERNEL);
4973 if (!cfg->iscan)
4974 goto init_priv_mem_out;
4975 cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL); 4234 cfg->pmk_list = kzalloc(sizeof(*cfg->pmk_list), GFP_KERNEL);
4976 if (!cfg->pmk_list) 4235 if (!cfg->pmk_list)
4977 goto init_priv_mem_out; 4236 goto init_priv_mem_out;
@@ -4984,298 +4243,107 @@ init_priv_mem_out:
4984 return -ENOMEM; 4243 return -ENOMEM;
4985} 4244}
4986 4245
4987/*
4988* retrieve first queued event from head
4989*/
4990
4991static struct brcmf_cfg80211_event_q *brcmf_deq_event(
4992 struct brcmf_cfg80211_info *cfg)
4993{
4994 struct brcmf_cfg80211_event_q *e = NULL;
4995
4996 spin_lock_irq(&cfg->evt_q_lock);
4997 if (!list_empty(&cfg->evt_q_list)) {
4998 e = list_first_entry(&cfg->evt_q_list,
4999 struct brcmf_cfg80211_event_q, evt_q_list);
5000 list_del(&e->evt_q_list);
5001 }
5002 spin_unlock_irq(&cfg->evt_q_lock);
5003
5004 return e;
5005}
5006
5007/*
5008* push event to tail of the queue
5009*
5010* remark: this function may not sleep as it is called in atomic context.
5011*/
5012
5013static s32
5014brcmf_enq_event(struct brcmf_cfg80211_info *cfg, u32 event,
5015 const struct brcmf_event_msg *msg, void *data)
5016{
5017 struct brcmf_cfg80211_event_q *e;
5018 s32 err = 0;
5019 ulong flags;
5020 u32 data_len;
5021 u32 total_len;
5022
5023 total_len = sizeof(struct brcmf_cfg80211_event_q);
5024 if (data)
5025 data_len = be32_to_cpu(msg->datalen);
5026 else
5027 data_len = 0;
5028 total_len += data_len;
5029 e = kzalloc(total_len, GFP_ATOMIC);
5030 if (!e)
5031 return -ENOMEM;
5032
5033 e->etype = event;
5034 memcpy(&e->emsg, msg, sizeof(struct brcmf_event_msg));
5035 if (data)
5036 memcpy(&e->edata, data, data_len);
5037
5038 spin_lock_irqsave(&cfg->evt_q_lock, flags);
5039 list_add_tail(&e->evt_q_list, &cfg->evt_q_list);
5040 spin_unlock_irqrestore(&cfg->evt_q_lock, flags);
5041
5042 return err;
5043}
5044
5045static void brcmf_put_event(struct brcmf_cfg80211_event_q *e)
5046{
5047 kfree(e);
5048}
5049
5050static void brcmf_cfg80211_event_handler(struct work_struct *work)
5051{
5052 struct brcmf_cfg80211_info *cfg =
5053 container_of(work, struct brcmf_cfg80211_info,
5054 event_work);
5055 struct brcmf_cfg80211_event_q *e;
5056
5057 e = brcmf_deq_event(cfg);
5058 if (unlikely(!e)) {
5059 WL_ERR("event queue empty...\n");
5060 return;
5061 }
5062
5063 do {
5064 WL_INFO("event type (%d)\n", e->etype);
5065 if (cfg->el.handler[e->etype])
5066 cfg->el.handler[e->etype](cfg,
5067 cfg_to_ndev(cfg),
5068 &e->emsg, e->edata);
5069 else
5070 WL_INFO("Unknown Event (%d): ignoring\n", e->etype);
5071 brcmf_put_event(e);
5072 } while ((e = brcmf_deq_event(cfg)));
5073
5074}
5075
5076static void brcmf_init_eq(struct brcmf_cfg80211_info *cfg)
5077{
5078 spin_lock_init(&cfg->evt_q_lock);
5079 INIT_LIST_HEAD(&cfg->evt_q_list);
5080}
5081
5082static void brcmf_flush_eq(struct brcmf_cfg80211_info *cfg)
5083{
5084 struct brcmf_cfg80211_event_q *e;
5085
5086 spin_lock_irq(&cfg->evt_q_lock);
5087 while (!list_empty(&cfg->evt_q_list)) {
5088 e = list_first_entry(&cfg->evt_q_list,
5089 struct brcmf_cfg80211_event_q, evt_q_list);
5090 list_del(&e->evt_q_list);
5091 kfree(e);
5092 }
5093 spin_unlock_irq(&cfg->evt_q_lock);
5094}
5095
5096static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg) 4246static s32 wl_init_priv(struct brcmf_cfg80211_info *cfg)
5097{ 4247{
5098 s32 err = 0; 4248 s32 err = 0;
5099 4249
5100 cfg->scan_request = NULL; 4250 cfg->scan_request = NULL;
5101 cfg->pwr_save = true; 4251 cfg->pwr_save = true;
5102#ifdef CONFIG_BRCMISCAN
5103 cfg->iscan_on = true; /* iscan on & off switch.
5104 we enable iscan per default */
5105 cfg->escan_on = false; /* escan on & off switch.
5106 we disable escan per default */
5107#else
5108 cfg->iscan_on = false; /* iscan on & off switch.
5109 we disable iscan per default */
5110 cfg->escan_on = true; /* escan on & off switch.
5111 we enable escan per default */
5112#endif
5113 cfg->roam_on = true; /* roam on & off switch. 4252 cfg->roam_on = true; /* roam on & off switch.
5114 we enable roam per default */ 4253 we enable roam per default */
5115
5116 cfg->iscan_kickstart = false;
5117 cfg->active_scan = true; /* we do active scan for 4254 cfg->active_scan = true; /* we do active scan for
5118 specific scan per default */ 4255 specific scan per default */
5119 cfg->dongle_up = false; /* dongle is not up yet */ 4256 cfg->dongle_up = false; /* dongle is not up yet */
5120 brcmf_init_eq(cfg);
5121 err = brcmf_init_priv_mem(cfg); 4257 err = brcmf_init_priv_mem(cfg);
5122 if (err) 4258 if (err)
5123 return err; 4259 return err;
5124 INIT_WORK(&cfg->event_work, brcmf_cfg80211_event_handler); 4260 brcmf_register_event_handlers(cfg);
5125 brcmf_init_eloop_handler(&cfg->el);
5126 mutex_init(&cfg->usr_sync); 4261 mutex_init(&cfg->usr_sync);
5127 err = brcmf_init_iscan(cfg);
5128 if (err)
5129 return err;
5130 brcmf_init_escan(cfg); 4262 brcmf_init_escan(cfg);
5131 brcmf_init_conf(cfg->conf); 4263 brcmf_init_conf(cfg->conf);
5132 brcmf_init_prof(cfg->profile);
5133 brcmf_link_down(cfg);
5134 4264
5135 return err; 4265 return err;
5136} 4266}
5137 4267
5138static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg) 4268static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg)
5139{ 4269{
5140 cancel_work_sync(&cfg->event_work);
5141 cfg->dongle_up = false; /* dongle down */ 4270 cfg->dongle_up = false; /* dongle down */
5142 brcmf_flush_eq(cfg);
5143 brcmf_link_down(cfg);
5144 brcmf_abort_scanning(cfg); 4271 brcmf_abort_scanning(cfg);
5145 brcmf_deinit_priv_mem(cfg); 4272 brcmf_deinit_priv_mem(cfg);
5146} 4273}
5147 4274
5148struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev, 4275struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
5149 struct device *busdev, 4276 struct device *busdev)
5150 struct brcmf_pub *drvr)
5151{ 4277{
5152 struct wireless_dev *wdev; 4278 struct net_device *ndev = drvr->iflist[0]->ndev;
5153 struct brcmf_cfg80211_info *cfg; 4279 struct brcmf_cfg80211_info *cfg;
4280 struct wiphy *wiphy;
4281 struct brcmf_cfg80211_vif *vif;
4282 struct brcmf_if *ifp;
5154 s32 err = 0; 4283 s32 err = 0;
5155 4284
5156 if (!ndev) { 4285 if (!ndev) {
5157 WL_ERR("ndev is invalid\n"); 4286 brcmf_err("ndev is invalid\n");
5158 return NULL; 4287 return NULL;
5159 } 4288 }
5160 4289
5161 wdev = brcmf_alloc_wdev(busdev); 4290 ifp = netdev_priv(ndev);
5162 if (IS_ERR(wdev)) { 4291 wiphy = brcmf_setup_wiphy(busdev);
4292 if (IS_ERR(wiphy))
5163 return NULL; 4293 return NULL;
5164 }
5165 4294
5166 wdev->iftype = brcmf_mode_to_nl80211_iftype(WL_MODE_BSS); 4295 cfg = wiphy_priv(wiphy);
5167 cfg = wdev_to_cfg(wdev); 4296 cfg->wiphy = wiphy;
5168 cfg->wdev = wdev;
5169 cfg->pub = drvr; 4297 cfg->pub = drvr;
5170 ndev->ieee80211_ptr = wdev; 4298 INIT_LIST_HEAD(&cfg->vif_list);
5171 SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); 4299
5172 wdev->netdev = ndev; 4300 vif = brcmf_alloc_vif(cfg, ndev, WL_MODE_BSS, false);
4301 if (IS_ERR(vif)) {
4302 wiphy_free(wiphy);
4303 return NULL;
4304 }
4305
5173 err = wl_init_priv(cfg); 4306 err = wl_init_priv(cfg);
5174 if (err) { 4307 if (err) {
5175 WL_ERR("Failed to init iwm_priv (%d)\n", err); 4308 brcmf_err("Failed to init iwm_priv (%d)\n", err);
5176 goto cfg80211_attach_out; 4309 goto cfg80211_attach_out;
5177 } 4310 }
5178 4311
4312 ifp->vif = vif;
5179 return cfg; 4313 return cfg;
5180 4314
5181cfg80211_attach_out: 4315cfg80211_attach_out:
5182 brcmf_free_wdev(cfg); 4316 brcmf_free_vif(vif);
5183 return NULL; 4317 return NULL;
5184} 4318}
5185 4319
5186void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) 4320void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
5187{ 4321{
5188 wl_deinit_priv(cfg); 4322 struct brcmf_cfg80211_vif *vif;
5189 brcmf_free_wdev(cfg); 4323 struct brcmf_cfg80211_vif *tmp;
5190}
5191
5192void
5193brcmf_cfg80211_event(struct net_device *ndev,
5194 const struct brcmf_event_msg *e, void *data)
5195{
5196 u32 event_type = be32_to_cpu(e->event_type);
5197 struct brcmf_cfg80211_info *cfg = ndev_to_cfg(ndev);
5198
5199 if (!brcmf_enq_event(cfg, event_type, e, data))
5200 schedule_work(&cfg->event_work);
5201}
5202
5203static s32 brcmf_dongle_eventmsg(struct net_device *ndev)
5204{
5205 /* Room for "event_msgs" + '\0' + bitvec */
5206 s8 iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
5207 s8 eventmask[BRCMF_EVENTING_MASK_LEN];
5208 s32 err = 0;
5209 4324
5210 WL_TRACE("Enter\n"); 4325 wl_deinit_priv(cfg);
5211 4326 list_for_each_entry_safe(vif, tmp, &cfg->vif_list, list) {
5212 /* Setup event_msgs */ 4327 brcmf_free_vif(vif);
5213 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
5214 iovbuf, sizeof(iovbuf));
5215 err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_VAR, iovbuf, sizeof(iovbuf));
5216 if (err) {
5217 WL_ERR("Get event_msgs error (%d)\n", err);
5218 goto dongle_eventmsg_out;
5219 }
5220 memcpy(eventmask, iovbuf, BRCMF_EVENTING_MASK_LEN);
5221
5222 setbit(eventmask, BRCMF_E_SET_SSID);
5223 setbit(eventmask, BRCMF_E_ROAM);
5224 setbit(eventmask, BRCMF_E_PRUNE);
5225 setbit(eventmask, BRCMF_E_AUTH);
5226 setbit(eventmask, BRCMF_E_REASSOC);
5227 setbit(eventmask, BRCMF_E_REASSOC_IND);
5228 setbit(eventmask, BRCMF_E_DEAUTH_IND);
5229 setbit(eventmask, BRCMF_E_DISASSOC_IND);
5230 setbit(eventmask, BRCMF_E_DISASSOC);
5231 setbit(eventmask, BRCMF_E_JOIN);
5232 setbit(eventmask, BRCMF_E_ASSOC_IND);
5233 setbit(eventmask, BRCMF_E_PSK_SUP);
5234 setbit(eventmask, BRCMF_E_LINK);
5235 setbit(eventmask, BRCMF_E_NDIS_LINK);
5236 setbit(eventmask, BRCMF_E_MIC_ERROR);
5237 setbit(eventmask, BRCMF_E_PMKID_CACHE);
5238 setbit(eventmask, BRCMF_E_TXFAIL);
5239 setbit(eventmask, BRCMF_E_JOIN_START);
5240 setbit(eventmask, BRCMF_E_SCAN_COMPLETE);
5241 setbit(eventmask, BRCMF_E_ESCAN_RESULT);
5242 setbit(eventmask, BRCMF_E_PFN_NET_FOUND);
5243
5244 brcmf_c_mkiovar("event_msgs", eventmask, BRCMF_EVENTING_MASK_LEN,
5245 iovbuf, sizeof(iovbuf));
5246 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
5247 if (err) {
5248 WL_ERR("Set event_msgs error (%d)\n", err);
5249 goto dongle_eventmsg_out;
5250 } 4328 }
5251
5252dongle_eventmsg_out:
5253 WL_TRACE("Exit\n");
5254 return err;
5255} 4329}
5256 4330
5257static s32 4331static s32
5258brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout) 4332brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
5259{ 4333{
5260 s8 iovbuf[32]; 4334 struct brcmf_if *ifp = netdev_priv(ndev);
5261 s32 err = 0; 4335 s32 err = 0;
5262 __le32 roamtrigger[2]; 4336 __le32 roamtrigger[2];
5263 __le32 roam_delta[2]; 4337 __le32 roam_delta[2];
5264 __le32 bcn_to_le;
5265 __le32 roamvar_le;
5266 4338
5267 /* 4339 /*
5268 * Setup timeout if Beacons are lost and roam is 4340 * Setup timeout if Beacons are lost and roam is
5269 * off to report link down 4341 * off to report link down
5270 */ 4342 */
5271 if (roamvar) { 4343 if (roamvar) {
5272 bcn_to_le = cpu_to_le32(bcn_timeout); 4344 err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout);
5273 brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_to_le,
5274 sizeof(bcn_to_le), iovbuf, sizeof(iovbuf));
5275 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR,
5276 iovbuf, sizeof(iovbuf));
5277 if (err) { 4345 if (err) {
5278 WL_ERR("bcn_timeout error (%d)\n", err); 4346 brcmf_err("bcn_timeout error (%d)\n", err);
5279 goto dongle_rom_out; 4347 goto dongle_rom_out;
5280 } 4348 }
5281 } 4349 }
@@ -5284,31 +4352,28 @@ brcmf_dongle_roam(struct net_device *ndev, u32 roamvar, u32 bcn_timeout)
5284 * Enable/Disable built-in roaming to allow supplicant 4352 * Enable/Disable built-in roaming to allow supplicant
5285 * to take care of roaming 4353 * to take care of roaming
5286 */ 4354 */
5287 WL_INFO("Internal Roaming = %s\n", roamvar ? "Off" : "On"); 4355 brcmf_dbg(INFO, "Internal Roaming = %s\n", roamvar ? "Off" : "On");
5288 roamvar_le = cpu_to_le32(roamvar); 4356 err = brcmf_fil_iovar_int_set(ifp, "roam_off", roamvar);
5289 brcmf_c_mkiovar("roam_off", (char *)&roamvar_le,
5290 sizeof(roamvar_le), iovbuf, sizeof(iovbuf));
5291 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf));
5292 if (err) { 4357 if (err) {
5293 WL_ERR("roam_off error (%d)\n", err); 4358 brcmf_err("roam_off error (%d)\n", err);
5294 goto dongle_rom_out; 4359 goto dongle_rom_out;
5295 } 4360 }
5296 4361
5297 roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL); 4362 roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL);
5298 roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL); 4363 roamtrigger[1] = cpu_to_le32(BRCM_BAND_ALL);
5299 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_TRIGGER, 4364 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_TRIGGER,
5300 (void *)roamtrigger, sizeof(roamtrigger)); 4365 (void *)roamtrigger, sizeof(roamtrigger));
5301 if (err) { 4366 if (err) {
5302 WL_ERR("WLC_SET_ROAM_TRIGGER error (%d)\n", err); 4367 brcmf_err("WLC_SET_ROAM_TRIGGER error (%d)\n", err);
5303 goto dongle_rom_out; 4368 goto dongle_rom_out;
5304 } 4369 }
5305 4370
5306 roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA); 4371 roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA);
5307 roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL); 4372 roam_delta[1] = cpu_to_le32(BRCM_BAND_ALL);
5308 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_ROAM_DELTA, 4373 err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_ROAM_DELTA,
5309 (void *)roam_delta, sizeof(roam_delta)); 4374 (void *)roam_delta, sizeof(roam_delta));
5310 if (err) { 4375 if (err) {
5311 WL_ERR("WLC_SET_ROAM_DELTA error (%d)\n", err); 4376 brcmf_err("WLC_SET_ROAM_DELTA error (%d)\n", err);
5312 goto dongle_rom_out; 4377 goto dongle_rom_out;
5313 } 4378 }
5314 4379
@@ -5320,37 +4385,35 @@ static s32
5320brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time, 4385brcmf_dongle_scantime(struct net_device *ndev, s32 scan_assoc_time,
5321 s32 scan_unassoc_time, s32 scan_passive_time) 4386 s32 scan_unassoc_time, s32 scan_passive_time)
5322{ 4387{
4388 struct brcmf_if *ifp = netdev_priv(ndev);
5323 s32 err = 0; 4389 s32 err = 0;
5324 __le32 scan_assoc_tm_le = cpu_to_le32(scan_assoc_time);
5325 __le32 scan_unassoc_tm_le = cpu_to_le32(scan_unassoc_time);
5326 __le32 scan_passive_tm_le = cpu_to_le32(scan_passive_time);
5327 4390
5328 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_CHANNEL_TIME, 4391 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
5329 &scan_assoc_tm_le, sizeof(scan_assoc_tm_le)); 4392 scan_assoc_time);
5330 if (err) { 4393 if (err) {
5331 if (err == -EOPNOTSUPP) 4394 if (err == -EOPNOTSUPP)
5332 WL_INFO("Scan assoc time is not supported\n"); 4395 brcmf_dbg(INFO, "Scan assoc time is not supported\n");
5333 else 4396 else
5334 WL_ERR("Scan assoc time error (%d)\n", err); 4397 brcmf_err("Scan assoc time error (%d)\n", err);
5335 goto dongle_scantime_out; 4398 goto dongle_scantime_out;
5336 } 4399 }
5337 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_UNASSOC_TIME, 4400 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
5338 &scan_unassoc_tm_le, sizeof(scan_unassoc_tm_le)); 4401 scan_unassoc_time);
5339 if (err) { 4402 if (err) {
5340 if (err == -EOPNOTSUPP) 4403 if (err == -EOPNOTSUPP)
5341 WL_INFO("Scan unassoc time is not supported\n"); 4404 brcmf_dbg(INFO, "Scan unassoc time is not supported\n");
5342 else 4405 else
5343 WL_ERR("Scan unassoc time error (%d)\n", err); 4406 brcmf_err("Scan unassoc time error (%d)\n", err);
5344 goto dongle_scantime_out; 4407 goto dongle_scantime_out;
5345 } 4408 }
5346 4409
5347 err = brcmf_exec_dcmd(ndev, BRCMF_C_SET_SCAN_PASSIVE_TIME, 4410 err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_PASSIVE_TIME,
5348 &scan_passive_tm_le, sizeof(scan_passive_tm_le)); 4411 scan_passive_time);
5349 if (err) { 4412 if (err) {
5350 if (err == -EOPNOTSUPP) 4413 if (err == -EOPNOTSUPP)
5351 WL_INFO("Scan passive time is not supported\n"); 4414 brcmf_dbg(INFO, "Scan passive time is not supported\n");
5352 else 4415 else
5353 WL_ERR("Scan passive time error (%d)\n", err); 4416 brcmf_err("Scan passive time error (%d)\n", err);
5354 goto dongle_scantime_out; 4417 goto dongle_scantime_out;
5355 } 4418 }
5356 4419
@@ -5360,20 +4423,21 @@ dongle_scantime_out:
5360 4423
5361static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg) 4424static s32 wl_update_wiphybands(struct brcmf_cfg80211_info *cfg)
5362{ 4425{
4426 struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
5363 struct wiphy *wiphy; 4427 struct wiphy *wiphy;
5364 s32 phy_list; 4428 s32 phy_list;
5365 s8 phy; 4429 s8 phy;
5366 s32 err = 0; 4430 s32 err = 0;
5367 4431
5368 err = brcmf_exec_dcmd(cfg_to_ndev(cfg), BRCM_GET_PHYLIST, 4432 err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_PHYLIST,
5369 &phy_list, sizeof(phy_list)); 4433 &phy_list, sizeof(phy_list));
5370 if (err) { 4434 if (err) {
5371 WL_ERR("error (%d)\n", err); 4435 brcmf_err("error (%d)\n", err);
5372 return err; 4436 return err;
5373 } 4437 }
5374 4438
5375 phy = ((char *)&phy_list)[0]; 4439 phy = ((char *)&phy_list)[0];
5376 WL_INFO("%c phy\n", phy); 4440 brcmf_dbg(INFO, "%c phy\n", phy);
5377 if (phy == 'n' || phy == 'a') { 4441 if (phy == 'n' || phy == 'a') {
5378 wiphy = cfg_to_wiphy(cfg); 4442 wiphy = cfg_to_wiphy(cfg);
5379 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n; 4443 wiphy->bands[IEEE80211_BAND_5GHZ] = &__wl_band_5ghz_n;
@@ -5403,16 +4467,13 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
5403 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME, 4467 brcmf_dongle_scantime(ndev, WL_SCAN_CHANNEL_TIME,
5404 WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME); 4468 WL_SCAN_UNASSOC_TIME, WL_SCAN_PASSIVE_TIME);
5405 4469
5406 err = brcmf_dongle_eventmsg(ndev);
5407 if (err)
5408 goto default_conf_out;
5409
5410 power_mode = cfg->pwr_save ? PM_FAST : PM_OFF; 4470 power_mode = cfg->pwr_save ? PM_FAST : PM_OFF;
5411 err = brcmf_exec_dcmd_u32(ndev, BRCMF_C_SET_PM, &power_mode); 4471 err = brcmf_fil_cmd_int_set(netdev_priv(ndev), BRCMF_C_SET_PM,
4472 power_mode);
5412 if (err) 4473 if (err)
5413 goto default_conf_out; 4474 goto default_conf_out;
5414 WL_INFO("power save set to %s\n", 4475 brcmf_dbg(INFO, "power save set to %s\n",
5415 (power_mode ? "enabled" : "disabled")); 4476 (power_mode ? "enabled" : "disabled"));
5416 4477
5417 err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1), 4478 err = brcmf_dongle_roam(ndev, (cfg->roam_on ? 0 : 1),
5418 WL_BEACON_TIMEOUT); 4479 WL_BEACON_TIMEOUT);
@@ -5436,68 +4497,25 @@ default_conf_out:
5436 4497
5437} 4498}
5438 4499
5439static int brcmf_debugfs_add_netdev_params(struct brcmf_cfg80211_info *cfg) 4500static s32 __brcmf_cfg80211_up(struct brcmf_if *ifp)
5440{ 4501{
5441 char buf[10+IFNAMSIZ]; 4502 set_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
5442 struct dentry *fd; 4503 if (ifp->idx)
5443 s32 err = 0; 4504 return 0;
5444
5445 sprintf(buf, "netdev:%s", cfg_to_ndev(cfg)->name);
5446 cfg->debugfsdir = debugfs_create_dir(buf,
5447 cfg_to_wiphy(cfg)->debugfsdir);
5448
5449 fd = debugfs_create_u16("beacon_int", S_IRUGO, cfg->debugfsdir,
5450 (u16 *)&cfg->profile->beacon_interval);
5451 if (!fd) {
5452 err = -ENOMEM;
5453 goto err_out;
5454 }
5455
5456 fd = debugfs_create_u8("dtim_period", S_IRUGO, cfg->debugfsdir,
5457 (u8 *)&cfg->profile->dtim_period);
5458 if (!fd) {
5459 err = -ENOMEM;
5460 goto err_out;
5461 }
5462 4505
5463err_out: 4506 return brcmf_config_dongle(ifp->drvr->config);
5464 return err;
5465} 4507}
5466 4508
5467static void brcmf_debugfs_remove_netdev(struct brcmf_cfg80211_info *cfg) 4509static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
5468{ 4510{
5469 debugfs_remove_recursive(cfg->debugfsdir); 4511 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
5470 cfg->debugfsdir = NULL;
5471}
5472 4512
5473static s32 __brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg)
5474{
5475 s32 err = 0;
5476
5477 set_bit(WL_STATUS_READY, &cfg->status);
5478
5479 brcmf_debugfs_add_netdev_params(cfg);
5480
5481 err = brcmf_config_dongle(cfg);
5482 if (err)
5483 return err;
5484
5485 brcmf_invoke_iscan(cfg);
5486
5487 return err;
5488}
5489
5490static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
5491{
5492 /* 4513 /*
5493 * While going down, if associated with AP disassociate 4514 * While going down, if associated with AP disassociate
5494 * from AP to save power 4515 * from AP to save power
5495 */ 4516 */
5496 if ((test_bit(WL_STATUS_CONNECTED, &cfg->status) || 4517 if (check_vif_up(ifp->vif)) {
5497 test_bit(WL_STATUS_CONNECTING, &cfg->status)) && 4518 brcmf_link_down(ifp->vif);
5498 test_bit(WL_STATUS_READY, &cfg->status)) {
5499 WL_INFO("Disassociating from AP");
5500 brcmf_link_down(cfg);
5501 4519
5502 /* Make sure WPA_Supplicant receives all the event 4520 /* Make sure WPA_Supplicant receives all the event
5503 generated due to DISASSOC call to the fw to keep 4521 generated due to DISASSOC call to the fw to keep
@@ -5507,30 +4525,32 @@ static s32 __brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg)
5507 } 4525 }
5508 4526
5509 brcmf_abort_scanning(cfg); 4527 brcmf_abort_scanning(cfg);
5510 clear_bit(WL_STATUS_READY, &cfg->status); 4528 clear_bit(BRCMF_VIF_STATUS_READY, &ifp->vif->sme_state);
5511
5512 brcmf_debugfs_remove_netdev(cfg);
5513 4529
5514 return 0; 4530 return 0;
5515} 4531}
5516 4532
5517s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg) 4533s32 brcmf_cfg80211_up(struct net_device *ndev)
5518{ 4534{
4535 struct brcmf_if *ifp = netdev_priv(ndev);
4536 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
5519 s32 err = 0; 4537 s32 err = 0;
5520 4538
5521 mutex_lock(&cfg->usr_sync); 4539 mutex_lock(&cfg->usr_sync);
5522 err = __brcmf_cfg80211_up(cfg); 4540 err = __brcmf_cfg80211_up(ifp);
5523 mutex_unlock(&cfg->usr_sync); 4541 mutex_unlock(&cfg->usr_sync);
5524 4542
5525 return err; 4543 return err;
5526} 4544}
5527 4545
5528s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg) 4546s32 brcmf_cfg80211_down(struct net_device *ndev)
5529{ 4547{
4548 struct brcmf_if *ifp = netdev_priv(ndev);
4549 struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
5530 s32 err = 0; 4550 s32 err = 0;
5531 4551
5532 mutex_lock(&cfg->usr_sync); 4552 mutex_lock(&cfg->usr_sync);
5533 err = __brcmf_cfg80211_down(cfg); 4553 err = __brcmf_cfg80211_down(ifp);
5534 mutex_unlock(&cfg->usr_sync); 4554 mutex_unlock(&cfg->usr_sync);
5535 4555
5536 return err; 4556 return err;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 71ced174748a..e4d9cc7a8e63 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -17,98 +17,12 @@
17#ifndef _wl_cfg80211_h_ 17#ifndef _wl_cfg80211_h_
18#define _wl_cfg80211_h_ 18#define _wl_cfg80211_h_
19 19
20#define WL_DBG_NONE 0 20#define WL_NUM_SCAN_MAX 10
21#define WL_DBG_CONN (1 << 5) 21#define WL_NUM_PMKIDS_MAX MAXPMKID
22#define WL_DBG_SCAN (1 << 4)
23#define WL_DBG_TRACE (1 << 3)
24#define WL_DBG_INFO (1 << 1)
25#define WL_DBG_ERR (1 << 0)
26#define WL_DBG_MASK ((WL_DBG_INFO | WL_DBG_ERR | WL_DBG_TRACE) | \
27 (WL_DBG_SCAN) | (WL_DBG_CONN))
28
29#define WL_ERR(fmt, ...) \
30do { \
31 if (brcmf_dbg_level & WL_DBG_ERR) { \
32 if (net_ratelimit()) { \
33 pr_err("ERROR @%s : " fmt, \
34 __func__, ##__VA_ARGS__); \
35 } \
36 } \
37} while (0)
38
39#if (defined DEBUG)
40#define WL_INFO(fmt, ...) \
41do { \
42 if (brcmf_dbg_level & WL_DBG_INFO) { \
43 if (net_ratelimit()) { \
44 pr_err("INFO @%s : " fmt, \
45 __func__, ##__VA_ARGS__); \
46 } \
47 } \
48} while (0)
49
50#define WL_TRACE(fmt, ...) \
51do { \
52 if (brcmf_dbg_level & WL_DBG_TRACE) { \
53 if (net_ratelimit()) { \
54 pr_err("TRACE @%s : " fmt, \
55 __func__, ##__VA_ARGS__); \
56 } \
57 } \
58} while (0)
59
60#define WL_SCAN(fmt, ...) \
61do { \
62 if (brcmf_dbg_level & WL_DBG_SCAN) { \
63 if (net_ratelimit()) { \
64 pr_err("SCAN @%s : " fmt, \
65 __func__, ##__VA_ARGS__); \
66 } \
67 } \
68} while (0)
69
70#define WL_CONN(fmt, ...) \
71do { \
72 if (brcmf_dbg_level & WL_DBG_CONN) { \
73 if (net_ratelimit()) { \
74 pr_err("CONN @%s : " fmt, \
75 __func__, ##__VA_ARGS__); \
76 } \
77 } \
78} while (0)
79
80#else /* (defined DEBUG) */
81#define WL_INFO(fmt, args...)
82#define WL_TRACE(fmt, args...)
83#define WL_SCAN(fmt, args...)
84#define WL_CONN(fmt, args...)
85#endif /* (defined DEBUG) */
86
87#define WL_NUM_SCAN_MAX 1
88#define WL_NUM_PMKIDS_MAX MAXPMKID /* will be used
89 * for 2.6.33 kernel
90 * or later
91 */
92#define WL_SCAN_BUF_MAX (1024 * 8)
93#define WL_TLV_INFO_MAX 1024 22#define WL_TLV_INFO_MAX 1024
94#define WL_BSS_INFO_MAX 2048 23#define WL_BSS_INFO_MAX 2048
95#define WL_ASSOC_INFO_MAX 512 /* 24#define WL_ASSOC_INFO_MAX 512 /* assoc related fil max buf */
96 * needs to grab assoc info from dongle to 25#define WL_EXTRA_BUF_MAX 2048
97 * report it to cfg80211 through "connect"
98 * event
99 */
100#define WL_DCMD_LEN_MAX 1024
101#define WL_EXTRA_BUF_MAX 2048
102#define WL_ISCAN_BUF_MAX 2048 /*
103 * the buf length can be BRCMF_DCMD_MAXLEN
104 * to reduce iteration
105 */
106#define WL_ISCAN_TIMER_INTERVAL_MS 3000
107#define WL_SCAN_ERSULTS_LAST (BRCMF_SCAN_RESULTS_NO_MEM+1)
108#define WL_AP_MAX 256 /* virtually unlimitted as long
109 * as kernel memory allows
110 */
111
112#define WL_ROAM_TRIGGER_LEVEL -75 26#define WL_ROAM_TRIGGER_LEVEL -75
113#define WL_ROAM_DELTA 20 27#define WL_ROAM_DELTA 20
114#define WL_BEACON_TIMEOUT 3 28#define WL_BEACON_TIMEOUT 3
@@ -127,15 +41,15 @@ do { \
127#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ 41#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */
128#define IE_MAX_LEN 512 42#define IE_MAX_LEN 512
129 43
130/* dongle status */ 44/**
131enum wl_status { 45 * enum brcmf_scan_status - dongle scan status
132 WL_STATUS_READY, 46 *
133 WL_STATUS_SCANNING, 47 * @BRCMF_SCAN_STATUS_BUSY: scanning in progress on dongle.
134 WL_STATUS_SCAN_ABORTING, 48 * @BRCMF_SCAN_STATUS_ABORT: scan being aborted on dongle.
135 WL_STATUS_CONNECTING, 49 */
136 WL_STATUS_CONNECTED, 50enum brcmf_scan_status {
137 WL_STATUS_AP_CREATING, 51 BRCMF_SCAN_STATUS_BUSY,
138 WL_STATUS_AP_CREATED 52 BRCMF_SCAN_STATUS_ABORT,
139}; 53};
140 54
141/* wi-fi mode */ 55/* wi-fi mode */
@@ -145,28 +59,8 @@ enum wl_mode {
145 WL_MODE_AP 59 WL_MODE_AP
146}; 60};
147 61
148/* dongle profile list */
149enum wl_prof_list {
150 WL_PROF_MODE,
151 WL_PROF_SSID,
152 WL_PROF_SEC,
153 WL_PROF_IBSS,
154 WL_PROF_BAND,
155 WL_PROF_BSSID,
156 WL_PROF_ACT,
157 WL_PROF_BEACONINT,
158 WL_PROF_DTIMPERIOD
159};
160
161/* dongle iscan state */
162enum wl_iscan_state {
163 WL_ISCAN_STATE_IDLE,
164 WL_ISCAN_STATE_SCANING
165};
166
167/* dongle configuration */ 62/* dongle configuration */
168struct brcmf_cfg80211_conf { 63struct brcmf_cfg80211_conf {
169 u32 mode; /* adhoc , infrastructure or ap */
170 u32 frag_threshold; 64 u32 frag_threshold;
171 u32 rts_threshold; 65 u32 rts_threshold;
172 u32 retry_short; 66 u32 retry_short;
@@ -175,17 +69,6 @@ struct brcmf_cfg80211_conf {
175 struct ieee80211_channel channel; 69 struct ieee80211_channel channel;
176}; 70};
177 71
178/* forward declaration */
179struct brcmf_cfg80211_info;
180
181/* cfg80211 main event loop */
182struct brcmf_cfg80211_event_loop {
183 s32(*handler[BRCMF_E_LAST]) (struct brcmf_cfg80211_info *cfg,
184 struct net_device *ndev,
185 const struct brcmf_event_msg *e,
186 void *data);
187};
188
189/* basic structure of scan request */ 72/* basic structure of scan request */
190struct brcmf_cfg80211_scan_req { 73struct brcmf_cfg80211_scan_req {
191 struct brcmf_ssid_le ssid_le; 74 struct brcmf_ssid_le ssid_le;
@@ -197,14 +80,6 @@ struct brcmf_cfg80211_ie {
197 u8 buf[WL_TLV_INFO_MAX]; 80 u8 buf[WL_TLV_INFO_MAX];
198}; 81};
199 82
200/* event queue for cfg80211 main event */
201struct brcmf_cfg80211_event_q {
202 struct list_head evt_q_list;
203 u32 etype;
204 struct brcmf_event_msg emsg;
205 s8 edata[1];
206};
207
208/* security information with currently associated ap */ 83/* security information with currently associated ap */
209struct brcmf_cfg80211_security { 84struct brcmf_cfg80211_security {
210 u32 wpa_versions; 85 u32 wpa_versions;
@@ -214,45 +89,73 @@ struct brcmf_cfg80211_security {
214 u32 wpa_auth; 89 u32 wpa_auth;
215}; 90};
216 91
217/* ibss information for currently joined ibss network */ 92/**
218struct brcmf_cfg80211_ibss { 93 * struct brcmf_cfg80211_profile - profile information.
219 u8 beacon_interval; /* in millisecond */ 94 *
220 u8 atim; /* in millisecond */ 95 * @ssid: ssid of associated/associating ap.
221 s8 join_only; 96 * @bssid: bssid of joined/joining ibss.
222 u8 band; 97 * @sec: security information.
223 u8 channel; 98 */
224};
225
226/* dongle profile */
227struct brcmf_cfg80211_profile { 99struct brcmf_cfg80211_profile {
228 u32 mode;
229 struct brcmf_ssid ssid; 100 struct brcmf_ssid ssid;
230 u8 bssid[ETH_ALEN]; 101 u8 bssid[ETH_ALEN];
231 u16 beacon_interval;
232 u8 dtim_period;
233 struct brcmf_cfg80211_security sec; 102 struct brcmf_cfg80211_security sec;
234 struct brcmf_cfg80211_ibss ibss;
235 s32 band;
236}; 103};
237 104
238/* dongle iscan event loop */ 105/**
239struct brcmf_cfg80211_iscan_eloop { 106 * enum brcmf_vif_status - bit indices for vif status.
240 s32 (*handler[WL_SCAN_ERSULTS_LAST]) 107 *
241 (struct brcmf_cfg80211_info *cfg); 108 * @BRCMF_VIF_STATUS_READY: ready for operation.
109 * @BRCMF_VIF_STATUS_CONNECTING: connect/join in progress.
110 * @BRCMF_VIF_STATUS_CONNECTED: connected/joined succesfully.
111 * @BRCMF_VIF_STATUS_AP_CREATING: interface configured for AP operation.
112 * @BRCMF_VIF_STATUS_AP_CREATED: AP operation started.
113 */
114enum brcmf_vif_status {
115 BRCMF_VIF_STATUS_READY,
116 BRCMF_VIF_STATUS_CONNECTING,
117 BRCMF_VIF_STATUS_CONNECTED,
118 BRCMF_VIF_STATUS_AP_CREATING,
119 BRCMF_VIF_STATUS_AP_CREATED
120};
121
122/**
123 * struct vif_saved_ie - holds saved IEs for a virtual interface.
124 *
125 * @probe_res_ie: IE info for probe response.
126 * @beacon_ie: IE info for beacon frame.
127 * @probe_res_ie_len: IE info length for probe response.
128 * @beacon_ie_len: IE info length for beacon frame.
129 */
130struct vif_saved_ie {
131 u8 probe_res_ie[IE_MAX_LEN];
132 u8 beacon_ie[IE_MAX_LEN];
133 u32 probe_res_ie_len;
134 u32 beacon_ie_len;
242}; 135};
243 136
244/* dongle iscan controller */ 137/**
245struct brcmf_cfg80211_iscan_ctrl { 138 * struct brcmf_cfg80211_vif - virtual interface specific information.
246 struct net_device *ndev; 139 *
247 struct timer_list timer; 140 * @ifp: lower layer interface pointer
248 u32 timer_ms; 141 * @wdev: wireless device.
249 u32 timer_on; 142 * @profile: profile information.
250 s32 state; 143 * @mode: operating mode.
251 struct work_struct work; 144 * @roam_off: roaming state.
252 struct brcmf_cfg80211_iscan_eloop el; 145 * @sme_state: SME state using enum brcmf_vif_status bits.
253 void *data; 146 * @pm_block: power-management blocked.
254 s8 dcmd_buf[BRCMF_DCMD_SMLEN]; 147 * @list: linked list.
255 s8 scan_buf[WL_ISCAN_BUF_MAX]; 148 */
149struct brcmf_cfg80211_vif {
150 struct brcmf_if *ifp;
151 struct wireless_dev wdev;
152 struct brcmf_cfg80211_profile profile;
153 s32 mode;
154 s32 roam_off;
155 unsigned long sme_state;
156 bool pm_block;
157 struct vif_saved_ie saved_ie;
158 struct list_head list;
256}; 159};
257 160
258/* association inform */ 161/* association inform */
@@ -288,17 +191,6 @@ struct escan_info {
288 struct net_device *ndev; 191 struct net_device *ndev;
289}; 192};
290 193
291/* Structure to hold WPS, WPA IEs for a AP */
292struct ap_info {
293 u8 probe_res_ie[IE_MAX_LEN];
294 u8 beacon_ie[IE_MAX_LEN];
295 u32 probe_res_ie_len;
296 u32 beacon_ie_len;
297 u8 *wpa_ie;
298 u8 *rsn_ie;
299 bool security_mode;
300};
301
302/** 194/**
303 * struct brcmf_pno_param_le - PNO scan configuration parameters 195 * struct brcmf_pno_param_le - PNO scan configuration parameters
304 * 196 *
@@ -383,32 +275,22 @@ struct brcmf_pno_scanresults_le {
383/** 275/**
384 * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface 276 * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface
385 * 277 *
386 * @wdev: representing wl cfg80211 device. 278 * @wiphy: wiphy object for cfg80211 interface.
387 * @conf: dongle configuration. 279 * @conf: dongle configuration.
388 * @scan_request: cfg80211 scan request object. 280 * @scan_request: cfg80211 scan request object.
389 * @el: main event loop.
390 * @evt_q_list: used for event queue.
391 * @evt_q_lock: for event queue synchronization.
392 * @usr_sync: mainly for dongle up/down synchronization. 281 * @usr_sync: mainly for dongle up/down synchronization.
393 * @bss_list: bss_list holding scanned ap information. 282 * @bss_list: bss_list holding scanned ap information.
394 * @scan_results: results of the last scan.
395 * @scan_req_int: internal scan request object. 283 * @scan_req_int: internal scan request object.
396 * @bss_info: bss information for cfg80211 layer. 284 * @bss_info: bss information for cfg80211 layer.
397 * @ie: information element object for internal purpose. 285 * @ie: information element object for internal purpose.
398 * @profile: holding dongle profile.
399 * @iscan: iscan controller information.
400 * @conn_info: association info. 286 * @conn_info: association info.
401 * @pmk_list: wpa2 pmk list. 287 * @pmk_list: wpa2 pmk list.
402 * @event_work: event handler work struct. 288 * @scan_status: scan activity on the dongle.
403 * @status: current dongle status.
404 * @pub: common driver information. 289 * @pub: common driver information.
405 * @channel: current channel. 290 * @channel: current channel.
406 * @iscan_on: iscan on/off switch.
407 * @iscan_kickstart: indicate iscan already started.
408 * @active_scan: current scan mode. 291 * @active_scan: current scan mode.
409 * @sched_escan: e-scan for scheduled scan support running. 292 * @sched_escan: e-scan for scheduled scan support running.
410 * @ibss_starter: indicates this sta is ibss starter. 293 * @ibss_starter: indicates this sta is ibss starter.
411 * @link_up: link/connection up flag.
412 * @pwr_save: indicate whether dongle to support power save mode. 294 * @pwr_save: indicate whether dongle to support power save mode.
413 * @dongle_up: indicate whether dongle up or not. 295 * @dongle_up: indicate whether dongle up or not.
414 * @roam_on: on/off switch for dongle self-roaming. 296 * @roam_on: on/off switch for dongle self-roaming.
@@ -416,41 +298,30 @@ struct brcmf_pno_scanresults_le {
416 * @dcmd_buf: dcmd buffer. 298 * @dcmd_buf: dcmd buffer.
417 * @extra_buf: mainly to grab assoc information. 299 * @extra_buf: mainly to grab assoc information.
418 * @debugfsdir: debugfs folder for this device. 300 * @debugfsdir: debugfs folder for this device.
419 * @escan_on: escan on/off switch.
420 * @escan_info: escan information. 301 * @escan_info: escan information.
421 * @escan_timeout: Timer for catch scan timeout. 302 * @escan_timeout: Timer for catch scan timeout.
422 * @escan_timeout_work: scan timeout worker. 303 * @escan_timeout_work: scan timeout worker.
423 * @escan_ioctl_buf: dongle command buffer for escan commands. 304 * @escan_ioctl_buf: dongle command buffer for escan commands.
424 * @ap_info: host ap information. 305 * @vif_list: linked list of vif instances.
425 * @ci: used to link this structure to netdev private data. 306 * @vif_cnt: number of vif instances.
426 */ 307 */
427struct brcmf_cfg80211_info { 308struct brcmf_cfg80211_info {
428 struct wireless_dev *wdev; 309 struct wiphy *wiphy;
429 struct brcmf_cfg80211_conf *conf; 310 struct brcmf_cfg80211_conf *conf;
430 struct cfg80211_scan_request *scan_request; 311 struct cfg80211_scan_request *scan_request;
431 struct brcmf_cfg80211_event_loop el;
432 struct list_head evt_q_list;
433 spinlock_t evt_q_lock;
434 struct mutex usr_sync; 312 struct mutex usr_sync;
435 struct brcmf_scan_results *bss_list; 313 struct brcmf_scan_results *bss_list;
436 struct brcmf_scan_results *scan_results; 314 struct brcmf_cfg80211_scan_req scan_req_int;
437 struct brcmf_cfg80211_scan_req *scan_req_int;
438 struct wl_cfg80211_bss_info *bss_info; 315 struct wl_cfg80211_bss_info *bss_info;
439 struct brcmf_cfg80211_ie ie; 316 struct brcmf_cfg80211_ie ie;
440 struct brcmf_cfg80211_profile *profile;
441 struct brcmf_cfg80211_iscan_ctrl *iscan;
442 struct brcmf_cfg80211_connect_info conn_info; 317 struct brcmf_cfg80211_connect_info conn_info;
443 struct brcmf_cfg80211_pmk_list *pmk_list; 318 struct brcmf_cfg80211_pmk_list *pmk_list;
444 struct work_struct event_work; 319 unsigned long scan_status;
445 unsigned long status;
446 struct brcmf_pub *pub; 320 struct brcmf_pub *pub;
447 u32 channel; 321 u32 channel;
448 bool iscan_on;
449 bool iscan_kickstart;
450 bool active_scan; 322 bool active_scan;
451 bool sched_escan; 323 bool sched_escan;
452 bool ibss_starter; 324 bool ibss_starter;
453 bool link_up;
454 bool pwr_save; 325 bool pwr_save;
455 bool dongle_up; 326 bool dongle_up;
456 bool roam_on; 327 bool roam_on;
@@ -458,17 +329,17 @@ struct brcmf_cfg80211_info {
458 u8 *dcmd_buf; 329 u8 *dcmd_buf;
459 u8 *extra_buf; 330 u8 *extra_buf;
460 struct dentry *debugfsdir; 331 struct dentry *debugfsdir;
461 bool escan_on;
462 struct escan_info escan_info; 332 struct escan_info escan_info;
463 struct timer_list escan_timeout; 333 struct timer_list escan_timeout;
464 struct work_struct escan_timeout_work; 334 struct work_struct escan_timeout_work;
465 u8 *escan_ioctl_buf; 335 u8 *escan_ioctl_buf;
466 struct ap_info *ap_info; 336 struct list_head vif_list;
337 u8 vif_cnt;
467}; 338};
468 339
469static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *w) 340static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
470{ 341{
471 return w->wdev->wiphy; 342 return cfg->wiphy;
472} 343}
473 344
474static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w) 345static inline struct brcmf_cfg80211_info *wiphy_to_cfg(struct wiphy *w)
@@ -481,9 +352,12 @@ static inline struct brcmf_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd)
481 return (struct brcmf_cfg80211_info *)(wdev_priv(wd)); 352 return (struct brcmf_cfg80211_info *)(wdev_priv(wd));
482} 353}
483 354
484static inline struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg) 355static inline
356struct net_device *cfg_to_ndev(struct brcmf_cfg80211_info *cfg)
485{ 357{
486 return cfg->wdev->netdev; 358 struct brcmf_cfg80211_vif *vif;
359 vif = list_first_entry(&cfg->vif_list, struct brcmf_cfg80211_vif, list);
360 return vif->wdev.netdev;
487} 361}
488 362
489static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev) 363static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
@@ -491,8 +365,17 @@ static inline struct brcmf_cfg80211_info *ndev_to_cfg(struct net_device *ndev)
491 return wdev_to_cfg(ndev->ieee80211_ptr); 365 return wdev_to_cfg(ndev->ieee80211_ptr);
492} 366}
493 367
494#define iscan_to_cfg(i) ((struct brcmf_cfg80211_info *)(i->data)) 368static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
495#define cfg_to_iscan(w) (w->iscan) 369{
370 struct brcmf_if *ifp = netdev_priv(nd);
371 return &ifp->vif->profile;
372}
373
374static inline struct brcmf_cfg80211_vif *ndev_to_vif(struct net_device *ndev)
375{
376 struct brcmf_if *ifp = netdev_priv(ndev);
377 return ifp->vif;
378}
496 379
497static inline struct 380static inline struct
498brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg) 381brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
@@ -500,15 +383,10 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_info *cfg)
500 return &cfg->conn_info; 383 return &cfg->conn_info;
501} 384}
502 385
503struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct net_device *ndev, 386struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
504 struct device *busdev, 387 struct device *busdev);
505 struct brcmf_pub *drvr);
506void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); 388void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
507 389s32 brcmf_cfg80211_up(struct net_device *ndev);
508/* event handler from dongle */ 390s32 brcmf_cfg80211_down(struct net_device *ndev);
509void brcmf_cfg80211_event(struct net_device *ndev,
510 const struct brcmf_event_msg *e, void *data);
511s32 brcmf_cfg80211_up(struct brcmf_cfg80211_info *cfg);
512s32 brcmf_cfg80211_down(struct brcmf_cfg80211_info *cfg);
513 391
514#endif /* _wl_cfg80211_h_ */ 392#endif /* _wl_cfg80211_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index e227c4c68ef9..d3d4151c3eda 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -40,7 +40,8 @@ BRCMSMAC_OFILES := \
40 phy/phytbl_n.o \ 40 phy/phytbl_n.o \
41 phy/phy_qmath.o \ 41 phy/phy_qmath.o \
42 dma.o \ 42 dma.o \
43 brcms_trace_events.o 43 brcms_trace_events.o \
44 debug.o
44 45
45MODULEPFX := brcmsmac 46MODULEPFX := brcmsmac
46 47
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index b89f1272b93f..f0888a9ee32e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -692,7 +692,7 @@ void ai_pci_up(struct si_pub *sih)
692 sii = container_of(sih, struct si_info, pub); 692 sii = container_of(sih, struct si_info, pub);
693 693
694 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) 694 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
695 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true); 695 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
696} 696}
697 697
698/* Unconfigure and/or apply various WARs when going down */ 698/* Unconfigure and/or apply various WARs when going down */
@@ -703,7 +703,7 @@ void ai_pci_down(struct si_pub *sih)
703 sii = container_of(sih, struct si_info, pub); 703 sii = container_of(sih, struct si_info, pub);
704 704
705 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) 705 if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
706 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false); 706 bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
707} 707}
708 708
709/* Enable BT-COEX & Ex-PA for 4313 */ 709/* Enable BT-COEX & Ex-PA for 4313 */
@@ -721,7 +721,7 @@ void ai_epa_4313war(struct si_pub *sih)
721/* check if the device is removed */ 721/* check if the device is removed */
722bool ai_deviceremoved(struct si_pub *sih) 722bool ai_deviceremoved(struct si_pub *sih)
723{ 723{
724 u32 w; 724 u32 w = 0;
725 struct si_info *sii; 725 struct si_info *sii;
726 726
727 sii = container_of(sih, struct si_info, pub); 727 sii = container_of(sih, struct si_info, pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index be5bcfb9153b..1de94f30564f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -21,6 +21,8 @@
21#include "antsel.h" 21#include "antsel.h"
22#include "main.h" 22#include "main.h"
23#include "ampdu.h" 23#include "ampdu.h"
24#include "debug.h"
25#include "brcms_trace_events.h"
24 26
25/* max number of mpdus in an ampdu */ 27/* max number of mpdus in an ampdu */
26#define AMPDU_MAX_MPDU 32 28#define AMPDU_MAX_MPDU 32
@@ -40,8 +42,6 @@
40#define AMPDU_DEF_RETRY_LIMIT 5 42#define AMPDU_DEF_RETRY_LIMIT 5
41/* default tx retry limit at reg rate */ 43/* default tx retry limit at reg rate */
42#define AMPDU_DEF_RR_RETRY_LIMIT 2 44#define AMPDU_DEF_RR_RETRY_LIMIT 2
43/* default weight of ampdu in txfifo */
44#define AMPDU_DEF_TXPKT_WEIGHT 2
45/* default ffpld reserved bytes */ 45/* default ffpld reserved bytes */
46#define AMPDU_DEF_FFPLD_RSVD 2048 46#define AMPDU_DEF_FFPLD_RSVD 2048
47/* # of inis to be freed on detach */ 47/* # of inis to be freed on detach */
@@ -114,7 +114,6 @@ struct brcms_fifo_info {
114 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec 114 * mpdu_density: min mpdu spacing (0-7) ==> 2^(x-1)/8 usec
115 * max_pdu: max pdus allowed in ampdu 115 * max_pdu: max pdus allowed in ampdu
116 * dur: max duration of an ampdu (in msec) 116 * dur: max duration of an ampdu (in msec)
117 * txpkt_weight: weight of ampdu in txfifo; reduces rate lag
118 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes 117 * rx_factor: maximum rx ampdu factor (0-3) ==> 2^(13+x) bytes
119 * ffpld_rsvd: number of bytes to reserve for preload 118 * ffpld_rsvd: number of bytes to reserve for preload
120 * max_txlen: max size of ampdu per mcs, bw and sgi 119 * max_txlen: max size of ampdu per mcs, bw and sgi
@@ -136,7 +135,6 @@ struct ampdu_info {
136 u8 mpdu_density; 135 u8 mpdu_density;
137 s8 max_pdu; 136 s8 max_pdu;
138 u8 dur; 137 u8 dur;
139 u8 txpkt_weight;
140 u8 rx_factor; 138 u8 rx_factor;
141 u32 ffpld_rsvd; 139 u32 ffpld_rsvd;
142 u32 max_txlen[MCS_TABLE_SIZE][2][2]; 140 u32 max_txlen[MCS_TABLE_SIZE][2][2];
@@ -183,18 +181,19 @@ static bool brcms_c_ampdu_cap(struct ampdu_info *ampdu)
183static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on) 181static int brcms_c_ampdu_set(struct ampdu_info *ampdu, bool on)
184{ 182{
185 struct brcms_c_info *wlc = ampdu->wlc; 183 struct brcms_c_info *wlc = ampdu->wlc;
184 struct bcma_device *core = wlc->hw->d11core;
186 185
187 wlc->pub->_ampdu = false; 186 wlc->pub->_ampdu = false;
188 187
189 if (on) { 188 if (on) {
190 if (!(wlc->pub->_n_enab & SUPPORT_11N)) { 189 if (!(wlc->pub->_n_enab & SUPPORT_11N)) {
191 wiphy_err(ampdu->wlc->wiphy, "wl%d: driver not " 190 brcms_err(core, "wl%d: driver not nmode enabled\n",
192 "nmode enabled\n", wlc->pub->unit); 191 wlc->pub->unit);
193 return -ENOTSUPP; 192 return -ENOTSUPP;
194 } 193 }
195 if (!brcms_c_ampdu_cap(ampdu)) { 194 if (!brcms_c_ampdu_cap(ampdu)) {
196 wiphy_err(ampdu->wlc->wiphy, "wl%d: device not " 195 brcms_err(core, "wl%d: device not ampdu capable\n",
197 "ampdu capable\n", wlc->pub->unit); 196 wlc->pub->unit);
198 return -ENOTSUPP; 197 return -ENOTSUPP;
199 } 198 }
200 wlc->pub->_ampdu = on; 199 wlc->pub->_ampdu = on;
@@ -247,7 +246,6 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
247 ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY; 246 ampdu->mpdu_density = AMPDU_DEF_MPDU_DENSITY;
248 ampdu->max_pdu = AUTO; 247 ampdu->max_pdu = AUTO;
249 ampdu->dur = AMPDU_MAX_DUR; 248 ampdu->dur = AMPDU_MAX_DUR;
250 ampdu->txpkt_weight = AMPDU_DEF_TXPKT_WEIGHT;
251 249
252 ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD; 250 ampdu->ffpld_rsvd = AMPDU_DEF_FFPLD_RSVD;
253 /* 251 /*
@@ -374,7 +372,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
374 offsetof(struct macstat, txfunfl[fid])); 372 offsetof(struct macstat, txfunfl[fid]));
375 new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl); 373 new_txunfl = (u16) (cur_txunfl - fifo->prev_txfunfl);
376 if (new_txunfl == 0) { 374 if (new_txunfl == 0) {
377 BCMMSG(wlc->wiphy, "TX status FRAG set but no tx underflows\n"); 375 brcms_dbg_ht(wlc->hw->d11core,
376 "TX status FRAG set but no tx underflows\n");
378 return -1; 377 return -1;
379 } 378 }
380 fifo->prev_txfunfl = cur_txunfl; 379 fifo->prev_txfunfl = cur_txunfl;
@@ -396,8 +395,8 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
396 if (fifo->accum_txfunfl < 10) 395 if (fifo->accum_txfunfl < 10)
397 return 0; 396 return 0;
398 397
399 BCMMSG(wlc->wiphy, "ampdu_count %d tx_underflows %d\n", 398 brcms_dbg_ht(wlc->hw->d11core, "ampdu_count %d tx_underflows %d\n",
400 current_ampdu_cnt, fifo->accum_txfunfl); 399 current_ampdu_cnt, fifo->accum_txfunfl);
401 400
402 /* 401 /*
403 compute the current ratio of tx unfl per ampdu. 402 compute the current ratio of tx unfl per ampdu.
@@ -450,9 +449,10 @@ static int brcms_c_ffpld_check_txfunfl(struct brcms_c_info *wlc, int fid)
450 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size)) 449 (max_mpdu * FFPLD_MPDU_SIZE - fifo->ampdu_pld_size))
451 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100; 450 / (max_mpdu * FFPLD_MPDU_SIZE)) * 100;
452 451
453 BCMMSG(wlc->wiphy, "DMA estimated transfer rate %d; " 452 brcms_dbg_ht(wlc->hw->d11core,
454 "pre-load size %d\n", 453 "DMA estimated transfer rate %d; "
455 fifo->dmaxferrate, fifo->ampdu_pld_size); 454 "pre-load size %d\n",
455 fifo->dmaxferrate, fifo->ampdu_pld_size);
456 } else { 456 } else {
457 457
458 /* decrease ampdu size */ 458 /* decrease ampdu size */
@@ -486,7 +486,7 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
486 scb_ampdu = &scb->scb_ampdu; 486 scb_ampdu = &scb->scb_ampdu;
487 487
488 if (!ampdu->ini_enable[tid]) { 488 if (!ampdu->ini_enable[tid]) {
489 wiphy_err(ampdu->wlc->wiphy, "%s: Rejecting tid %d\n", 489 brcms_err(wlc->hw->d11core, "%s: Rejecting tid %d\n",
490 __func__, tid); 490 __func__, tid);
491 return; 491 return;
492 } 492 }
@@ -498,378 +498,324 @@ brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
498 scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes; 498 scb_ampdu->max_rx_ampdu_bytes = max_rx_ampdu_bytes;
499} 499}
500 500
501int 501void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
502brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, 502 struct brcms_c_info *wlc)
503 struct sk_buff **pdu, int prec)
504{ 503{
505 struct brcms_c_info *wlc; 504 session->wlc = wlc;
506 struct sk_buff *p, *pkt[AMPDU_MAX_MPDU]; 505 skb_queue_head_init(&session->skb_list);
507 u8 tid, ndelim; 506 session->max_ampdu_len = 0; /* determined from first MPDU */
508 int err = 0; 507 session->max_ampdu_frames = 0; /* determined from first MPDU */
509 u8 preamble_type = BRCMS_GF_PREAMBLE; 508 session->ampdu_len = 0;
510 u8 fbr_preamble_type = BRCMS_GF_PREAMBLE; 509 session->dma_len = 0;
511 u8 rts_preamble_type = BRCMS_LONG_PREAMBLE; 510}
512 u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
513 511
514 bool rr = true, fbr = false; 512/*
515 uint i, count = 0, fifo, seg_cnt = 0; 513 * Preps the given packet for AMPDU based on the session data. If the
516 u16 plen, len, seq = 0, mcl, mch, index, frameid, dma_len = 0; 514 * frame cannot be accomodated in the current session, -ENOSPC is
517 u32 ampdu_len, max_ampdu_bytes = 0; 515 * returned.
518 struct d11txh *txh = NULL; 516 */
517int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
518 struct sk_buff *p)
519{
520 struct brcms_c_info *wlc = session->wlc;
521 struct ampdu_info *ampdu = wlc->ampdu;
522 struct scb *scb = &wlc->pri_scb;
523 struct scb_ampdu *scb_ampdu = &scb->scb_ampdu;
524 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
525 struct ieee80211_tx_rate *txrate = tx_info->status.rates;
526 struct d11txh *txh = (struct d11txh *)p->data;
527 unsigned ampdu_frames;
528 u8 ndelim, tid;
519 u8 *plcp; 529 u8 *plcp;
520 struct ieee80211_hdr *h; 530 uint len;
521 struct scb *scb; 531 u16 mcl;
522 struct scb_ampdu *scb_ampdu;
523 struct scb_ampdu_tid_ini *ini;
524 u8 mcs = 0;
525 bool use_rts = false, use_cts = false;
526 u32 rspec = 0, rspec_fallback = 0;
527 u32 rts_rspec = 0, rts_rspec_fallback = 0;
528 u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
529 struct ieee80211_rts *rts;
530 u8 rr_retry_limit;
531 struct brcms_fifo_info *f;
532 bool fbr_iscck; 532 bool fbr_iscck;
533 struct ieee80211_tx_info *tx_info; 533 bool rr;
534 u16 qlen;
535 struct wiphy *wiphy;
536
537 wlc = ampdu->wlc;
538 wiphy = wlc->wiphy;
539 p = *pdu;
540
541 tid = (u8) (p->priority);
542 534
543 f = ampdu->fifo_tb + prio2fifo[tid]; 535 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
536 plcp = (u8 *)(txh + 1);
537 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
538 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
539 BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
540 len = roundup(len, 4) + (ndelim + 1) * AMPDU_DELIMITER_LEN;
544 541
545 scb = &wlc->pri_scb; 542 ampdu_frames = skb_queue_len(&session->skb_list);
546 scb_ampdu = &scb->scb_ampdu; 543 if (ampdu_frames != 0) {
547 ini = &scb_ampdu->ini[tid]; 544 struct sk_buff *first;
548 545
549 /* Let pressure continue to build ... */ 546 if (ampdu_frames + 1 > session->max_ampdu_frames ||
550 qlen = pktq_plen(&qi->q, prec); 547 session->ampdu_len + len > session->max_ampdu_len)
551 if (ini->tx_in_transit > 0 && 548 return -ENOSPC;
552 qlen < min(scb_ampdu->max_pdu, ini->ba_wsize))
553 /* Collect multiple MPDU's to be sent in the next AMPDU */
554 return -EBUSY;
555 549
556 /* at this point we intend to transmit an AMPDU */ 550 /*
557 rr_retry_limit = ampdu->rr_retry_limit_tid[tid]; 551 * We aren't really out of space if the new frame is of
558 ampdu_len = 0; 552 * a different priority, but we want the same behaviour
559 dma_len = 0; 553 * so return -ENOSPC anyway.
560 while (p) { 554 *
561 struct ieee80211_tx_rate *txrate; 555 * XXX: The old AMPDU code did this, but is it really
562 556 * necessary?
563 tx_info = IEEE80211_SKB_CB(p); 557 */
564 txrate = tx_info->status.rates; 558 first = skb_peek(&session->skb_list);
559 if (p->priority != first->priority)
560 return -ENOSPC;
561 }
565 562
566 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 563 /*
567 err = brcms_c_prep_pdu(wlc, p, &fifo); 564 * Now that we're sure this frame can be accomodated, update the
568 } else { 565 * session information.
569 wiphy_err(wiphy, "%s: AMPDU flag is off!\n", __func__); 566 */
570 *pdu = NULL; 567 session->ampdu_len += len;
571 err = 0; 568 session->dma_len += p->len;
572 break;
573 }
574 569
575 if (err) { 570 tid = (u8)p->priority;
576 if (err == -EBUSY) {
577 wiphy_err(wiphy, "wl%d: sendampdu: "
578 "prep_xdu retry; seq 0x%x\n",
579 wlc->pub->unit, seq);
580 *pdu = p;
581 break;
582 }
583 571
584 /* error in the packet; reject it */ 572 /* Handle retry limits */
585 wiphy_err(wiphy, "wl%d: sendampdu: prep_xdu " 573 if (txrate[0].count <= ampdu->rr_retry_limit_tid[tid]) {
586 "rejected; seq 0x%x\n", wlc->pub->unit, seq); 574 txrate[0].count++;
587 *pdu = NULL; 575 rr = true;
588 break; 576 } else {
589 } 577 txrate[1].count++;
578 rr = false;
579 }
590 580
591 /* pkt is good to be aggregated */ 581 if (ampdu_frames == 0) {
592 txh = (struct d11txh *) p->data; 582 u8 plcp0, plcp3, is40, sgi, mcs;
593 plcp = (u8 *) (txh + 1); 583 uint fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
594 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN); 584 struct brcms_fifo_info *f = &ampdu->fifo_tb[fifo];
595 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
596 index = TX_SEQ_TO_INDEX(seq);
597 585
598 /* check mcl fields and test whether it can be agg'd */ 586 if (rr) {
599 mcl = le16_to_cpu(txh->MacTxControlLow); 587 plcp0 = plcp[0];
600 mcl &= ~TXC_AMPDU_MASK; 588 plcp3 = plcp[3];
601 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x3);
602 txh->PreloadSize = 0; /* always default to 0 */
603
604 /* Handle retry limits */
605 if (txrate[0].count <= rr_retry_limit) {
606 txrate[0].count++;
607 rr = true;
608 fbr = false;
609 } else { 589 } else {
610 fbr = true; 590 plcp0 = txh->FragPLCPFallback[0];
611 rr = false; 591 plcp3 = txh->FragPLCPFallback[3];
612 txrate[1].count++;
613 }
614
615 /* extract the length info */
616 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
617 : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
618
619 /* retrieve null delimiter count */
620 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
621 seg_cnt += 1;
622 592
623 BCMMSG(wlc->wiphy, "wl%d: mpdu %d plcp_len %d\n",
624 wlc->pub->unit, count, len);
625
626 /*
627 * aggregateable mpdu. For ucode/hw agg,
628 * test whether need to break or change the epoch
629 */
630 if (count == 0) {
631 mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
632 /* refill the bits since might be a retx mpdu */
633 mcl |= TXC_STARTMSDU;
634 rts = (struct ieee80211_rts *)&txh->rts_frame;
635
636 if (ieee80211_is_rts(rts->frame_control)) {
637 mcl |= TXC_SENDRTS;
638 use_rts = true;
639 }
640 if (ieee80211_is_cts(rts->frame_control)) {
641 mcl |= TXC_SENDCTS;
642 use_cts = true;
643 }
644 } else {
645 mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
646 mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
647 } 593 }
648 594
649 len = roundup(len, 4); 595 /* Limit AMPDU size based on MCS */
650 ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN); 596 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
597 sgi = plcp3_issgi(plcp3) ? 1 : 0;
598 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
599 session->max_ampdu_len = min(scb_ampdu->max_rx_ampdu_bytes,
600 ampdu->max_txlen[mcs][is40][sgi]);
651 601
652 dma_len += (u16) p->len; 602 session->max_ampdu_frames = scb_ampdu->max_pdu;
603 if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
604 session->max_ampdu_frames =
605 min_t(u16, f->mcs2ampdu_table[mcs],
606 session->max_ampdu_frames);
607 }
608 }
653 609
654 BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d" 610 /*
655 " seg_cnt %d null delim %d\n", 611 * Treat all frames as "middle" frames of AMPDU here. First and
656 wlc->pub->unit, ampdu_len, seg_cnt, ndelim); 612 * last frames must be fixed up after all MPDUs have been prepped.
613 */
614 mcl = le16_to_cpu(txh->MacTxControlLow);
615 mcl &= ~TXC_AMPDU_MASK;
616 mcl |= (TXC_AMPDU_MIDDLE << TXC_AMPDU_SHIFT);
617 mcl &= ~(TXC_STARTMSDU | TXC_SENDRTS | TXC_SENDCTS);
618 txh->MacTxControlLow = cpu_to_le16(mcl);
619 txh->PreloadSize = 0; /* always default to 0 */
657 620
658 txh->MacTxControlLow = cpu_to_le16(mcl); 621 skb_queue_tail(&session->skb_list, p);
659 622
660 /* this packet is added */ 623 return 0;
661 pkt[count++] = p; 624}
662 625
663 /* patch the first MPDU */ 626void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
664 if (count == 1) { 627{
665 u8 plcp0, plcp3, is40, sgi; 628 struct brcms_c_info *wlc = session->wlc;
629 struct ampdu_info *ampdu = wlc->ampdu;
630 struct sk_buff *first, *last;
631 struct d11txh *txh;
632 struct ieee80211_tx_info *tx_info;
633 struct ieee80211_tx_rate *txrate;
634 u8 ndelim;
635 u8 *plcp;
636 uint len;
637 uint fifo;
638 struct brcms_fifo_info *f;
639 u16 mcl;
640 bool fbr;
641 bool fbr_iscck;
642 struct ieee80211_rts *rts;
643 bool use_rts = false, use_cts = false;
644 u16 dma_len = session->dma_len;
645 u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
646 u32 rspec = 0, rspec_fallback = 0;
647 u32 rts_rspec = 0, rts_rspec_fallback = 0;
648 u8 plcp0, plcp3, is40, sgi, mcs;
649 u16 mch;
650 u8 preamble_type = BRCMS_GF_PREAMBLE;
651 u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
652 u8 rts_preamble_type = BRCMS_LONG_PREAMBLE;
653 u8 rts_fbr_preamble_type = BRCMS_LONG_PREAMBLE;
666 654
667 if (rr) { 655 if (skb_queue_empty(&session->skb_list))
668 plcp0 = plcp[0]; 656 return;
669 plcp3 = plcp[3];
670 } else {
671 plcp0 = txh->FragPLCPFallback[0];
672 plcp3 = txh->FragPLCPFallback[3];
673 657
674 } 658 first = skb_peek(&session->skb_list);
675 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0; 659 last = skb_peek_tail(&session->skb_list);
676 sgi = plcp3_issgi(plcp3) ? 1 : 0; 660
677 mcs = plcp0 & ~MIMO_PLCP_40MHZ; 661 /* Need to fix up last MPDU first to adjust AMPDU length */
678 max_ampdu_bytes = 662 txh = (struct d11txh *)last->data;
679 min(scb_ampdu->max_rx_ampdu_bytes, 663 fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
680 ampdu->max_txlen[mcs][is40][sgi]); 664 f = &ampdu->fifo_tb[fifo];
681 665
682 if (is40) 666 mcl = le16_to_cpu(txh->MacTxControlLow);
683 mimo_ctlchbw = 667 mcl &= ~TXC_AMPDU_MASK;
684 CHSPEC_SB_UPPER(wlc_phy_chanspec_get( 668 mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
685 wlc->band->pi)) 669 txh->MacTxControlLow = cpu_to_le16(mcl);
686 ? PHY_TXC1_BW_20MHZ_UP : PHY_TXC1_BW_20MHZ; 670
687 671 /* remove the null delimiter after last mpdu */
688 /* rebuild the rspec and rspec_fallback */ 672 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
689 rspec = RSPEC_MIMORATE; 673 txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
690 rspec |= plcp[0] & ~MIMO_PLCP_40MHZ; 674 session->ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
691 if (plcp[0] & MIMO_PLCP_40MHZ) 675
692 rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT); 676 /* remove the pad len from last mpdu */
693 677 fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
694 if (fbr_iscck) /* CCK */ 678 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback) :
695 rspec_fallback = cck_rspec(cck_phy2mac_rate 679 BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
696 (txh->FragPLCPFallback[0])); 680 session->ampdu_len -= roundup(len, 4) - len;
697 else { /* MIMO */ 681
698 rspec_fallback = RSPEC_MIMORATE; 682 /* Now fix up the first MPDU */
699 rspec_fallback |= 683 tx_info = IEEE80211_SKB_CB(first);
700 txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ; 684 txrate = tx_info->status.rates;
701 if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ) 685 txh = (struct d11txh *)first->data;
702 rspec_fallback |= 686 plcp = (u8 *)(txh + 1);
703 (PHY_TXC1_BW_40MHZ << 687 rts = (struct ieee80211_rts *)&txh->rts_frame;
704 RSPEC_BW_SHIFT); 688
705 } 689 mcl = le16_to_cpu(txh->MacTxControlLow);
690 /* If only one MPDU leave it marked as last */
691 if (first != last) {
692 mcl &= ~TXC_AMPDU_MASK;
693 mcl |= (TXC_AMPDU_FIRST << TXC_AMPDU_SHIFT);
694 }
695 mcl |= TXC_STARTMSDU;
696 if (ieee80211_is_rts(rts->frame_control)) {
697 mcl |= TXC_SENDRTS;
698 use_rts = true;
699 }
700 if (ieee80211_is_cts(rts->frame_control)) {
701 mcl |= TXC_SENDCTS;
702 use_cts = true;
703 }
704 txh->MacTxControlLow = cpu_to_le16(mcl);
706 705
707 if (use_rts || use_cts) { 706 fbr = txrate[1].count > 0;
708 rts_rspec = 707 if (!fbr) {
709 brcms_c_rspec_to_rts_rspec(wlc, 708 plcp0 = plcp[0];
710 rspec, false, mimo_ctlchbw); 709 plcp3 = plcp[3];
711 rts_rspec_fallback = 710 } else {
712 brcms_c_rspec_to_rts_rspec(wlc, 711 plcp0 = txh->FragPLCPFallback[0];
713 rspec_fallback, false, mimo_ctlchbw); 712 plcp3 = txh->FragPLCPFallback[3];
714 } 713 }
715 } 714 is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
715 sgi = plcp3_issgi(plcp3) ? 1 : 0;
716 mcs = plcp0 & ~MIMO_PLCP_40MHZ;
717
718 if (is40) {
719 if (CHSPEC_SB_UPPER(wlc_phy_chanspec_get(wlc->band->pi)))
720 mimo_ctlchbw = PHY_TXC1_BW_20MHZ_UP;
721 else
722 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
723 }
716 724
717 /* if (first mpdu for host agg) */ 725 /* rebuild the rspec and rspec_fallback */
718 /* test whether to add more */ 726 rspec = RSPEC_MIMORATE;
719 if ((mcs_2_rate(mcs, true, false) >= f->dmaxferrate) && 727 rspec |= plcp[0] & ~MIMO_PLCP_40MHZ;
720 (count == f->mcs2ampdu_table[mcs])) { 728 if (plcp[0] & MIMO_PLCP_40MHZ)
721 BCMMSG(wlc->wiphy, "wl%d: PR 37644: stopping" 729 rspec |= (PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT);
722 " ampdu at %d for mcs %d\n",
723 wlc->pub->unit, count, mcs);
724 break;
725 }
726 730
727 if (count == scb_ampdu->max_pdu) 731 fbr_iscck = !(le16_to_cpu(txh->XtraFrameTypes) & 0x03);
728 break; 732 if (fbr_iscck) {
733 rspec_fallback =
734 cck_rspec(cck_phy2mac_rate(txh->FragPLCPFallback[0]));
735 } else {
736 rspec_fallback = RSPEC_MIMORATE;
737 rspec_fallback |= txh->FragPLCPFallback[0] & ~MIMO_PLCP_40MHZ;
738 if (txh->FragPLCPFallback[0] & MIMO_PLCP_40MHZ)
739 rspec_fallback |= PHY_TXC1_BW_40MHZ << RSPEC_BW_SHIFT;
740 }
729 741
730 /* 742 if (use_rts || use_cts) {
731 * check to see if the next pkt is 743 rts_rspec =
732 * a candidate for aggregation 744 brcms_c_rspec_to_rts_rspec(wlc, rspec,
733 */ 745 false, mimo_ctlchbw);
734 p = pktq_ppeek(&qi->q, prec); 746 rts_rspec_fallback =
735 if (p) { 747 brcms_c_rspec_to_rts_rspec(wlc, rspec_fallback,
736 tx_info = IEEE80211_SKB_CB(p); 748 false, mimo_ctlchbw);
737 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && 749 }
738 ((u8) (p->priority) == tid)) {
739 plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
740 plen = max(scb_ampdu->min_len, plen);
741 750
742 if ((plen + ampdu_len) > max_ampdu_bytes) { 751 BRCMS_SET_MIMO_PLCP_LEN(plcp, session->ampdu_len);
743 p = NULL; 752 /* mark plcp to indicate ampdu */
744 continue; 753 BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
745 }
746 754
747 /* 755 /* reset the mixed mode header durations */
748 * check if there are enough 756 if (txh->MModeLen) {
749 * descriptors available 757 u16 mmodelen = brcms_c_calc_lsig_len(wlc, rspec,
750 */ 758 session->ampdu_len);
751 if (*wlc->core->txavail[fifo] <= seg_cnt + 1) { 759 txh->MModeLen = cpu_to_le16(mmodelen);
752 wiphy_err(wiphy, "%s: No fifo space " 760 preamble_type = BRCMS_MM_PREAMBLE;
753 "!!\n", __func__); 761 }
754 p = NULL; 762 if (txh->MModeFbrLen) {
755 continue; 763 u16 mmfbrlen = brcms_c_calc_lsig_len(wlc, rspec_fallback,
756 } 764 session->ampdu_len);
757 /* next packet fit for aggregation so dequeue */ 765 txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
758 p = brcmu_pktq_pdeq(&qi->q, prec); 766 fbr_preamble_type = BRCMS_MM_PREAMBLE;
759 } else { 767 }
760 p = NULL;
761 }
762 }
763 } /* end while(p) */
764 768
765 ini->tx_in_transit += count; 769 /* set the preload length */
770 if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
771 dma_len = min(dma_len, f->ampdu_pld_size);
772 txh->PreloadSize = cpu_to_le16(dma_len);
773 } else {
774 txh->PreloadSize = 0;
775 }
766 776
767 if (count) { 777 mch = le16_to_cpu(txh->MacTxControlHigh);
768 /* patch up the last txh */
769 txh = (struct d11txh *) pkt[count - 1]->data;
770 mcl = le16_to_cpu(txh->MacTxControlLow);
771 mcl &= ~TXC_AMPDU_MASK;
772 mcl |= (TXC_AMPDU_LAST << TXC_AMPDU_SHIFT);
773 txh->MacTxControlLow = cpu_to_le16(mcl);
774
775 /* remove the null delimiter after last mpdu */
776 ndelim = txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM];
777 txh->RTSPLCPFallback[AMPDU_FBR_NULL_DELIM] = 0;
778 ampdu_len -= ndelim * AMPDU_DELIMITER_LEN;
779
780 /* remove the pad len from last mpdu */
781 fbr_iscck = ((le16_to_cpu(txh->XtraFrameTypes) & 0x3) == 0);
782 len = fbr_iscck ? BRCMS_GET_CCK_PLCP_LEN(txh->FragPLCPFallback)
783 : BRCMS_GET_MIMO_PLCP_LEN(txh->FragPLCPFallback);
784 ampdu_len -= roundup(len, 4) - len;
785
786 /* patch up the first txh & plcp */
787 txh = (struct d11txh *) pkt[0]->data;
788 plcp = (u8 *) (txh + 1);
789 778
790 BRCMS_SET_MIMO_PLCP_LEN(plcp, ampdu_len); 779 /* update RTS dur fields */
791 /* mark plcp to indicate ampdu */ 780 if (use_rts || use_cts) {
792 BRCMS_SET_MIMO_PLCP_AMPDU(plcp); 781 u16 durid;
782 if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
783 TXC_PREAMBLE_RTS_MAIN_SHORT)
784 rts_preamble_type = BRCMS_SHORT_PREAMBLE;
793 785
794 /* reset the mixed mode header durations */ 786 if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
795 if (txh->MModeLen) { 787 TXC_PREAMBLE_RTS_FB_SHORT)
796 u16 mmodelen = 788 rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
797 brcms_c_calc_lsig_len(wlc, rspec, ampdu_len);
798 txh->MModeLen = cpu_to_le16(mmodelen);
799 preamble_type = BRCMS_MM_PREAMBLE;
800 }
801 if (txh->MModeFbrLen) {
802 u16 mmfbrlen =
803 brcms_c_calc_lsig_len(wlc, rspec_fallback,
804 ampdu_len);
805 txh->MModeFbrLen = cpu_to_le16(mmfbrlen);
806 fbr_preamble_type = BRCMS_MM_PREAMBLE;
807 }
808 789
809 /* set the preload length */ 790 durid = brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
810 if (mcs_2_rate(mcs, true, false) >= f->dmaxferrate) {
811 dma_len = min(dma_len, f->ampdu_pld_size);
812 txh->PreloadSize = cpu_to_le16(dma_len);
813 } else
814 txh->PreloadSize = 0;
815
816 mch = le16_to_cpu(txh->MacTxControlHigh);
817
818 /* update RTS dur fields */
819 if (use_rts || use_cts) {
820 u16 durid;
821 rts = (struct ieee80211_rts *)&txh->rts_frame;
822 if ((mch & TXC_PREAMBLE_RTS_MAIN_SHORT) ==
823 TXC_PREAMBLE_RTS_MAIN_SHORT)
824 rts_preamble_type = BRCMS_SHORT_PREAMBLE;
825
826 if ((mch & TXC_PREAMBLE_RTS_FB_SHORT) ==
827 TXC_PREAMBLE_RTS_FB_SHORT)
828 rts_fbr_preamble_type = BRCMS_SHORT_PREAMBLE;
829
830 durid =
831 brcms_c_compute_rtscts_dur(wlc, use_cts, rts_rspec,
832 rspec, rts_preamble_type, 791 rspec, rts_preamble_type,
833 preamble_type, ampdu_len, 792 preamble_type,
834 true); 793 session->ampdu_len, true);
835 rts->duration = cpu_to_le16(durid); 794 rts->duration = cpu_to_le16(durid);
836 durid = brcms_c_compute_rtscts_dur(wlc, use_cts, 795 durid = brcms_c_compute_rtscts_dur(wlc, use_cts,
837 rts_rspec_fallback, 796 rts_rspec_fallback,
838 rspec_fallback, 797 rspec_fallback,
839 rts_fbr_preamble_type, 798 rts_fbr_preamble_type,
840 fbr_preamble_type, 799 fbr_preamble_type,
841 ampdu_len, true); 800 session->ampdu_len, true);
842 txh->RTSDurFallback = cpu_to_le16(durid); 801 txh->RTSDurFallback = cpu_to_le16(durid);
843 /* set TxFesTimeNormal */ 802 /* set TxFesTimeNormal */
844 txh->TxFesTimeNormal = rts->duration; 803 txh->TxFesTimeNormal = rts->duration;
845 /* set fallback rate version of TxFesTimeNormal */ 804 /* set fallback rate version of TxFesTimeNormal */
846 txh->TxFesTimeFallback = txh->RTSDurFallback; 805 txh->TxFesTimeFallback = txh->RTSDurFallback;
847 } 806 }
848
849 /* set flag and plcp for fallback rate */
850 if (fbr) {
851 mch |= TXC_AMPDU_FBR;
852 txh->MacTxControlHigh = cpu_to_le16(mch);
853 BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
854 BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
855 }
856
857 BCMMSG(wlc->wiphy, "wl%d: count %d ampdu_len %d\n",
858 wlc->pub->unit, count, ampdu_len);
859
860 /* inform rate_sel if it this is a rate probe pkt */
861 frameid = le16_to_cpu(txh->TxFrameID);
862 if (frameid & TXFID_RATE_PROBE_MASK)
863 wiphy_err(wiphy, "%s: XXX what to do with "
864 "TXFID_RATE_PROBE_MASK!?\n", __func__);
865
866 for (i = 0; i < count; i++)
867 brcms_c_txfifo(wlc, fifo, pkt[i], i == (count - 1),
868 ampdu->txpkt_weight);
869 807
808 /* set flag and plcp for fallback rate */
809 if (fbr) {
810 mch |= TXC_AMPDU_FBR;
811 txh->MacTxControlHigh = cpu_to_le16(mch);
812 BRCMS_SET_MIMO_PLCP_AMPDU(plcp);
813 BRCMS_SET_MIMO_PLCP_AMPDU(txh->FragPLCPFallback);
870 } 814 }
871 /* endif (count) */ 815
872 return err; 816 brcms_dbg_ht(wlc->hw->d11core, "wl%d: count %d ampdu_len %d\n",
817 wlc->pub->unit, skb_queue_len(&session->skb_list),
818 session->ampdu_len);
873} 819}
874 820
875static void 821static void
@@ -909,7 +855,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
909 u8 antselid = 0; 855 u8 antselid = 0;
910 u8 retry_limit, rr_retry_limit; 856 u8 retry_limit, rr_retry_limit;
911 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p); 857 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
912 struct wiphy *wiphy = wlc->wiphy;
913 858
914#ifdef DEBUG 859#ifdef DEBUG
915 u8 hole[AMPDU_MAX_MPDU]; 860 u8 hole[AMPDU_MAX_MPDU];
@@ -955,13 +900,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
955 if (supr_status) { 900 if (supr_status) {
956 update_rate = false; 901 update_rate = false;
957 if (supr_status == TX_STATUS_SUPR_BADCH) { 902 if (supr_status == TX_STATUS_SUPR_BADCH) {
958 wiphy_err(wiphy, 903 brcms_err(wlc->hw->d11core,
959 "%s: Pkt tx suppressed, illegal channel possibly %d\n", 904 "%s: Pkt tx suppressed, illegal channel possibly %d\n",
960 __func__, CHSPEC_CHANNEL( 905 __func__, CHSPEC_CHANNEL(
961 wlc->default_bss->chanspec)); 906 wlc->default_bss->chanspec));
962 } else { 907 } else {
963 if (supr_status != TX_STATUS_SUPR_FRAG) 908 if (supr_status != TX_STATUS_SUPR_FRAG)
964 wiphy_err(wiphy, "%s: supr_status 0x%x\n", 909 brcms_err(wlc->hw->d11core,
910 "%s: supr_status 0x%x\n",
965 __func__, supr_status); 911 __func__, supr_status);
966 } 912 }
967 /* no need to retry for badch; will fail again */ 913 /* no need to retry for badch; will fail again */
@@ -977,20 +923,14 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
977 * if there were underflows, but pre-loading 923 * if there were underflows, but pre-loading
978 * is not active, notify rate adaptation. 924 * is not active, notify rate adaptation.
979 */ 925 */
980 if (brcms_c_ffpld_check_txfunfl(wlc, 926 if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
981 prio2fifo[tid]) > 0)
982 tx_error = true; 927 tx_error = true;
983 } 928 }
984 } else if (txs->phyerr) { 929 } else if (txs->phyerr) {
985 update_rate = false; 930 update_rate = false;
986 wiphy_err(wiphy, "%s: ampdu tx phy error (0x%x)\n", 931 brcms_err(wlc->hw->d11core,
932 "%s: ampdu tx phy error (0x%x)\n",
987 __func__, txs->phyerr); 933 __func__, txs->phyerr);
988
989 if (brcm_msg_level & LOG_ERROR_VAL) {
990 brcmu_prpkt("txpkt (AMPDU)", p);
991 brcms_c_print_txdesc((struct d11txh *) p->data);
992 }
993 brcms_c_print_txstatus(txs);
994 } 934 }
995 } 935 }
996 936
@@ -1003,6 +943,8 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
1003 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN); 943 h = (struct ieee80211_hdr *)(plcp + D11_PHY_HDR_LEN);
1004 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT; 944 seq = le16_to_cpu(h->seq_ctrl) >> SEQNUM_SHIFT;
1005 945
946 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
947
1006 if (tot_mpdu == 0) { 948 if (tot_mpdu == 0) {
1007 mcs = plcp[0] & MIMO_PLCP_MCS_MASK; 949 mcs = plcp[0] & MIMO_PLCP_MCS_MASK;
1008 mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel); 950 mimoantsel = le16_to_cpu(txh->ABI_MimoAntSel);
@@ -1012,10 +954,10 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
1012 ack_recd = false; 954 ack_recd = false;
1013 if (ba_recd) { 955 if (ba_recd) {
1014 bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX); 956 bindex = MODSUB_POW2(seq, start_seq, SEQNUM_MAX);
1015 BCMMSG(wiphy, 957 brcms_dbg_ht(wlc->hw->d11core,
1016 "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n", 958 "tid %d seq %d, start_seq %d, bindex %d set %d, index %d\n",
1017 tid, seq, start_seq, bindex, 959 tid, seq, start_seq, bindex,
1018 isset(bitmap, bindex), index); 960 isset(bitmap, bindex), index);
1019 /* if acked then clear bit and free packet */ 961 /* if acked then clear bit and free packet */
1020 if ((bindex < AMPDU_TX_BA_MAX_WSIZE) 962 if ((bindex < AMPDU_TX_BA_MAX_WSIZE)
1021 && isset(bitmap, bindex)) { 963 && isset(bitmap, bindex)) {
@@ -1046,14 +988,16 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
1046 /* either retransmit or send bar if ack not recd */ 988 /* either retransmit or send bar if ack not recd */
1047 if (!ack_recd) { 989 if (!ack_recd) {
1048 if (retry && (ini->txretry[index] < (int)retry_limit)) { 990 if (retry && (ini->txretry[index] < (int)retry_limit)) {
991 int ret;
1049 ini->txretry[index]++; 992 ini->txretry[index]++;
1050 ini->tx_in_transit--; 993 ini->tx_in_transit--;
994 ret = brcms_c_txfifo(wlc, queue, p);
1051 /* 995 /*
1052 * Use high prededence for retransmit to 996 * We shouldn't be out of space in the DMA
1053 * give some punch 997 * ring here since we're reinserting a frame
998 * that was just pulled out.
1054 */ 999 */
1055 brcms_c_txq_enq(wlc, scb, p, 1000 WARN_ONCE(ret, "queue %d out of txds\n", queue);
1056 BRCMS_PRIO_TO_HI_PREC(tid));
1057 } else { 1001 } else {
1058 /* Retry timeout */ 1002 /* Retry timeout */
1059 ini->tx_in_transit--; 1003 ini->tx_in_transit--;
@@ -1064,9 +1008,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
1064 IEEE80211_TX_STAT_AMPDU_NO_BACK; 1008 IEEE80211_TX_STAT_AMPDU_NO_BACK;
1065 skb_pull(p, D11_PHY_HDR_LEN); 1009 skb_pull(p, D11_PHY_HDR_LEN);
1066 skb_pull(p, D11_TXH_LEN); 1010 skb_pull(p, D11_TXH_LEN);
1067 BCMMSG(wiphy, 1011 brcms_dbg_ht(wlc->hw->d11core,
1068 "BA Timeout, seq %d, in_transit %d\n", 1012 "BA Timeout, seq %d, in_transit %d\n",
1069 seq, ini->tx_in_transit); 1013 seq, ini->tx_in_transit);
1070 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, 1014 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
1071 p); 1015 p);
1072 } 1016 }
@@ -1080,12 +1024,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
1080 1024
1081 p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); 1025 p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
1082 } 1026 }
1083 brcms_c_send_q(wlc);
1084 1027
1085 /* update rate state */ 1028 /* update rate state */
1086 antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel); 1029 antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
1087
1088 brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
1089} 1030}
1090 1031
1091void 1032void
@@ -1133,6 +1074,8 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
1133 while (p) { 1074 while (p) {
1134 tx_info = IEEE80211_SKB_CB(p); 1075 tx_info = IEEE80211_SKB_CB(p);
1135 txh = (struct d11txh *) p->data; 1076 txh = (struct d11txh *) p->data;
1077 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
1078 sizeof(*txh));
1136 mcl = le16_to_cpu(txh->MacTxControlLow); 1079 mcl = le16_to_cpu(txh->MacTxControlLow);
1137 brcmu_pkt_buf_free_skb(p); 1080 brcmu_pkt_buf_free_skb(p);
1138 /* break out if last packet of ampdu */ 1081 /* break out if last packet of ampdu */
@@ -1142,7 +1085,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
1142 p = dma_getnexttxp(wlc->hw->di[queue], 1085 p = dma_getnexttxp(wlc->hw->di[queue],
1143 DMA_RANGE_TRANSMITTED); 1086 DMA_RANGE_TRANSMITTED);
1144 } 1087 }
1145 brcms_c_txfifo_complete(wlc, queue, ampdu->txpkt_weight);
1146 } 1088 }
1147} 1089}
1148 1090
@@ -1182,23 +1124,6 @@ void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu)
1182} 1124}
1183 1125
1184/* 1126/*
1185 * callback function that helps flushing ampdu packets from a priority queue
1186 */
1187static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
1188{
1189 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(mpdu);
1190 struct cb_del_ampdu_pars *ampdu_pars =
1191 (struct cb_del_ampdu_pars *)arg_a;
1192 bool rc;
1193
1194 rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
1195 rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
1196 tx_info->rate_driver_data[0] == ampdu_pars->sta);
1197 rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
1198 return rc;
1199}
1200
1201/*
1202 * callback function that helps invalidating ampdu packets in a DMA queue 1127 * callback function that helps invalidating ampdu packets in a DMA queue
1203 */ 1128 */
1204static void dma_cb_fn_ampdu(void *txi, void *arg_a) 1129static void dma_cb_fn_ampdu(void *txi, void *arg_a)
@@ -1218,15 +1143,5 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
1218void brcms_c_ampdu_flush(struct brcms_c_info *wlc, 1143void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
1219 struct ieee80211_sta *sta, u16 tid) 1144 struct ieee80211_sta *sta, u16 tid)
1220{ 1145{
1221 struct brcms_txq_info *qi = wlc->pkt_queue;
1222 struct pktq *pq = &qi->q;
1223 int prec;
1224 struct cb_del_ampdu_pars ampdu_pars;
1225
1226 ampdu_pars.sta = sta;
1227 ampdu_pars.tid = tid;
1228 for (prec = 0; prec < pq->num_prec; prec++)
1229 brcmu_pktq_pflush(pq, prec, true, cb_del_ampdu_pkt,
1230 (void *)&ampdu_pars);
1231 brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu); 1146 brcms_c_inval_dma_pkts(wlc->hw, sta, dma_cb_fn_ampdu);
1232} 1147}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
index 421f4ba7c63c..73d01e586109 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
@@ -17,11 +17,34 @@
17#ifndef _BRCM_AMPDU_H_ 17#ifndef _BRCM_AMPDU_H_
18#define _BRCM_AMPDU_H_ 18#define _BRCM_AMPDU_H_
19 19
20/*
21 * Data structure representing an in-progress session for accumulating
22 * frames for AMPDU.
23 *
24 * wlc: pointer to common driver data
25 * skb_list: queue of skb's for AMPDU
26 * max_ampdu_len: maximum length for this AMPDU
27 * max_ampdu_frames: maximum number of frames for this AMPDU
28 * ampdu_len: total number of bytes accumulated for this AMPDU
29 * dma_len: DMA length of this AMPDU
30 */
31struct brcms_ampdu_session {
32 struct brcms_c_info *wlc;
33 struct sk_buff_head skb_list;
34 unsigned max_ampdu_len;
35 u16 max_ampdu_frames;
36 u16 ampdu_len;
37 u16 dma_len;
38};
39
40extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
41 struct brcms_c_info *wlc);
42extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
43 struct sk_buff *p);
44extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
45
20extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc); 46extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
21extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu); 47extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
22extern int brcms_c_sendampdu(struct ampdu_info *ampdu,
23 struct brcms_txq_info *qi,
24 struct sk_buff **aggp, int prec);
25extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb, 48extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
26 struct sk_buff *p, struct tx_status *txs); 49 struct sk_buff *p, struct tx_status *txs);
27extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc); 50extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
index 55e12c327911..54c616919590 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/antsel.c
@@ -21,6 +21,7 @@
21#include "main.h" 21#include "main.h"
22#include "phy_shim.h" 22#include "phy_shim.h"
23#include "antsel.h" 23#include "antsel.h"
24#include "debug.h"
24 25
25#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */ 26#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */
26#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */ 27#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */
@@ -137,7 +138,8 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
137 asi->antsel_avail = false; 138 asi->antsel_avail = false;
138 } else { 139 } else {
139 asi->antsel_avail = false; 140 asi->antsel_avail = false;
140 wiphy_err(wlc->wiphy, "antsel_attach: 2o3 " 141 brcms_err(wlc->hw->d11core,
142 "antsel_attach: 2o3 "
141 "board cfg invalid\n"); 143 "board cfg invalid\n");
142 } 144 }
143 145
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
index 27dd73eef56d..871781e6a713 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/brcms_trace_events.h
@@ -14,22 +14,29 @@
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#undef TRACE_SYSTEM
18#define TRACE_SYSTEM brcmsmac
19
20#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ) 17#if !defined(__TRACE_BRCMSMAC_H) || defined(TRACE_HEADER_MULTI_READ)
21 18
22#define __TRACE_BRCMSMAC_H 19#define __TRACE_BRCMSMAC_H
23 20
21#include <linux/types.h>
22#include <linux/device.h>
24#include <linux/tracepoint.h> 23#include <linux/tracepoint.h>
25#include "mac80211_if.h" 24#include "mac80211_if.h"
26 25
27#ifndef CONFIG_BRCMDBG 26#ifndef CONFIG_BRCM_TRACING
28#undef TRACE_EVENT 27#undef TRACE_EVENT
29#define TRACE_EVENT(name, proto, ...) \ 28#define TRACE_EVENT(name, proto, ...) \
30static inline void trace_ ## name(proto) {} 29static inline void trace_ ## name(proto) {}
30#undef DECLARE_EVENT_CLASS
31#define DECLARE_EVENT_CLASS(...)
32#undef DEFINE_EVENT
33#define DEFINE_EVENT(evt_class, name, proto, ...) \
34static inline void trace_ ## name(proto) {}
31#endif 35#endif
32 36
37#undef TRACE_SYSTEM
38#define TRACE_SYSTEM brcmsmac
39
33/* 40/*
34 * We define a tracepoint, its arguments, its printk format and its 41 * We define a tracepoint, its arguments, its printk format and its
35 * 'fast binary record' layout. 42 * 'fast binary record' layout.
@@ -78,9 +85,165 @@ TRACE_EVENT(brcms_dpc,
78 ) 85 )
79); 86);
80 87
88TRACE_EVENT(brcms_macintstatus,
89 TP_PROTO(const struct device *dev, int in_isr, u32 macintstatus,
90 u32 mask),
91 TP_ARGS(dev, in_isr, macintstatus, mask),
92 TP_STRUCT__entry(
93 __string(dev, dev_name(dev))
94 __field(int, in_isr)
95 __field(u32, macintstatus)
96 __field(u32, mask)
97 ),
98 TP_fast_assign(
99 __assign_str(dev, dev_name(dev));
100 __entry->in_isr = in_isr;
101 __entry->macintstatus = macintstatus;
102 __entry->mask = mask;
103 ),
104 TP_printk("[%s] in_isr=%d macintstatus=%#x mask=%#x", __get_str(dev),
105 __entry->in_isr, __entry->macintstatus, __entry->mask)
106);
107
108#undef TRACE_SYSTEM
109#define TRACE_SYSTEM brcmsmac_tx
110
111TRACE_EVENT(brcms_txdesc,
112 TP_PROTO(const struct device *dev,
113 void *txh, size_t txh_len),
114 TP_ARGS(dev, txh, txh_len),
115 TP_STRUCT__entry(
116 __string(dev, dev_name(dev))
117 __dynamic_array(u8, txh, txh_len)
118 ),
119 TP_fast_assign(
120 __assign_str(dev, dev_name(dev));
121 memcpy(__get_dynamic_array(txh), txh, txh_len);
122 ),
123 TP_printk("[%s] txdesc", __get_str(dev))
124);
125
126TRACE_EVENT(brcms_txstatus,
127 TP_PROTO(const struct device *dev, u16 framelen, u16 frameid,
128 u16 status, u16 lasttxtime, u16 sequence, u16 phyerr,
129 u16 ackphyrxsh),
130 TP_ARGS(dev, framelen, frameid, status, lasttxtime, sequence, phyerr,
131 ackphyrxsh),
132 TP_STRUCT__entry(
133 __string(dev, dev_name(dev))
134 __field(u16, framelen)
135 __field(u16, frameid)
136 __field(u16, status)
137 __field(u16, lasttxtime)
138 __field(u16, sequence)
139 __field(u16, phyerr)
140 __field(u16, ackphyrxsh)
141 ),
142 TP_fast_assign(
143 __assign_str(dev, dev_name(dev));
144 __entry->framelen = framelen;
145 __entry->frameid = frameid;
146 __entry->status = status;
147 __entry->lasttxtime = lasttxtime;
148 __entry->sequence = sequence;
149 __entry->phyerr = phyerr;
150 __entry->ackphyrxsh = ackphyrxsh;
151 ),
152 TP_printk("[%s] FrameId %#04x TxStatus %#04x LastTxTime %#04x "
153 "Seq %#04x PHYTxStatus %#04x RxAck %#04x",
154 __get_str(dev), __entry->frameid, __entry->status,
155 __entry->lasttxtime, __entry->sequence, __entry->phyerr,
156 __entry->ackphyrxsh)
157);
158
159TRACE_EVENT(brcms_ampdu_session,
160 TP_PROTO(const struct device *dev, unsigned max_ampdu_len,
161 u16 max_ampdu_frames, u16 ampdu_len, u16 ampdu_frames,
162 u16 dma_len),
163 TP_ARGS(dev, max_ampdu_len, max_ampdu_frames, ampdu_len, ampdu_frames,
164 dma_len),
165 TP_STRUCT__entry(
166 __string(dev, dev_name(dev))
167 __field(unsigned, max_ampdu_len)
168 __field(u16, max_ampdu_frames)
169 __field(u16, ampdu_len)
170 __field(u16, ampdu_frames)
171 __field(u16, dma_len)
172 ),
173 TP_fast_assign(
174 __assign_str(dev, dev_name(dev));
175 __entry->max_ampdu_len = max_ampdu_len;
176 __entry->max_ampdu_frames = max_ampdu_frames;
177 __entry->ampdu_len = ampdu_len;
178 __entry->ampdu_frames = ampdu_frames;
179 __entry->dma_len = dma_len;
180 ),
181 TP_printk("[%s] ampdu session max_len=%u max_frames=%u len=%u frames=%u dma_len=%u",
182 __get_str(dev), __entry->max_ampdu_len,
183 __entry->max_ampdu_frames, __entry->ampdu_len,
184 __entry->ampdu_frames, __entry->dma_len)
185);
186
187#undef TRACE_SYSTEM
188#define TRACE_SYSTEM brcmsmac_msg
189
190#define MAX_MSG_LEN 100
191
192DECLARE_EVENT_CLASS(brcms_msg_event,
193 TP_PROTO(struct va_format *vaf),
194 TP_ARGS(vaf),
195 TP_STRUCT__entry(
196 __dynamic_array(char, msg, MAX_MSG_LEN)
197 ),
198 TP_fast_assign(
199 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
200 MAX_MSG_LEN, vaf->fmt,
201 *vaf->va) >= MAX_MSG_LEN);
202 ),
203 TP_printk("%s", __get_str(msg))
204);
205
206DEFINE_EVENT(brcms_msg_event, brcms_info,
207 TP_PROTO(struct va_format *vaf),
208 TP_ARGS(vaf)
209);
210
211DEFINE_EVENT(brcms_msg_event, brcms_warn,
212 TP_PROTO(struct va_format *vaf),
213 TP_ARGS(vaf)
214);
215
216DEFINE_EVENT(brcms_msg_event, brcms_err,
217 TP_PROTO(struct va_format *vaf),
218 TP_ARGS(vaf)
219);
220
221DEFINE_EVENT(brcms_msg_event, brcms_crit,
222 TP_PROTO(struct va_format *vaf),
223 TP_ARGS(vaf)
224);
225
226TRACE_EVENT(brcms_dbg,
227 TP_PROTO(u32 level, const char *func, struct va_format *vaf),
228 TP_ARGS(level, func, vaf),
229 TP_STRUCT__entry(
230 __field(u32, level)
231 __string(func, func)
232 __dynamic_array(char, msg, MAX_MSG_LEN)
233 ),
234 TP_fast_assign(
235 __entry->level = level;
236 __assign_str(func, func);
237 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
238 MAX_MSG_LEN, vaf->fmt,
239 *vaf->va) >= MAX_MSG_LEN);
240 ),
241 TP_printk("%s: %s", __get_str(func), __get_str(msg))
242);
243
81#endif /* __TRACE_BRCMSMAC_H */ 244#endif /* __TRACE_BRCMSMAC_H */
82 245
83#ifdef CONFIG_BRCMDBG 246#ifdef CONFIG_BRCM_TRACING
84 247
85#undef TRACE_INCLUDE_PATH 248#undef TRACE_INCLUDE_PATH
86#define TRACE_INCLUDE_PATH . 249#define TRACE_INCLUDE_PATH .
@@ -89,4 +252,4 @@ TRACE_EVENT(brcms_dpc,
89 252
90#include <trace/define_trace.h> 253#include <trace/define_trace.h>
91 254
92#endif /* CONFIG_BRCMDBG */ 255#endif /* CONFIG_BRCM_TRACING */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 64a48f06d68b..a90b72202ec5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -26,6 +26,7 @@
26#include "stf.h" 26#include "stf.h"
27#include "channel.h" 27#include "channel.h"
28#include "mac80211_if.h" 28#include "mac80211_if.h"
29#include "debug.h"
29 30
30/* QDB() macro takes a dB value and converts to a quarter dB value */ 31/* QDB() macro takes a dB value and converts to a quarter dB value */
31#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR) 32#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
@@ -336,8 +337,6 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
336 const char *ccode = sprom->alpha2; 337 const char *ccode = sprom->alpha2;
337 int ccode_len = sizeof(sprom->alpha2); 338 int ccode_len = sizeof(sprom->alpha2);
338 339
339 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
340
341 wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC); 340 wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
342 if (wlc_cm == NULL) 341 if (wlc_cm == NULL)
343 return NULL; 342 return NULL;
@@ -615,8 +614,8 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
615 614
616 /* check the chanspec */ 615 /* check the chanspec */
617 if (brcms_c_chspec_malformed(chspec)) { 616 if (brcms_c_chspec_malformed(chspec)) {
618 wiphy_err(wlc->wiphy, "wl%d: malformed chanspec 0x%x\n", 617 brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n",
619 wlc->pub->unit, chspec); 618 wlc->pub->unit, chspec);
620 return false; 619 return false;
621 } 620 }
622 621
@@ -738,7 +737,8 @@ static int brcms_reg_notifier(struct wiphy *wiphy,
738 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); 737 mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
739 } else { 738 } else {
740 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE); 739 mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
741 wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n", 740 brcms_err(wlc->hw->d11core,
741 "wl%d: %s: no valid channel for \"%s\"\n",
742 wlc->pub->unit, __func__, request->alpha2); 742 wlc->pub->unit, __func__, request->alpha2);
743 } 743 }
744 744
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
new file mode 100644
index 000000000000..9761deb46204
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 * Copyright (c) 2012 Canonical Ltd.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
12 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
14 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
15 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17#include <linux/debugfs.h>
18#include <linux/if_ether.h>
19#include <linux/if.h>
20#include <linux/net.h>
21#include <linux/netdevice.h>
22#include <linux/ieee80211.h>
23#include <linux/module.h>
24#include <net/mac80211.h>
25
26#include <defs.h>
27#include <brcmu_wifi.h>
28#include <brcmu_utils.h>
29#include "types.h"
30#include "main.h"
31#include "debug.h"
32#include "brcms_trace_events.h"
33
34static struct dentry *root_folder;
35
36void brcms_debugfs_init(void)
37{
38 root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
39 if (IS_ERR(root_folder))
40 root_folder = NULL;
41}
42
43void brcms_debugfs_exit(void)
44{
45 if (!root_folder)
46 return;
47
48 debugfs_remove_recursive(root_folder);
49 root_folder = NULL;
50}
51
52int brcms_debugfs_attach(struct brcms_pub *drvr)
53{
54 if (!root_folder)
55 return -ENODEV;
56
57 drvr->dbgfs_dir = debugfs_create_dir(
58 dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
59 return PTR_RET(drvr->dbgfs_dir);
60}
61
62void brcms_debugfs_detach(struct brcms_pub *drvr)
63{
64 if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
65 debugfs_remove_recursive(drvr->dbgfs_dir);
66}
67
68struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr)
69{
70 return drvr->dbgfs_dir;
71}
72
73static
74ssize_t brcms_debugfs_hardware_read(struct file *f, char __user *data,
75 size_t count, loff_t *ppos)
76{
77 char buf[128];
78 int res;
79 struct brcms_pub *drvr = f->private_data;
80
81 /* only allow read from start */
82 if (*ppos > 0)
83 return 0;
84
85 res = scnprintf(buf, sizeof(buf),
86 "board vendor: %x\n"
87 "board type: %x\n"
88 "board revision: %x\n"
89 "board flags: %x\n"
90 "board flags2: %x\n"
91 "firmware revision: %x\n",
92 drvr->wlc->hw->d11core->bus->boardinfo.vendor,
93 drvr->wlc->hw->d11core->bus->boardinfo.type,
94 drvr->wlc->hw->boardrev,
95 drvr->wlc->hw->boardflags,
96 drvr->wlc->hw->boardflags2,
97 drvr->wlc->ucode_rev
98 );
99
100 return simple_read_from_buffer(data, count, ppos, buf, res);
101}
102
103static const struct file_operations brcms_debugfs_hardware_ops = {
104 .owner = THIS_MODULE,
105 .open = simple_open,
106 .read = brcms_debugfs_hardware_read
107};
108
109void brcms_debugfs_create_files(struct brcms_pub *drvr)
110{
111 struct dentry *dentry = drvr->dbgfs_dir;
112
113 if (!IS_ERR_OR_NULL(dentry))
114 debugfs_create_file("hardware", S_IRUGO, dentry,
115 drvr, &brcms_debugfs_hardware_ops);
116}
117
118#define __brcms_fn(fn) \
119void __brcms_ ##fn(struct device *dev, const char *fmt, ...) \
120{ \
121 struct va_format vaf = { \
122 .fmt = fmt, \
123 }; \
124 va_list args; \
125 \
126 va_start(args, fmt); \
127 vaf.va = &args; \
128 dev_ ##fn(dev, "%pV", &vaf); \
129 trace_brcms_ ##fn(&vaf); \
130 va_end(args); \
131}
132
133__brcms_fn(info)
134__brcms_fn(warn)
135__brcms_fn(err)
136__brcms_fn(crit)
137
138#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
139void __brcms_dbg(struct device *dev, u32 level, const char *func,
140 const char *fmt, ...)
141{
142 struct va_format vaf = {
143 .fmt = fmt,
144 };
145 va_list args;
146
147 va_start(args, fmt);
148 vaf.va = &args;
149#ifdef CONFIG_BRCMDBG
150 if ((brcm_msg_level & level) && net_ratelimit())
151 dev_err(dev, "%s %pV", func, &vaf);
152#endif
153 trace_brcms_dbg(level, func, &vaf);
154 va_end(args);
155}
156#endif
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.h b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
new file mode 100644
index 000000000000..796836b0f469
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.h
@@ -0,0 +1,75 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef _BRCMS_DEBUG_H_
17#define _BRCMS_DEBUG_H_
18
19#include <linux/device.h>
20#include <linux/bcma/bcma.h>
21#include <net/cfg80211.h>
22#include <net/mac80211.h>
23#include "main.h"
24#include "mac80211_if.h"
25
26__printf(2, 3)
27void __brcms_info(struct device *dev, const char *fmt, ...);
28__printf(2, 3)
29void __brcms_warn(struct device *dev, const char *fmt, ...);
30__printf(2, 3)
31void __brcms_err(struct device *dev, const char *fmt, ...);
32__printf(2, 3)
33void __brcms_crit(struct device *dev, const char *fmt, ...);
34
35#if defined(CONFIG_BRCMDBG) || defined(CONFIG_BRCM_TRACING)
36__printf(4, 5)
37void __brcms_dbg(struct device *dev, u32 level, const char *func,
38 const char *fmt, ...);
39#else
40static inline __printf(4, 5)
41void __brcms_dbg(struct device *dev, u32 level, const char *func,
42 const char *fmt, ...)
43{
44}
45#endif
46
47/*
48 * Debug macros cannot be used when wlc is uninitialized. Generally
49 * this means any code that could run before brcms_c_attach() has
50 * returned successfully probably shouldn't use the following macros.
51 */
52
53#define brcms_dbg(core, l, f, a...) __brcms_dbg(&(core)->dev, l, __func__, f, ##a)
54#define brcms_info(core, f, a...) __brcms_info(&(core)->dev, f, ##a)
55#define brcms_warn(core, f, a...) __brcms_warn(&(core)->dev, f, ##a)
56#define brcms_err(core, f, a...) __brcms_err(&(core)->dev, f, ##a)
57#define brcms_crit(core, f, a...) __brcms_crit(&(core)->dev, f, ##a)
58
59#define brcms_dbg_info(core, f, a...) brcms_dbg(core, BRCM_DL_INFO, f, ##a)
60#define brcms_dbg_mac80211(core, f, a...) brcms_dbg(core, BRCM_DL_MAC80211, f, ##a)
61#define brcms_dbg_rx(core, f, a...) brcms_dbg(core, BRCM_DL_RX, f, ##a)
62#define brcms_dbg_tx(core, f, a...) brcms_dbg(core, BRCM_DL_TX, f, ##a)
63#define brcms_dbg_int(core, f, a...) brcms_dbg(core, BRCM_DL_INT, f, ##a)
64#define brcms_dbg_dma(core, f, a...) brcms_dbg(core, BRCM_DL_DMA, f, ##a)
65#define brcms_dbg_ht(core, f, a...) brcms_dbg(core, BRCM_DL_HT, f, ##a)
66
67struct brcms_pub;
68void brcms_debugfs_init(void);
69void brcms_debugfs_exit(void);
70int brcms_debugfs_attach(struct brcms_pub *drvr);
71void brcms_debugfs_detach(struct brcms_pub *drvr);
72struct dentry *brcms_debugfs_get_devdir(struct brcms_pub *drvr);
73void brcms_debugfs_create_files(struct brcms_pub *drvr);
74
75#endif /* _BRCMS_DEBUG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 5e53305bd9a9..1860c572b3c4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -14,17 +14,22 @@
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/slab.h> 17#include <linux/slab.h>
20#include <linux/delay.h> 18#include <linux/delay.h>
21#include <linux/pci.h> 19#include <linux/pci.h>
20#include <net/cfg80211.h>
21#include <net/mac80211.h>
22 22
23#include <brcmu_utils.h> 23#include <brcmu_utils.h>
24#include <aiutils.h> 24#include <aiutils.h>
25#include "types.h" 25#include "types.h"
26#include "main.h"
26#include "dma.h" 27#include "dma.h"
27#include "soc.h" 28#include "soc.h"
29#include "scb.h"
30#include "ampdu.h"
31#include "debug.h"
32#include "brcms_trace_events.h"
28 33
29/* 34/*
30 * dma register field offset calculation 35 * dma register field offset calculation
@@ -176,28 +181,6 @@
176 181
177#define BCMEXTRAHDROOM 172 182#define BCMEXTRAHDROOM 172
178 183
179/* debug/trace */
180#ifdef DEBUG
181#define DMA_ERROR(fmt, ...) \
182do { \
183 if (*di->msg_level & 1) \
184 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
185} while (0)
186#define DMA_TRACE(fmt, ...) \
187do { \
188 if (*di->msg_level & 2) \
189 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
190} while (0)
191#else
192#define DMA_ERROR(fmt, ...) \
193 no_printk(fmt, ##__VA_ARGS__)
194#define DMA_TRACE(fmt, ...) \
195 no_printk(fmt, ##__VA_ARGS__)
196#endif /* DEBUG */
197
198#define DMA_NONE(fmt, ...) \
199 no_printk(fmt, ##__VA_ARGS__)
200
201#define MAXNAMEL 8 /* 8 char names */ 184#define MAXNAMEL 8 /* 8 char names */
202 185
203/* macros to convert between byte offsets and indexes */ 186/* macros to convert between byte offsets and indexes */
@@ -224,12 +207,14 @@ struct dma64desc {
224/* dma engine software state */ 207/* dma engine software state */
225struct dma_info { 208struct dma_info {
226 struct dma_pub dma; /* exported structure */ 209 struct dma_pub dma; /* exported structure */
227 uint *msg_level; /* message level pointer */
228 char name[MAXNAMEL]; /* callers name for diag msgs */ 210 char name[MAXNAMEL]; /* callers name for diag msgs */
229 211
230 struct bcma_device *core; 212 struct bcma_device *core;
231 struct device *dmadev; 213 struct device *dmadev;
232 214
215 /* session information for AMPDU */
216 struct brcms_ampdu_session ampdu_session;
217
233 bool dma64; /* this dma engine is operating in 64-bit mode */ 218 bool dma64; /* this dma engine is operating in 64-bit mode */
234 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ 219 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
235 220
@@ -298,12 +283,6 @@ struct dma_info {
298 bool aligndesc_4k; 283 bool aligndesc_4k;
299}; 284};
300 285
301/*
302 * default dma message level (if input msg_level
303 * pointer is null in dma_attach())
304 */
305static uint dma_msg_level;
306
307/* Check for odd number of 1's */ 286/* Check for odd number of 1's */
308static u32 parity32(__le32 data) 287static u32 parity32(__le32 data)
309{ 288{
@@ -353,7 +332,7 @@ static uint prevtxd(struct dma_info *di, uint i)
353 332
354static uint nextrxd(struct dma_info *di, uint i) 333static uint nextrxd(struct dma_info *di, uint i)
355{ 334{
356 return txd(di, i + 1); 335 return rxd(di, i + 1);
357} 336}
358 337
359static uint ntxdactive(struct dma_info *di, uint h, uint t) 338static uint ntxdactive(struct dma_info *di, uint h, uint t)
@@ -370,10 +349,8 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
370{ 349{
371 uint dmactrlflags; 350 uint dmactrlflags;
372 351
373 if (di == NULL) { 352 if (di == NULL)
374 DMA_ERROR("NULL dma handle\n");
375 return 0; 353 return 0;
376 }
377 354
378 dmactrlflags = di->dma.dmactrlflags; 355 dmactrlflags = di->dma.dmactrlflags;
379 dmactrlflags &= ~mask; 356 dmactrlflags &= ~mask;
@@ -423,13 +400,15 @@ static bool _dma_isaddrext(struct dma_info *di)
423 /* not all tx or rx channel are available */ 400 /* not all tx or rx channel are available */
424 if (di->d64txregbase != 0) { 401 if (di->d64txregbase != 0) {
425 if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control))) 402 if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
426 DMA_ERROR("%s: DMA64 tx doesn't have AE set\n", 403 brcms_dbg_dma(di->core,
427 di->name); 404 "%s: DMA64 tx doesn't have AE set\n",
405 di->name);
428 return true; 406 return true;
429 } else if (di->d64rxregbase != 0) { 407 } else if (di->d64rxregbase != 0) {
430 if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control))) 408 if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
431 DMA_ERROR("%s: DMA64 rx doesn't have AE set\n", 409 brcms_dbg_dma(di->core,
432 di->name); 410 "%s: DMA64 rx doesn't have AE set\n",
411 di->name);
433 return true; 412 return true;
434 } 413 }
435 414
@@ -530,8 +509,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
530 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, 509 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
531 &alloced, &di->txdpaorig); 510 &alloced, &di->txdpaorig);
532 if (va == NULL) { 511 if (va == NULL) {
533 DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n", 512 brcms_dbg_dma(di->core,
534 di->name); 513 "%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
514 di->name);
535 return false; 515 return false;
536 } 516 }
537 align = (1 << align_bits); 517 align = (1 << align_bits);
@@ -544,8 +524,9 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
544 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits, 524 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
545 &alloced, &di->rxdpaorig); 525 &alloced, &di->rxdpaorig);
546 if (va == NULL) { 526 if (va == NULL) {
547 DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n", 527 brcms_dbg_dma(di->core,
548 di->name); 528 "%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
529 di->name);
549 return false; 530 return false;
550 } 531 }
551 align = (1 << align_bits); 532 align = (1 << align_bits);
@@ -564,12 +545,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
564 return dma64_alloc(di, direction); 545 return dma64_alloc(di, direction);
565} 546}
566 547
567struct dma_pub *dma_attach(char *name, struct si_pub *sih, 548struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
568 struct bcma_device *core,
569 uint txregbase, uint rxregbase, uint ntxd, uint nrxd, 549 uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
570 uint rxbufsize, int rxextheadroom, 550 uint rxbufsize, int rxextheadroom,
571 uint nrxpost, uint rxoffset, uint *msg_level) 551 uint nrxpost, uint rxoffset)
572{ 552{
553 struct si_pub *sih = wlc->hw->sih;
554 struct bcma_device *core = wlc->hw->d11core;
573 struct dma_info *di; 555 struct dma_info *di;
574 u8 rev = core->id.rev; 556 u8 rev = core->id.rev;
575 uint size; 557 uint size;
@@ -580,9 +562,6 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
580 if (di == NULL) 562 if (di == NULL)
581 return NULL; 563 return NULL;
582 564
583 di->msg_level = msg_level ? msg_level : &dma_msg_level;
584
585
586 di->dma64 = 565 di->dma64 =
587 ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64); 566 ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
588 567
@@ -598,11 +577,11 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
598 */ 577 */
599 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); 578 _dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
600 579
601 DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d " 580 brcms_dbg_dma(di->core, "%s: %s flags 0x%x ntxd %d nrxd %d "
602 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d " 581 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
603 "txregbase %u rxregbase %u\n", name, "DMA64", 582 "txregbase %u rxregbase %u\n", name, "DMA64",
604 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize, 583 di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
605 rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase); 584 rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
606 585
607 /* make a private copy of our callers name */ 586 /* make a private copy of our callers name */
608 strncpy(di->name, name, MAXNAMEL); 587 strncpy(di->name, name, MAXNAMEL);
@@ -664,8 +643,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
664 di->dmadesc_align = 4; /* 16 byte alignment */ 643 di->dmadesc_align = 4; /* 16 byte alignment */
665 } 644 }
666 645
667 DMA_NONE("DMA descriptor align_needed %d, align %d\n", 646 brcms_dbg_dma(di->core, "DMA descriptor align_needed %d, align %d\n",
668 di->aligndesc_4k, di->dmadesc_align); 647 di->aligndesc_4k, di->dmadesc_align);
669 648
670 /* allocate tx packet pointer vector */ 649 /* allocate tx packet pointer vector */
671 if (ntxd) { 650 if (ntxd) {
@@ -703,21 +682,27 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
703 682
704 if ((di->ddoffsetlow != 0) && !di->addrext) { 683 if ((di->ddoffsetlow != 0) && !di->addrext) {
705 if (di->txdpa > SI_PCI_DMA_SZ) { 684 if (di->txdpa > SI_PCI_DMA_SZ) {
706 DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n", 685 brcms_dbg_dma(di->core,
707 di->name, (u32)di->txdpa); 686 "%s: txdpa 0x%x: addrext not supported\n",
687 di->name, (u32)di->txdpa);
708 goto fail; 688 goto fail;
709 } 689 }
710 if (di->rxdpa > SI_PCI_DMA_SZ) { 690 if (di->rxdpa > SI_PCI_DMA_SZ) {
711 DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n", 691 brcms_dbg_dma(di->core,
712 di->name, (u32)di->rxdpa); 692 "%s: rxdpa 0x%x: addrext not supported\n",
693 di->name, (u32)di->rxdpa);
713 goto fail; 694 goto fail;
714 } 695 }
715 } 696 }
716 697
717 DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n", 698 /* Initialize AMPDU session */
718 di->ddoffsetlow, di->ddoffsethigh, 699 brcms_c_ampdu_reset_session(&di->ampdu_session, wlc);
719 di->dataoffsetlow, di->dataoffsethigh, 700
720 di->addrext); 701 brcms_dbg_dma(di->core,
702 "ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
703 di->ddoffsetlow, di->ddoffsethigh,
704 di->dataoffsetlow, di->dataoffsethigh,
705 di->addrext);
721 706
722 return (struct dma_pub *) di; 707 return (struct dma_pub *) di;
723 708
@@ -763,7 +748,7 @@ void dma_detach(struct dma_pub *pub)
763{ 748{
764 struct dma_info *di = (struct dma_info *)pub; 749 struct dma_info *di = (struct dma_info *)pub;
765 750
766 DMA_TRACE("%s:\n", di->name); 751 brcms_dbg_dma(di->core, "%s:\n", di->name);
767 752
768 /* free dma descriptor rings */ 753 /* free dma descriptor rings */
769 if (di->txd64) 754 if (di->txd64)
@@ -839,7 +824,7 @@ static void _dma_rxenable(struct dma_info *di)
839 uint dmactrlflags = di->dma.dmactrlflags; 824 uint dmactrlflags = di->dma.dmactrlflags;
840 u32 control; 825 u32 control;
841 826
842 DMA_TRACE("%s:\n", di->name); 827 brcms_dbg_dma(di->core, "%s:\n", di->name);
843 828
844 control = D64_RC_RE | (bcma_read32(di->core, 829 control = D64_RC_RE | (bcma_read32(di->core,
845 DMA64RXREGOFFS(di, control)) & 830 DMA64RXREGOFFS(di, control)) &
@@ -859,7 +844,7 @@ void dma_rxinit(struct dma_pub *pub)
859{ 844{
860 struct dma_info *di = (struct dma_info *)pub; 845 struct dma_info *di = (struct dma_info *)pub;
861 846
862 DMA_TRACE("%s:\n", di->name); 847 brcms_dbg_dma(di->core, "%s:\n", di->name);
863 848
864 if (di->nrxd == 0) 849 if (di->nrxd == 0)
865 return; 850 return;
@@ -954,7 +939,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
954 return 0; 939 return 0;
955 940
956 len = le16_to_cpu(*(__le16 *) (p->data)); 941 len = le16_to_cpu(*(__le16 *) (p->data));
957 DMA_TRACE("%s: dma_rx len %d\n", di->name, len); 942 brcms_dbg_dma(di->core, "%s: dma_rx len %d\n", di->name, len);
958 dma_spin_for_len(len, p); 943 dma_spin_for_len(len, p);
959 944
960 /* set actual length */ 945 /* set actual length */
@@ -981,14 +966,15 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
981 DMA64RXREGOFFS(di, status0)) & 966 DMA64RXREGOFFS(di, status0)) &
982 D64_RS0_CD_MASK) - di->rcvptrbase) & 967 D64_RS0_CD_MASK) - di->rcvptrbase) &
983 D64_RS0_CD_MASK, struct dma64desc); 968 D64_RS0_CD_MASK, struct dma64desc);
984 DMA_ERROR("rxin %d rxout %d, hw_curr %d\n", 969 brcms_dbg_dma(di->core,
985 di->rxin, di->rxout, cur); 970 "rxin %d rxout %d, hw_curr %d\n",
971 di->rxin, di->rxout, cur);
986 } 972 }
987#endif /* DEBUG */ 973#endif /* DEBUG */
988 974
989 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { 975 if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
990 DMA_ERROR("%s: bad frame length (%d)\n", 976 brcms_dbg_dma(di->core, "%s: bad frame length (%d)\n",
991 di->name, len); 977 di->name, len);
992 skb_queue_walk_safe(&dma_frames, p, next) { 978 skb_queue_walk_safe(&dma_frames, p, next) {
993 skb_unlink(p, &dma_frames); 979 skb_unlink(p, &dma_frames);
994 brcmu_pkt_buf_free_skb(p); 980 brcmu_pkt_buf_free_skb(p);
@@ -1005,7 +991,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
1005 991
1006static bool dma64_rxidle(struct dma_info *di) 992static bool dma64_rxidle(struct dma_info *di)
1007{ 993{
1008 DMA_TRACE("%s:\n", di->name); 994 brcms_dbg_dma(di->core, "%s:\n", di->name);
1009 995
1010 if (di->nrxd == 0) 996 if (di->nrxd == 0)
1011 return true; 997 return true;
@@ -1016,6 +1002,17 @@ static bool dma64_rxidle(struct dma_info *di)
1016 D64_RS0_CD_MASK)); 1002 D64_RS0_CD_MASK));
1017} 1003}
1018 1004
1005static bool dma64_txidle(struct dma_info *di)
1006{
1007 if (di->ntxd == 0)
1008 return true;
1009
1010 return ((bcma_read32(di->core,
1011 DMA64TXREGOFFS(di, status0)) & D64_XS0_CD_MASK) ==
1012 (bcma_read32(di->core, DMA64TXREGOFFS(di, ptr)) &
1013 D64_XS0_CD_MASK));
1014}
1015
1019/* 1016/*
1020 * post receive buffers 1017 * post receive buffers
1021 * return false is refill failed completely and ring is empty this will stall 1018 * return false is refill failed completely and ring is empty this will stall
@@ -1047,7 +1044,7 @@ bool dma_rxfill(struct dma_pub *pub)
1047 1044
1048 n = di->nrxpost - nrxdactive(di, rxin, rxout); 1045 n = di->nrxpost - nrxdactive(di, rxin, rxout);
1049 1046
1050 DMA_TRACE("%s: post %d\n", di->name, n); 1047 brcms_dbg_dma(di->core, "%s: post %d\n", di->name, n);
1051 1048
1052 if (di->rxbufsize > BCMEXTRAHDROOM) 1049 if (di->rxbufsize > BCMEXTRAHDROOM)
1053 extra_offset = di->rxextrahdrroom; 1050 extra_offset = di->rxextrahdrroom;
@@ -1060,9 +1057,11 @@ bool dma_rxfill(struct dma_pub *pub)
1060 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset); 1057 p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
1061 1058
1062 if (p == NULL) { 1059 if (p == NULL) {
1063 DMA_ERROR("%s: out of rxbufs\n", di->name); 1060 brcms_dbg_dma(di->core, "%s: out of rxbufs\n",
1061 di->name);
1064 if (i == 0 && dma64_rxidle(di)) { 1062 if (i == 0 && dma64_rxidle(di)) {
1065 DMA_ERROR("%s: ring is empty !\n", di->name); 1063 brcms_dbg_dma(di->core, "%s: ring is empty !\n",
1064 di->name);
1066 ring_empty = true; 1065 ring_empty = true;
1067 } 1066 }
1068 di->dma.rxnobuf++; 1067 di->dma.rxnobuf++;
@@ -1107,7 +1106,7 @@ void dma_rxreclaim(struct dma_pub *pub)
1107 struct dma_info *di = (struct dma_info *)pub; 1106 struct dma_info *di = (struct dma_info *)pub;
1108 struct sk_buff *p; 1107 struct sk_buff *p;
1109 1108
1110 DMA_TRACE("%s:\n", di->name); 1109 brcms_dbg_dma(di->core, "%s:\n", di->name);
1111 1110
1112 while ((p = _dma_getnextrxp(di, true))) 1111 while ((p = _dma_getnextrxp(di, true)))
1113 brcmu_pkt_buf_free_skb(p); 1112 brcmu_pkt_buf_free_skb(p);
@@ -1138,7 +1137,7 @@ void dma_txinit(struct dma_pub *pub)
1138 struct dma_info *di = (struct dma_info *)pub; 1137 struct dma_info *di = (struct dma_info *)pub;
1139 u32 control = D64_XC_XE; 1138 u32 control = D64_XC_XE;
1140 1139
1141 DMA_TRACE("%s:\n", di->name); 1140 brcms_dbg_dma(di->core, "%s:\n", di->name);
1142 1141
1143 if (di->ntxd == 0) 1142 if (di->ntxd == 0)
1144 return; 1143 return;
@@ -1170,7 +1169,7 @@ void dma_txsuspend(struct dma_pub *pub)
1170{ 1169{
1171 struct dma_info *di = (struct dma_info *)pub; 1170 struct dma_info *di = (struct dma_info *)pub;
1172 1171
1173 DMA_TRACE("%s:\n", di->name); 1172 brcms_dbg_dma(di->core, "%s:\n", di->name);
1174 1173
1175 if (di->ntxd == 0) 1174 if (di->ntxd == 0)
1176 return; 1175 return;
@@ -1182,7 +1181,7 @@ void dma_txresume(struct dma_pub *pub)
1182{ 1181{
1183 struct dma_info *di = (struct dma_info *)pub; 1182 struct dma_info *di = (struct dma_info *)pub;
1184 1183
1185 DMA_TRACE("%s:\n", di->name); 1184 brcms_dbg_dma(di->core, "%s:\n", di->name);
1186 1185
1187 if (di->ntxd == 0) 1186 if (di->ntxd == 0)
1188 return; 1187 return;
@@ -1205,11 +1204,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
1205 struct dma_info *di = (struct dma_info *)pub; 1204 struct dma_info *di = (struct dma_info *)pub;
1206 struct sk_buff *p; 1205 struct sk_buff *p;
1207 1206
1208 DMA_TRACE("%s: %s\n", 1207 brcms_dbg_dma(di->core, "%s: %s\n",
1209 di->name, 1208 di->name,
1210 range == DMA_RANGE_ALL ? "all" : 1209 range == DMA_RANGE_ALL ? "all" :
1211 range == DMA_RANGE_TRANSMITTED ? "transmitted" : 1210 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
1212 "transferred"); 1211 "transferred");
1213 1212
1214 if (di->txin == di->txout) 1213 if (di->txin == di->txout)
1215 return; 1214 return;
@@ -1264,39 +1263,25 @@ bool dma_rxreset(struct dma_pub *pub)
1264 return status == D64_RS0_RS_DISABLED; 1263 return status == D64_RS0_RS_DISABLED;
1265} 1264}
1266 1265
1267/* 1266static void dma_txenq(struct dma_info *di, struct sk_buff *p)
1268 * !! tx entry routine
1269 * WARNING: call must check the return value for error.
1270 * the error(toss frames) could be fatal and cause many subsequent hard
1271 * to debug problems
1272 */
1273int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
1274{ 1267{
1275 struct dma_info *di = (struct dma_info *)pub;
1276 unsigned char *data; 1268 unsigned char *data;
1277 uint len; 1269 uint len;
1278 u16 txout; 1270 u16 txout;
1279 u32 flags = 0; 1271 u32 flags = 0;
1280 dma_addr_t pa; 1272 dma_addr_t pa;
1281 1273
1282 DMA_TRACE("%s:\n", di->name);
1283
1284 txout = di->txout; 1274 txout = di->txout;
1285 1275
1276 if (WARN_ON(nexttxd(di, txout) == di->txin))
1277 return;
1278
1286 /* 1279 /*
1287 * obtain and initialize transmit descriptor entry. 1280 * obtain and initialize transmit descriptor entry.
1288 */ 1281 */
1289 data = p->data; 1282 data = p->data;
1290 len = p->len; 1283 len = p->len;
1291 1284
1292 /* no use to transmit a zero length packet */
1293 if (len == 0)
1294 return 0;
1295
1296 /* return nonzero if out of tx descriptors */
1297 if (nexttxd(di, txout) == di->txin)
1298 goto outoftxd;
1299
1300 /* get physical address of buffer start */ 1285 /* get physical address of buffer start */
1301 pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE); 1286 pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
1302 1287
@@ -1318,23 +1303,147 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
1318 1303
1319 /* bump the tx descriptor index */ 1304 /* bump the tx descriptor index */
1320 di->txout = txout; 1305 di->txout = txout;
1306}
1321 1307
1322 /* kick the chip */ 1308static void ampdu_finalize(struct dma_info *di)
1323 if (commit) 1309{
1324 bcma_write32(di->core, DMA64TXREGOFFS(di, ptr), 1310 struct brcms_ampdu_session *session = &di->ampdu_session;
1325 di->xmtptrbase + I2B(txout, struct dma64desc)); 1311 struct sk_buff *p;
1312
1313 trace_brcms_ampdu_session(&session->wlc->hw->d11core->dev,
1314 session->max_ampdu_len,
1315 session->max_ampdu_frames,
1316 session->ampdu_len,
1317 skb_queue_len(&session->skb_list),
1318 session->dma_len);
1319
1320 if (WARN_ON(skb_queue_empty(&session->skb_list)))
1321 return;
1322
1323 brcms_c_ampdu_finalize(session);
1324
1325 while (!skb_queue_empty(&session->skb_list)) {
1326 p = skb_dequeue(&session->skb_list);
1327 dma_txenq(di, p);
1328 }
1329
1330 bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
1331 di->xmtptrbase + I2B(di->txout, struct dma64desc));
1332 brcms_c_ampdu_reset_session(session, session->wlc);
1333}
1334
1335static void prep_ampdu_frame(struct dma_info *di, struct sk_buff *p)
1336{
1337 struct brcms_ampdu_session *session = &di->ampdu_session;
1338 int ret;
1339
1340 ret = brcms_c_ampdu_add_frame(session, p);
1341 if (ret == -ENOSPC) {
1342 /*
1343 * AMPDU cannot accomodate this frame. Close out the in-
1344 * progress AMPDU session and start a new one.
1345 */
1346 ampdu_finalize(di);
1347 ret = brcms_c_ampdu_add_frame(session, p);
1348 }
1349
1350 WARN_ON(ret);
1351}
1352
1353/* Update count of available tx descriptors based on current DMA state */
1354static void dma_update_txavail(struct dma_info *di)
1355{
1356 /*
1357 * Available space is number of descriptors less the number of
1358 * active descriptors and the number of queued AMPDU frames.
1359 */
1360 di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) -
1361 skb_queue_len(&di->ampdu_session.skb_list) - 1;
1362}
1363
1364/*
1365 * !! tx entry routine
1366 * WARNING: call must check the return value for error.
1367 * the error(toss frames) could be fatal and cause many subsequent hard
1368 * to debug problems
1369 */
1370int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
1371 struct sk_buff *p)
1372{
1373 struct dma_info *di = (struct dma_info *)pub;
1374 struct brcms_ampdu_session *session = &di->ampdu_session;
1375 struct ieee80211_tx_info *tx_info;
1376 bool is_ampdu;
1377
1378 /* no use to transmit a zero length packet */
1379 if (p->len == 0)
1380 return 0;
1381
1382 /* return nonzero if out of tx descriptors */
1383 if (di->dma.txavail == 0 || nexttxd(di, di->txout) == di->txin)
1384 goto outoftxd;
1385
1386 tx_info = IEEE80211_SKB_CB(p);
1387 is_ampdu = tx_info->flags & IEEE80211_TX_CTL_AMPDU;
1388 if (is_ampdu)
1389 prep_ampdu_frame(di, p);
1390 else
1391 dma_txenq(di, p);
1326 1392
1327 /* tx flow control */ 1393 /* tx flow control */
1328 di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; 1394 dma_update_txavail(di);
1395
1396 /* kick the chip */
1397 if (is_ampdu) {
1398 /*
1399 * Start sending data if we've got a full AMPDU, there's
1400 * no more space in the DMA ring, or the ring isn't
1401 * currently transmitting.
1402 */
1403 if (skb_queue_len(&session->skb_list) == session->max_ampdu_frames ||
1404 di->dma.txavail == 0 || dma64_txidle(di))
1405 ampdu_finalize(di);
1406 } else {
1407 bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
1408 di->xmtptrbase + I2B(di->txout, struct dma64desc));
1409 }
1329 1410
1330 return 0; 1411 return 0;
1331 1412
1332 outoftxd: 1413 outoftxd:
1333 DMA_ERROR("%s: out of txds !!!\n", di->name); 1414 brcms_dbg_dma(di->core, "%s: out of txds !!!\n", di->name);
1334 brcmu_pkt_buf_free_skb(p); 1415 brcmu_pkt_buf_free_skb(p);
1335 di->dma.txavail = 0; 1416 di->dma.txavail = 0;
1336 di->dma.txnobuf++; 1417 di->dma.txnobuf++;
1337 return -1; 1418 return -ENOSPC;
1419}
1420
1421void dma_txflush(struct dma_pub *pub)
1422{
1423 struct dma_info *di = (struct dma_info *)pub;
1424 struct brcms_ampdu_session *session = &di->ampdu_session;
1425
1426 if (!skb_queue_empty(&session->skb_list))
1427 ampdu_finalize(di);
1428}
1429
1430int dma_txpending(struct dma_pub *pub)
1431{
1432 struct dma_info *di = (struct dma_info *)pub;
1433 return ntxdactive(di, di->txin, di->txout);
1434}
1435
1436/*
1437 * If we have an active AMPDU session and are not transmitting,
1438 * this function will force tx to start.
1439 */
1440void dma_kick_tx(struct dma_pub *pub)
1441{
1442 struct dma_info *di = (struct dma_info *)pub;
1443 struct brcms_ampdu_session *session = &di->ampdu_session;
1444
1445 if (!skb_queue_empty(&session->skb_list) && dma64_txidle(di))
1446 ampdu_finalize(di);
1338} 1447}
1339 1448
1340/* 1449/*
@@ -1354,11 +1463,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1354 u16 active_desc; 1463 u16 active_desc;
1355 struct sk_buff *txp; 1464 struct sk_buff *txp;
1356 1465
1357 DMA_TRACE("%s: %s\n", 1466 brcms_dbg_dma(di->core, "%s: %s\n",
1358 di->name, 1467 di->name,
1359 range == DMA_RANGE_ALL ? "all" : 1468 range == DMA_RANGE_ALL ? "all" :
1360 range == DMA_RANGE_TRANSMITTED ? "transmitted" : 1469 range == DMA_RANGE_TRANSMITTED ? "transmitted" :
1361 "transferred"); 1470 "transferred");
1362 1471
1363 if (di->ntxd == 0) 1472 if (di->ntxd == 0)
1364 return NULL; 1473 return NULL;
@@ -1412,13 +1521,13 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
1412 di->txin = i; 1521 di->txin = i;
1413 1522
1414 /* tx flow control */ 1523 /* tx flow control */
1415 di->dma.txavail = di->ntxd - ntxdactive(di, di->txin, di->txout) - 1; 1524 dma_update_txavail(di);
1416 1525
1417 return txp; 1526 return txp;
1418 1527
1419 bogus: 1528 bogus:
1420 DMA_NONE("bogus curr: start %d end %d txout %d\n", 1529 brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n",
1421 start, end, di->txout); 1530 start, end, di->txout);
1422 return NULL; 1531 return NULL;
1423} 1532}
1424 1533
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index cc269ee5c499..ff5b80b09046 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -74,12 +74,11 @@ struct dma_pub {
74 uint txnobuf; /* tx out of dma descriptors */ 74 uint txnobuf; /* tx out of dma descriptors */
75}; 75};
76 76
77extern struct dma_pub *dma_attach(char *name, struct si_pub *sih, 77extern struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
78 struct bcma_device *d11core,
79 uint txregbase, uint rxregbase, 78 uint txregbase, uint rxregbase,
80 uint ntxd, uint nrxd, 79 uint ntxd, uint nrxd,
81 uint rxbufsize, int rxextheadroom, 80 uint rxbufsize, int rxextheadroom,
82 uint nrxpost, uint rxoffset, uint *msg_level); 81 uint nrxpost, uint rxoffset);
83 82
84void dma_rxinit(struct dma_pub *pub); 83void dma_rxinit(struct dma_pub *pub);
85int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list); 84int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
@@ -87,7 +86,11 @@ bool dma_rxfill(struct dma_pub *pub);
87bool dma_rxreset(struct dma_pub *pub); 86bool dma_rxreset(struct dma_pub *pub);
88bool dma_txreset(struct dma_pub *pub); 87bool dma_txreset(struct dma_pub *pub);
89void dma_txinit(struct dma_pub *pub); 88void dma_txinit(struct dma_pub *pub);
90int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit); 89int dma_txfast(struct brcms_c_info *wlc, struct dma_pub *pub,
90 struct sk_buff *p0);
91void dma_txflush(struct dma_pub *pub);
92int dma_txpending(struct dma_pub *pub);
93void dma_kick_tx(struct dma_pub *pub);
91void dma_txsuspend(struct dma_pub *pub); 94void dma_txsuspend(struct dma_pub *pub);
92bool dma_txsuspended(struct dma_pub *pub); 95bool dma_txsuspended(struct dma_pub *pub);
93void dma_txresume(struct dma_pub *pub); 96void dma_txresume(struct dma_pub *pub);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index a744ea5a9559..1fbd8ecbe2ea 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -33,6 +33,7 @@
33#include "ucode_loader.h" 33#include "ucode_loader.h"
34#include "mac80211_if.h" 34#include "mac80211_if.h"
35#include "main.h" 35#include "main.h"
36#include "debug.h"
36 37
37#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 38#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
38 39
@@ -92,16 +93,21 @@ MODULE_FIRMWARE("brcm/bcm43xx_hdr-0.fw");
92 93
93/* recognized BCMA Core IDs */ 94/* recognized BCMA Core IDs */
94static struct bcma_device_id brcms_coreid_table[] = { 95static struct bcma_device_id brcms_coreid_table[] = {
96 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 17, BCMA_ANY_CLASS),
95 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS), 97 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
96 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS), 98 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
97 BCMA_CORETABLE_END 99 BCMA_CORETABLE_END
98}; 100};
99MODULE_DEVICE_TABLE(bcma, brcms_coreid_table); 101MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
100 102
101#ifdef DEBUG 103#if defined(CONFIG_BRCMDBG)
102static int msglevel = 0xdeadbeef; 104/*
103module_param(msglevel, int, 0); 105 * Module parameter for setting the debug message level. Available
104#endif /* DEBUG */ 106 * flags are specified by the BRCM_DL_* macros in
107 * drivers/net/wireless/brcm80211/include/defs.h.
108 */
109module_param_named(debug, brcm_msg_level, uint, S_IRUGO | S_IWUSR);
110#endif
105 111
106static struct ieee80211_channel brcms_2ghz_chantable[] = { 112static struct ieee80211_channel brcms_2ghz_chantable[] = {
107 CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS), 113 CHAN2GHZ(1, 2412, IEEE80211_CHAN_NO_HT40MINUS),
@@ -276,12 +282,12 @@ static void brcms_ops_tx(struct ieee80211_hw *hw,
276 282
277 spin_lock_bh(&wl->lock); 283 spin_lock_bh(&wl->lock);
278 if (!wl->pub->up) { 284 if (!wl->pub->up) {
279 wiphy_err(wl->wiphy, "ops->tx called while down\n"); 285 brcms_err(wl->wlc->hw->d11core, "ops->tx called while down\n");
280 kfree_skb(skb); 286 kfree_skb(skb);
281 goto done; 287 goto done;
282 } 288 }
283 brcms_c_sendpkt_mac80211(wl->wlc, skb, hw); 289 if (brcms_c_sendpkt_mac80211(wl->wlc, skb, hw))
284 tx_info->rate_driver_data[0] = control->sta; 290 tx_info->rate_driver_data[0] = control->sta;
285 done: 291 done:
286 spin_unlock_bh(&wl->lock); 292 spin_unlock_bh(&wl->lock);
287} 293}
@@ -313,8 +319,8 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
313 spin_unlock_bh(&wl->lock); 319 spin_unlock_bh(&wl->lock);
314 320
315 if (err != 0) 321 if (err != 0)
316 wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__, 322 brcms_err(wl->wlc->hw->d11core, "%s: brcms_up() returned %d\n",
317 err); 323 __func__, err);
318 return err; 324 return err;
319} 325}
320 326
@@ -332,7 +338,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
332 status = brcms_c_chipmatch(wl->wlc->hw->d11core); 338 status = brcms_c_chipmatch(wl->wlc->hw->d11core);
333 spin_unlock_bh(&wl->lock); 339 spin_unlock_bh(&wl->lock);
334 if (!status) { 340 if (!status) {
335 wiphy_err(wl->wiphy, 341 brcms_err(wl->wlc->hw->d11core,
336 "wl: brcms_ops_stop: chipmatch failed\n"); 342 "wl: brcms_ops_stop: chipmatch failed\n");
337 return; 343 return;
338 } 344 }
@@ -350,8 +356,9 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
350 356
351 /* Just STA for now */ 357 /* Just STA for now */
352 if (vif->type != NL80211_IFTYPE_STATION) { 358 if (vif->type != NL80211_IFTYPE_STATION) {
353 wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only" 359 brcms_err(wl->wlc->hw->d11core,
354 " STA for now\n", __func__, vif->type); 360 "%s: Attempt to add type %d, only STA for now\n",
361 __func__, vif->type);
355 return -EOPNOTSUPP; 362 return -EOPNOTSUPP;
356 } 363 }
357 364
@@ -370,9 +377,9 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
370{ 377{
371 struct ieee80211_conf *conf = &hw->conf; 378 struct ieee80211_conf *conf = &hw->conf;
372 struct brcms_info *wl = hw->priv; 379 struct brcms_info *wl = hw->priv;
380 struct bcma_device *core = wl->wlc->hw->d11core;
373 int err = 0; 381 int err = 0;
374 int new_int; 382 int new_int;
375 struct wiphy *wiphy = hw->wiphy;
376 383
377 spin_lock_bh(&wl->lock); 384 spin_lock_bh(&wl->lock);
378 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { 385 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
@@ -380,25 +387,26 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
380 conf->listen_interval); 387 conf->listen_interval);
381 } 388 }
382 if (changed & IEEE80211_CONF_CHANGE_MONITOR) 389 if (changed & IEEE80211_CONF_CHANGE_MONITOR)
383 wiphy_dbg(wiphy, "%s: change monitor mode: %s\n", 390 brcms_dbg_info(core, "%s: change monitor mode: %s\n",
384 __func__, conf->flags & IEEE80211_CONF_MONITOR ? 391 __func__, conf->flags & IEEE80211_CONF_MONITOR ?
385 "true" : "false"); 392 "true" : "false");
386 if (changed & IEEE80211_CONF_CHANGE_PS) 393 if (changed & IEEE80211_CONF_CHANGE_PS)
387 wiphy_err(wiphy, "%s: change power-save mode: %s (implement)\n", 394 brcms_err(core, "%s: change power-save mode: %s (implement)\n",
388 __func__, conf->flags & IEEE80211_CONF_PS ? 395 __func__, conf->flags & IEEE80211_CONF_PS ?
389 "true" : "false"); 396 "true" : "false");
390 397
391 if (changed & IEEE80211_CONF_CHANGE_POWER) { 398 if (changed & IEEE80211_CONF_CHANGE_POWER) {
392 err = brcms_c_set_tx_power(wl->wlc, conf->power_level); 399 err = brcms_c_set_tx_power(wl->wlc, conf->power_level);
393 if (err < 0) { 400 if (err < 0) {
394 wiphy_err(wiphy, "%s: Error setting power_level\n", 401 brcms_err(core, "%s: Error setting power_level\n",
395 __func__); 402 __func__);
396 goto config_out; 403 goto config_out;
397 } 404 }
398 new_int = brcms_c_get_tx_power(wl->wlc); 405 new_int = brcms_c_get_tx_power(wl->wlc);
399 if (new_int != conf->power_level) 406 if (new_int != conf->power_level)
400 wiphy_err(wiphy, "%s: Power level req != actual, %d %d" 407 brcms_err(core,
401 "\n", __func__, conf->power_level, 408 "%s: Power level req != actual, %d %d\n",
409 __func__, conf->power_level,
402 new_int); 410 new_int);
403 } 411 }
404 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { 412 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
@@ -425,13 +433,13 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
425 struct ieee80211_bss_conf *info, u32 changed) 433 struct ieee80211_bss_conf *info, u32 changed)
426{ 434{
427 struct brcms_info *wl = hw->priv; 435 struct brcms_info *wl = hw->priv;
428 struct wiphy *wiphy = hw->wiphy; 436 struct bcma_device *core = wl->wlc->hw->d11core;
429 437
430 if (changed & BSS_CHANGED_ASSOC) { 438 if (changed & BSS_CHANGED_ASSOC) {
431 /* association status changed (associated/disassociated) 439 /* association status changed (associated/disassociated)
432 * also implies a change in the AID. 440 * also implies a change in the AID.
433 */ 441 */
434 wiphy_err(wiphy, "%s: %s: %sassociated\n", KBUILD_MODNAME, 442 brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
435 __func__, info->assoc ? "" : "dis"); 443 __func__, info->assoc ? "" : "dis");
436 spin_lock_bh(&wl->lock); 444 spin_lock_bh(&wl->lock);
437 brcms_c_associate_upd(wl->wlc, info->assoc); 445 brcms_c_associate_upd(wl->wlc, info->assoc);
@@ -491,7 +499,7 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
491 error = brcms_c_set_rateset(wl->wlc, &rs); 499 error = brcms_c_set_rateset(wl->wlc, &rs);
492 spin_unlock_bh(&wl->lock); 500 spin_unlock_bh(&wl->lock);
493 if (error) 501 if (error)
494 wiphy_err(wiphy, "changing basic rates failed: %d\n", 502 brcms_err(core, "changing basic rates failed: %d\n",
495 error); 503 error);
496 } 504 }
497 if (changed & BSS_CHANGED_BEACON_INT) { 505 if (changed & BSS_CHANGED_BEACON_INT) {
@@ -508,30 +516,30 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
508 } 516 }
509 if (changed & BSS_CHANGED_BEACON) 517 if (changed & BSS_CHANGED_BEACON)
510 /* Beacon data changed, retrieve new beacon (beaconing modes) */ 518 /* Beacon data changed, retrieve new beacon (beaconing modes) */
511 wiphy_err(wiphy, "%s: beacon changed\n", __func__); 519 brcms_err(core, "%s: beacon changed\n", __func__);
512 520
513 if (changed & BSS_CHANGED_BEACON_ENABLED) { 521 if (changed & BSS_CHANGED_BEACON_ENABLED) {
514 /* Beaconing should be enabled/disabled (beaconing modes) */ 522 /* Beaconing should be enabled/disabled (beaconing modes) */
515 wiphy_err(wiphy, "%s: Beacon enabled: %s\n", __func__, 523 brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
516 info->enable_beacon ? "true" : "false"); 524 info->enable_beacon ? "true" : "false");
517 } 525 }
518 526
519 if (changed & BSS_CHANGED_CQM) { 527 if (changed & BSS_CHANGED_CQM) {
520 /* Connection quality monitor config changed */ 528 /* Connection quality monitor config changed */
521 wiphy_err(wiphy, "%s: cqm change: threshold %d, hys %d " 529 brcms_err(core, "%s: cqm change: threshold %d, hys %d "
522 " (implement)\n", __func__, info->cqm_rssi_thold, 530 " (implement)\n", __func__, info->cqm_rssi_thold,
523 info->cqm_rssi_hyst); 531 info->cqm_rssi_hyst);
524 } 532 }
525 533
526 if (changed & BSS_CHANGED_IBSS) { 534 if (changed & BSS_CHANGED_IBSS) {
527 /* IBSS join status changed */ 535 /* IBSS join status changed */
528 wiphy_err(wiphy, "%s: IBSS joined: %s (implement)\n", __func__, 536 brcms_err(core, "%s: IBSS joined: %s (implement)\n",
529 info->ibss_joined ? "true" : "false"); 537 __func__, info->ibss_joined ? "true" : "false");
530 } 538 }
531 539
532 if (changed & BSS_CHANGED_ARP_FILTER) { 540 if (changed & BSS_CHANGED_ARP_FILTER) {
533 /* Hardware ARP filter address list or state changed */ 541 /* Hardware ARP filter address list or state changed */
534 wiphy_err(wiphy, "%s: arp filtering: enabled %s, count %d" 542 brcms_err(core, "%s: arp filtering: enabled %s, count %d"
535 " (implement)\n", __func__, info->arp_filter_enabled ? 543 " (implement)\n", __func__, info->arp_filter_enabled ?
536 "true" : "false", info->arp_addr_cnt); 544 "true" : "false", info->arp_addr_cnt);
537 } 545 }
@@ -541,8 +549,8 @@ brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
541 * QoS for this association was enabled/disabled. 549 * QoS for this association was enabled/disabled.
542 * Note that it is only ever disabled for station mode. 550 * Note that it is only ever disabled for station mode.
543 */ 551 */
544 wiphy_err(wiphy, "%s: qos enabled: %s (implement)\n", __func__, 552 brcms_err(core, "%s: qos enabled: %s (implement)\n",
545 info->qos ? "true" : "false"); 553 __func__, info->qos ? "true" : "false");
546 } 554 }
547 return; 555 return;
548} 556}
@@ -553,25 +561,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
553 unsigned int *total_flags, u64 multicast) 561 unsigned int *total_flags, u64 multicast)
554{ 562{
555 struct brcms_info *wl = hw->priv; 563 struct brcms_info *wl = hw->priv;
556 struct wiphy *wiphy = hw->wiphy; 564 struct bcma_device *core = wl->wlc->hw->d11core;
557 565
558 changed_flags &= MAC_FILTERS; 566 changed_flags &= MAC_FILTERS;
559 *total_flags &= MAC_FILTERS; 567 *total_flags &= MAC_FILTERS;
560 568
561 if (changed_flags & FIF_PROMISC_IN_BSS) 569 if (changed_flags & FIF_PROMISC_IN_BSS)
562 wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n"); 570 brcms_dbg_info(core, "FIF_PROMISC_IN_BSS\n");
563 if (changed_flags & FIF_ALLMULTI) 571 if (changed_flags & FIF_ALLMULTI)
564 wiphy_dbg(wiphy, "FIF_ALLMULTI\n"); 572 brcms_dbg_info(core, "FIF_ALLMULTI\n");
565 if (changed_flags & FIF_FCSFAIL) 573 if (changed_flags & FIF_FCSFAIL)
566 wiphy_dbg(wiphy, "FIF_FCSFAIL\n"); 574 brcms_dbg_info(core, "FIF_FCSFAIL\n");
567 if (changed_flags & FIF_CONTROL) 575 if (changed_flags & FIF_CONTROL)
568 wiphy_dbg(wiphy, "FIF_CONTROL\n"); 576 brcms_dbg_info(core, "FIF_CONTROL\n");
569 if (changed_flags & FIF_OTHER_BSS) 577 if (changed_flags & FIF_OTHER_BSS)
570 wiphy_dbg(wiphy, "FIF_OTHER_BSS\n"); 578 brcms_dbg_info(core, "FIF_OTHER_BSS\n");
571 if (changed_flags & FIF_PSPOLL) 579 if (changed_flags & FIF_PSPOLL)
572 wiphy_dbg(wiphy, "FIF_PSPOLL\n"); 580 brcms_dbg_info(core, "FIF_PSPOLL\n");
573 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) 581 if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
574 wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n"); 582 brcms_dbg_info(core, "FIF_BCN_PRBRESP_PROMISC\n");
575 583
576 spin_lock_bh(&wl->lock); 584 spin_lock_bh(&wl->lock);
577 brcms_c_mac_promisc(wl->wlc, *total_flags); 585 brcms_c_mac_promisc(wl->wlc, *total_flags);
@@ -653,8 +661,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
653 status = brcms_c_aggregatable(wl->wlc, tid); 661 status = brcms_c_aggregatable(wl->wlc, tid);
654 spin_unlock_bh(&wl->lock); 662 spin_unlock_bh(&wl->lock);
655 if (!status) { 663 if (!status) {
656 wiphy_err(wl->wiphy, "START: tid %d is not agg\'able\n", 664 brcms_err(wl->wlc->hw->d11core,
657 tid); 665 "START: tid %d is not agg\'able\n", tid);
658 return -EINVAL; 666 return -EINVAL;
659 } 667 }
660 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 668 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -681,8 +689,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
681 /* Power save wakeup */ 689 /* Power save wakeup */
682 break; 690 break;
683 default: 691 default:
684 wiphy_err(wl->wiphy, "%s: Invalid command, ignoring\n", 692 brcms_err(wl->wlc->hw->d11core,
685 __func__); 693 "%s: Invalid command, ignoring\n", __func__);
686 } 694 }
687 695
688 return 0; 696 return 0;
@@ -839,8 +847,10 @@ static void brcms_free(struct brcms_info *wl)
839 /* kill dpc */ 847 /* kill dpc */
840 tasklet_kill(&wl->tasklet); 848 tasklet_kill(&wl->tasklet);
841 849
842 if (wl->pub) 850 if (wl->pub) {
851 brcms_debugfs_detach(wl->pub);
843 brcms_c_module_unregister(wl->pub, "linux", wl); 852 brcms_c_module_unregister(wl->pub, "linux", wl);
853 }
844 854
845 /* free common resources */ 855 /* free common resources */
846 if (wl->wlc) { 856 if (wl->wlc) {
@@ -889,27 +899,22 @@ static void brcms_remove(struct bcma_device *pdev)
889static irqreturn_t brcms_isr(int irq, void *dev_id) 899static irqreturn_t brcms_isr(int irq, void *dev_id)
890{ 900{
891 struct brcms_info *wl; 901 struct brcms_info *wl;
892 bool ours, wantdpc; 902 irqreturn_t ret = IRQ_NONE;
893 903
894 wl = (struct brcms_info *) dev_id; 904 wl = (struct brcms_info *) dev_id;
895 905
896 spin_lock(&wl->isr_lock); 906 spin_lock(&wl->isr_lock);
897 907
898 /* call common first level interrupt handler */ 908 /* call common first level interrupt handler */
899 ours = brcms_c_isr(wl->wlc, &wantdpc); 909 if (brcms_c_isr(wl->wlc)) {
900 if (ours) { 910 /* schedule second level handler */
901 /* if more to do... */ 911 tasklet_schedule(&wl->tasklet);
902 if (wantdpc) { 912 ret = IRQ_HANDLED;
903
904 /* ...and call the second level interrupt handler */
905 /* schedule dpc */
906 tasklet_schedule(&wl->tasklet);
907 }
908 } 913 }
909 914
910 spin_unlock(&wl->isr_lock); 915 spin_unlock(&wl->isr_lock);
911 916
912 return IRQ_RETVAL(ours); 917 return ret;
913} 918}
914 919
915/* 920/*
@@ -1075,6 +1080,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1075 regulatory_hint(wl->wiphy, wl->pub->srom_ccode)) 1080 regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
1076 wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__); 1081 wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
1077 1082
1083 brcms_debugfs_attach(wl->pub);
1084 brcms_debugfs_create_files(wl->pub);
1078 n_adapters_found++; 1085 n_adapters_found++;
1079 return wl; 1086 return wl;
1080 1087
@@ -1093,7 +1100,7 @@ fail:
1093 * 1100 *
1094 * Perimeter lock is initialized in the course of this function. 1101 * Perimeter lock is initialized in the course of this function.
1095 */ 1102 */
1096static int __devinit brcms_bcma_probe(struct bcma_device *pdev) 1103static int brcms_bcma_probe(struct bcma_device *pdev)
1097{ 1104{
1098 struct brcms_info *wl; 1105 struct brcms_info *wl;
1099 struct ieee80211_hw *hw; 1106 struct ieee80211_hw *hw;
@@ -1144,14 +1151,13 @@ static int brcms_suspend(struct bcma_device *pdev)
1144 wl->pub->hw_up = false; 1151 wl->pub->hw_up = false;
1145 spin_unlock_bh(&wl->lock); 1152 spin_unlock_bh(&wl->lock);
1146 1153
1147 pr_debug("brcms_suspend ok\n"); 1154 brcms_dbg_info(wl->wlc->hw->d11core, "brcms_suspend ok\n");
1148 1155
1149 return 0; 1156 return 0;
1150} 1157}
1151 1158
1152static int brcms_resume(struct bcma_device *pdev) 1159static int brcms_resume(struct bcma_device *pdev)
1153{ 1160{
1154 pr_debug("brcms_resume ok\n");
1155 return 0; 1161 return 0;
1156} 1162}
1157 1163
@@ -1160,7 +1166,7 @@ static struct bcma_driver brcms_bcma_driver = {
1160 .probe = brcms_bcma_probe, 1166 .probe = brcms_bcma_probe,
1161 .suspend = brcms_suspend, 1167 .suspend = brcms_suspend,
1162 .resume = brcms_resume, 1168 .resume = brcms_resume,
1163 .remove = __devexit_p(brcms_remove), 1169 .remove = brcms_remove,
1164 .id_table = brcms_coreid_table, 1170 .id_table = brcms_coreid_table,
1165}; 1171};
1166 1172
@@ -1184,10 +1190,7 @@ static DECLARE_WORK(brcms_driver_work, brcms_driver_init);
1184 1190
1185static int __init brcms_module_init(void) 1191static int __init brcms_module_init(void)
1186{ 1192{
1187#ifdef DEBUG 1193 brcms_debugfs_init();
1188 if (msglevel != 0xdeadbeef)
1189 brcm_msg_level = msglevel;
1190#endif
1191 if (!schedule_work(&brcms_driver_work)) 1194 if (!schedule_work(&brcms_driver_work))
1192 return -EBUSY; 1195 return -EBUSY;
1193 1196
@@ -1205,6 +1208,7 @@ static void __exit brcms_module_exit(void)
1205{ 1208{
1206 cancel_work_sync(&brcms_driver_work); 1209 cancel_work_sync(&brcms_driver_work);
1207 bcma_driver_unregister(&brcms_bcma_driver); 1210 bcma_driver_unregister(&brcms_bcma_driver);
1211 brcms_debugfs_exit();
1208} 1212}
1209 1213
1210module_init(brcms_module_init); 1214module_init(brcms_module_init);
@@ -1216,7 +1220,7 @@ module_exit(brcms_module_exit);
1216void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif, 1220void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
1217 bool state, int prio) 1221 bool state, int prio)
1218{ 1222{
1219 wiphy_err(wl->wiphy, "Shouldn't be here %s\n", __func__); 1223 brcms_err(wl->wlc->hw->d11core, "Shouldn't be here %s\n", __func__);
1220} 1224}
1221 1225
1222/* 1226/*
@@ -1224,7 +1228,8 @@ void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
1224 */ 1228 */
1225void brcms_init(struct brcms_info *wl) 1229void brcms_init(struct brcms_info *wl)
1226{ 1230{
1227 BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit); 1231 brcms_dbg_info(wl->wlc->hw->d11core, "Initializing wl%d\n",
1232 wl->pub->unit);
1228 brcms_reset(wl); 1233 brcms_reset(wl);
1229 brcms_c_init(wl->wlc, wl->mute_tx); 1234 brcms_c_init(wl->wlc, wl->mute_tx);
1230} 1235}
@@ -1234,7 +1239,7 @@ void brcms_init(struct brcms_info *wl)
1234 */ 1239 */
1235uint brcms_reset(struct brcms_info *wl) 1240uint brcms_reset(struct brcms_info *wl)
1236{ 1241{
1237 BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit); 1242 brcms_dbg_info(wl->wlc->hw->d11core, "Resetting wl%d\n", wl->pub->unit);
1238 brcms_c_reset(wl->wlc); 1243 brcms_c_reset(wl->wlc);
1239 1244
1240 /* dpc will not be rescheduled */ 1245 /* dpc will not be rescheduled */
@@ -1248,7 +1253,7 @@ uint brcms_reset(struct brcms_info *wl)
1248 1253
1249void brcms_fatal_error(struct brcms_info *wl) 1254void brcms_fatal_error(struct brcms_info *wl)
1250{ 1255{
1251 wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n", 1256 brcms_err(wl->wlc->hw->d11core, "wl%d: fatal error, reinitializing\n",
1252 wl->wlc->pub->unit); 1257 wl->wlc->pub->unit);
1253 brcms_reset(wl); 1258 brcms_reset(wl);
1254 ieee80211_restart_hw(wl->pub->ieee_hw); 1259 ieee80211_restart_hw(wl->pub->ieee_hw);
@@ -1396,8 +1401,9 @@ void brcms_add_timer(struct brcms_timer *t, uint ms, int periodic)
1396 1401
1397#ifdef DEBUG 1402#ifdef DEBUG
1398 if (t->set) 1403 if (t->set)
1399 wiphy_err(hw->wiphy, "%s: Already set. Name: %s, per %d\n", 1404 brcms_dbg_info(t->wl->wlc->hw->d11core,
1400 __func__, t->name, periodic); 1405 "%s: Already set. Name: %s, per %d\n",
1406 __func__, t->name, periodic);
1401#endif 1407#endif
1402 t->ms = ms; 1408 t->ms = ms;
1403 t->periodic = (bool) periodic; 1409 t->periodic = (bool) periodic;
@@ -1486,8 +1492,8 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
1486 } 1492 }
1487 } 1493 }
1488 } 1494 }
1489 wiphy_err(wl->wiphy, "ERROR: ucode buf tag:%d can not be found!\n", 1495 brcms_err(wl->wlc->hw->d11core,
1490 idx); 1496 "ERROR: ucode buf tag:%d can not be found!\n", idx);
1491 *pbuf = NULL; 1497 *pbuf = NULL;
1492fail: 1498fail:
1493 return -ENODATA; 1499 return -ENODATA;
@@ -1510,7 +1516,7 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
1510 pdata = wl->fw.fw_bin[i]->data + 1516 pdata = wl->fw.fw_bin[i]->data +
1511 le32_to_cpu(hdr->offset); 1517 le32_to_cpu(hdr->offset);
1512 if (le32_to_cpu(hdr->len) != 4) { 1518 if (le32_to_cpu(hdr->len) != 4) {
1513 wiphy_err(wl->wiphy, 1519 brcms_err(wl->wlc->hw->d11core,
1514 "ERROR: fw hdr len\n"); 1520 "ERROR: fw hdr len\n");
1515 return -ENOMSG; 1521 return -ENOMSG;
1516 } 1522 }
@@ -1519,7 +1525,8 @@ int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
1519 } 1525 }
1520 } 1526 }
1521 } 1527 }
1522 wiphy_err(wl->wiphy, "ERROR: ucode tag:%d can not be found!\n", idx); 1528 brcms_err(wl->wlc->hw->d11core,
1529 "ERROR: ucode tag:%d can not be found!\n", idx);
1523 return -ENOMSG; 1530 return -ENOMSG;
1524} 1531}
1525 1532
@@ -1560,8 +1567,8 @@ int brcms_check_firmwares(struct brcms_info *wl)
1560 sizeof(struct firmware_hdr)); 1567 sizeof(struct firmware_hdr));
1561 rc = -EBADF; 1568 rc = -EBADF;
1562 } else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) { 1569 } else if (fw->size < MIN_FW_SIZE || fw->size > MAX_FW_SIZE) {
1563 wiphy_err(wl->wiphy, "%s: out of bounds fw file size " 1570 wiphy_err(wl->wiphy, "%s: out of bounds fw file size %zu\n",
1564 "%zu\n", __func__, fw->size); 1571 __func__, fw->size);
1565 rc = -EBADF; 1572 rc = -EBADF;
1566 } else { 1573 } else {
1567 /* check if ucode section overruns firmware image */ 1574 /* check if ucode section overruns firmware image */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 75086b37c817..17594de4199e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -34,12 +34,9 @@
34#include "ucode_loader.h" 34#include "ucode_loader.h"
35#include "main.h" 35#include "main.h"
36#include "soc.h" 36#include "soc.h"
37 37#include "dma.h"
38/* 38#include "debug.h"
39 * Indication for txflowcontrol that all priority bits in 39#include "brcms_trace_events.h"
40 * TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
41 */
42#define ALLPRIO -1
43 40
44/* watchdog timer, in unit of ms */ 41/* watchdog timer, in unit of ms */
45#define TIMER_INTERVAL_WATCHDOG 1000 42#define TIMER_INTERVAL_WATCHDOG 1000
@@ -126,21 +123,6 @@
126 123
127#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */ 124#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
128 125
129/* precedences numbers for wlc queues. These are twice as may levels as
130 * 802.1D priorities.
131 * Odd numbers are used for HI priority traffic at same precedence levels
132 * These constants are used ONLY by wlc_prio2prec_map. Do not use them
133 * elsewhere.
134 */
135#define _BRCMS_PREC_NONE 0 /* None = - */
136#define _BRCMS_PREC_BK 2 /* BK - Background */
137#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
138#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
139#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
140#define _BRCMS_PREC_VI 10 /* Vi - Video */
141#define _BRCMS_PREC_VO 12 /* Vo - Voice */
142#define _BRCMS_PREC_NC 14 /* NC - Network Control */
143
144/* synthpu_dly times in us */ 126/* synthpu_dly times in us */
145#define SYNTHPU_DLY_APHY_US 3700 127#define SYNTHPU_DLY_APHY_US 3700
146#define SYNTHPU_DLY_BPHY_US 1050 128#define SYNTHPU_DLY_BPHY_US 1050
@@ -237,17 +219,17 @@
237 219
238#define MAX_DMA_SEGS 4 220#define MAX_DMA_SEGS 4
239 221
240/* Max # of entries in Tx FIFO based on 4kb page size */ 222/* # of entries in Tx FIFO */
241#define NTXD 256 223#define NTXD 64
242/* Max # of entries in Rx FIFO based on 4kb page size */ 224/* Max # of entries in Rx FIFO based on 4kb page size */
243#define NRXD 256 225#define NRXD 256
244 226
227/* Amount of headroom to leave in Tx FIFO */
228#define TX_HEADROOM 4
229
245/* try to keep this # rbufs posted to the chip */ 230/* try to keep this # rbufs posted to the chip */
246#define NRXBUFPOST 32 231#define NRXBUFPOST 32
247 232
248/* data msg txq hiwat mark */
249#define BRCMS_DATAHIWAT 50
250
251/* max # frames to process in brcms_c_recv() */ 233/* max # frames to process in brcms_c_recv() */
252#define RXBND 8 234#define RXBND 8
253/* max # tx status to process in wlc_txstatus() */ 235/* max # tx status to process in wlc_txstatus() */
@@ -283,24 +265,8 @@ struct edcf_acparam {
283 u16 TXOP; 265 u16 TXOP;
284} __packed; 266} __packed;
285 267
286const u8 prio2fifo[NUMPRIO] = {
287 TX_AC_BE_FIFO, /* 0 BE AC_BE Best Effort */
288 TX_AC_BK_FIFO, /* 1 BK AC_BK Background */
289 TX_AC_BK_FIFO, /* 2 -- AC_BK Background */
290 TX_AC_BE_FIFO, /* 3 EE AC_BE Best Effort */
291 TX_AC_VI_FIFO, /* 4 CL AC_VI Video */
292 TX_AC_VI_FIFO, /* 5 VI AC_VI Video */
293 TX_AC_VO_FIFO, /* 6 VO AC_VO Voice */
294 TX_AC_VO_FIFO /* 7 NC AC_VO Voice */
295};
296
297/* debug/trace */ 268/* debug/trace */
298uint brcm_msg_level = 269uint brcm_msg_level;
299#if defined(DEBUG)
300 LOG_ERROR_VAL;
301#else
302 0;
303#endif /* DEBUG */
304 270
305/* TX FIFO number to WME/802.1E Access Category */ 271/* TX FIFO number to WME/802.1E Access Category */
306static const u8 wme_fifo2ac[] = { 272static const u8 wme_fifo2ac[] = {
@@ -320,18 +286,6 @@ static const u8 wme_ac2fifo[] = {
320 TX_AC_BK_FIFO 286 TX_AC_BK_FIFO
321}; 287};
322 288
323/* 802.1D Priority to precedence queue mapping */
324const u8 wlc_prio2prec_map[] = {
325 _BRCMS_PREC_BE, /* 0 BE - Best-effort */
326 _BRCMS_PREC_BK, /* 1 BK - Background */
327 _BRCMS_PREC_NONE, /* 2 None = - */
328 _BRCMS_PREC_EE, /* 3 EE - Excellent-effort */
329 _BRCMS_PREC_CL, /* 4 CL - Controlled Load */
330 _BRCMS_PREC_VI, /* 5 Vi - Video */
331 _BRCMS_PREC_VO, /* 6 Vo - Voice */
332 _BRCMS_PREC_NC, /* 7 NC - Network Control */
333};
334
335static const u16 xmtfifo_sz[][NFIFO] = { 289static const u16 xmtfifo_sz[][NFIFO] = {
336 /* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */ 290 /* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
337 {20, 192, 192, 21, 17, 5}, 291 {20, 192, 192, 21, 17, 5},
@@ -371,6 +325,36 @@ static const char fifo_names[6][0];
371static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL); 325static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
372#endif 326#endif
373 327
328/* Mapping of ieee80211 AC numbers to tx fifos */
329static const u8 ac_to_fifo_mapping[IEEE80211_NUM_ACS] = {
330 [IEEE80211_AC_VO] = TX_AC_VO_FIFO,
331 [IEEE80211_AC_VI] = TX_AC_VI_FIFO,
332 [IEEE80211_AC_BE] = TX_AC_BE_FIFO,
333 [IEEE80211_AC_BK] = TX_AC_BK_FIFO,
334};
335
336/* Mapping of tx fifos to ieee80211 AC numbers */
337static const u8 fifo_to_ac_mapping[IEEE80211_NUM_ACS] = {
338 [TX_AC_BK_FIFO] = IEEE80211_AC_BK,
339 [TX_AC_BE_FIFO] = IEEE80211_AC_BE,
340 [TX_AC_VI_FIFO] = IEEE80211_AC_VI,
341 [TX_AC_VO_FIFO] = IEEE80211_AC_VO,
342};
343
344static u8 brcms_ac_to_fifo(u8 ac)
345{
346 if (ac >= ARRAY_SIZE(ac_to_fifo_mapping))
347 return TX_AC_BE_FIFO;
348 return ac_to_fifo_mapping[ac];
349}
350
351static u8 brcms_fifo_to_ac(u8 fifo)
352{
353 if (fifo >= ARRAY_SIZE(fifo_to_ac_mapping))
354 return IEEE80211_AC_BE;
355 return fifo_to_ac_mapping[fifo];
356}
357
374/* Find basic rate for a given rate */ 358/* Find basic rate for a given rate */
375static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec) 359static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
376{ 360{
@@ -415,10 +399,15 @@ static bool brcms_deviceremoved(struct brcms_c_info *wlc)
415} 399}
416 400
417/* sum the individual fifo tx pending packet counts */ 401/* sum the individual fifo tx pending packet counts */
418static s16 brcms_txpktpendtot(struct brcms_c_info *wlc) 402static int brcms_txpktpendtot(struct brcms_c_info *wlc)
419{ 403{
420 return wlc->core->txpktpend[0] + wlc->core->txpktpend[1] + 404 int i;
421 wlc->core->txpktpend[2] + wlc->core->txpktpend[3]; 405 int pending = 0;
406
407 for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
408 if (wlc->hw->di[i])
409 pending += dma_txpending(wlc->hw->di[i]);
410 return pending;
422} 411}
423 412
424static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc) 413static bool brcms_is_mband_unlocked(struct brcms_c_info *wlc)
@@ -626,14 +615,11 @@ static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
626 uint rate = rspec2rate(ratespec); 615 uint rate = rspec2rate(ratespec);
627 616
628 if (rate == 0) { 617 if (rate == 0) {
629 wiphy_err(wlc->wiphy, "wl%d: WAR: using rate of 1 mbps\n", 618 brcms_err(wlc->hw->d11core, "wl%d: WAR: using rate of 1 mbps\n",
630 wlc->pub->unit); 619 wlc->pub->unit);
631 rate = BRCM_RATE_1M; 620 rate = BRCM_RATE_1M;
632 } 621 }
633 622
634 BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, len%d\n",
635 wlc->pub->unit, ratespec, preamble_type, mac_len);
636
637 if (is_mcs_rate(ratespec)) { 623 if (is_mcs_rate(ratespec)) {
638 uint mcs = ratespec & RSPEC_RATE_MASK; 624 uint mcs = ratespec & RSPEC_RATE_MASK;
639 int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec); 625 int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -696,7 +682,7 @@ static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
696 u16 size; 682 u16 size;
697 u32 value; 683 u32 value;
698 684
699 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 685 brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
700 686
701 for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) { 687 for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
702 size = le16_to_cpu(inits[i].size); 688 size = le16_to_cpu(inits[i].size);
@@ -725,19 +711,19 @@ static void brcms_c_write_mhf(struct brcms_hardware *wlc_hw, u16 *mhfs)
725 711
726static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw) 712static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
727{ 713{
728 struct wiphy *wiphy = wlc_hw->wlc->wiphy;
729 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; 714 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
730 715
731 /* init microcode host flags */ 716 /* init microcode host flags */
732 brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs); 717 brcms_c_write_mhf(wlc_hw, wlc_hw->band->mhfs);
733 718
734 /* do band-specific ucode IHR, SHM, and SCR inits */ 719 /* do band-specific ucode IHR, SHM, and SCR inits */
735 if (D11REV_IS(wlc_hw->corerev, 23)) { 720 if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
736 if (BRCMS_ISNPHY(wlc_hw->band)) 721 if (BRCMS_ISNPHY(wlc_hw->band))
737 brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16); 722 brcms_c_write_inits(wlc_hw, ucode->d11n0bsinitvals16);
738 else 723 else
739 wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev" 724 brcms_err(wlc_hw->d11core,
740 " %d\n", __func__, wlc_hw->unit, 725 "%s: wl%d: unsupported phy in corerev %d\n",
726 __func__, wlc_hw->unit,
741 wlc_hw->corerev); 727 wlc_hw->corerev);
742 } else { 728 } else {
743 if (D11REV_IS(wlc_hw->corerev, 24)) { 729 if (D11REV_IS(wlc_hw->corerev, 24)) {
@@ -745,12 +731,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
745 brcms_c_write_inits(wlc_hw, 731 brcms_c_write_inits(wlc_hw,
746 ucode->d11lcn0bsinitvals24); 732 ucode->d11lcn0bsinitvals24);
747 else 733 else
748 wiphy_err(wiphy, "%s: wl%d: unsupported phy in" 734 brcms_err(wlc_hw->d11core,
749 " core rev %d\n", __func__, 735 "%s: wl%d: unsupported phy in core rev %d\n",
750 wlc_hw->unit, wlc_hw->corerev); 736 __func__, wlc_hw->unit,
737 wlc_hw->corerev);
751 } else { 738 } else {
752 wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n", 739 brcms_err(wlc_hw->d11core,
753 __func__, wlc_hw->unit, wlc_hw->corerev); 740 "%s: wl%d: unsupported corerev %d\n",
741 __func__, wlc_hw->unit, wlc_hw->corerev);
754 } 742 }
755 } 743 }
756} 744}
@@ -765,7 +753,7 @@ static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
765 753
766static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk) 754static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
767{ 755{
768 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk); 756 brcms_dbg_info(wlc_hw->d11core, "wl%d: clk %d\n", wlc_hw->unit, clk);
769 757
770 wlc_hw->phyclk = clk; 758 wlc_hw->phyclk = clk;
771 759
@@ -790,8 +778,8 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
790/* low-level band switch utility routine */ 778/* low-level band switch utility routine */
791static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit) 779static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
792{ 780{
793 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, 781 brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
794 bandunit); 782 bandunit);
795 783
796 wlc_hw->band = wlc_hw->bandstate[bandunit]; 784 wlc_hw->band = wlc_hw->bandstate[bandunit];
797 785
@@ -819,7 +807,7 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
819 u32 macintmask; 807 u32 macintmask;
820 u32 macctrl; 808 u32 macctrl;
821 809
822 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 810 brcms_dbg_mac80211(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
823 macctrl = bcma_read32(wlc_hw->d11core, 811 macctrl = bcma_read32(wlc_hw->d11core,
824 D11REGOFFS(maccontrol)); 812 D11REGOFFS(maccontrol));
825 WARN_ON((macctrl & MCTL_EN_MAC) != 0); 813 WARN_ON((macctrl & MCTL_EN_MAC) != 0);
@@ -841,9 +829,10 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
841static bool 829static bool
842brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) 830brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
843{ 831{
844 struct sk_buff *p; 832 struct sk_buff *p = NULL;
845 uint queue; 833 uint queue = NFIFO;
846 struct d11txh *txh; 834 struct dma_pub *dma = NULL;
835 struct d11txh *txh = NULL;
847 struct scb *scb = NULL; 836 struct scb *scb = NULL;
848 bool free_pdu; 837 bool free_pdu;
849 int tx_rts, tx_frame_count, tx_rts_count; 838 int tx_rts, tx_frame_count, tx_rts_count;
@@ -854,6 +843,11 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
854 struct ieee80211_tx_info *tx_info; 843 struct ieee80211_tx_info *tx_info;
855 struct ieee80211_tx_rate *txrate; 844 struct ieee80211_tx_rate *txrate;
856 int i; 845 int i;
846 bool fatal = true;
847
848 trace_brcms_txstatus(&wlc->hw->d11core->dev, txs->framelen,
849 txs->frameid, txs->status, txs->lasttxtime,
850 txs->sequence, txs->phyerr, txs->ackphyrxsh);
857 851
858 /* discard intermediate indications for ucode with one legitimate case: 852 /* discard intermediate indications for ucode with one legitimate case:
859 * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange, 853 * e.g. if "useRTS" is set. ucode did a successful rts/cts exchange,
@@ -862,34 +856,36 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
862 */ 856 */
863 if (!(txs->status & TX_STATUS_AMPDU) 857 if (!(txs->status & TX_STATUS_AMPDU)
864 && (txs->status & TX_STATUS_INTERMEDIATE)) { 858 && (txs->status & TX_STATUS_INTERMEDIATE)) {
865 BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n"); 859 brcms_dbg_tx(wlc->hw->d11core, "INTERMEDIATE but not AMPDU\n");
866 return false; 860 fatal = false;
861 goto out;
867 } 862 }
868 863
869 queue = txs->frameid & TXFID_QUEUE_MASK; 864 queue = txs->frameid & TXFID_QUEUE_MASK;
870 if (queue >= NFIFO) { 865 if (queue >= NFIFO) {
871 p = NULL; 866 brcms_err(wlc->hw->d11core, "queue %u >= NFIFO\n", queue);
872 goto fatal; 867 goto out;
873 } 868 }
874 869
870 dma = wlc->hw->di[queue];
871
875 p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED); 872 p = dma_getnexttxp(wlc->hw->di[queue], DMA_RANGE_TRANSMITTED);
876 if (p == NULL) 873 if (p == NULL) {
877 goto fatal; 874 brcms_err(wlc->hw->d11core, "dma_getnexttxp returned null!\n");
875 goto out;
876 }
878 877
879 txh = (struct d11txh *) (p->data); 878 txh = (struct d11txh *) (p->data);
880 mcl = le16_to_cpu(txh->MacTxControlLow); 879 mcl = le16_to_cpu(txh->MacTxControlLow);
881 880
882 if (txs->phyerr) { 881 if (txs->phyerr)
883 if (brcm_msg_level & LOG_ERROR_VAL) { 882 brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
884 wiphy_err(wlc->wiphy, "phyerr 0x%x, rate 0x%x\n", 883 txs->phyerr, txh->MainRates);
885 txs->phyerr, txh->MainRates);
886 brcms_c_print_txdesc(txh);
887 }
888 brcms_c_print_txstatus(txs);
889 }
890 884
891 if (txs->frameid != le16_to_cpu(txh->TxFrameID)) 885 if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
892 goto fatal; 886 brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
887 goto out;
888 }
893 tx_info = IEEE80211_SKB_CB(p); 889 tx_info = IEEE80211_SKB_CB(p);
894 h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN); 890 h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
895 891
@@ -898,14 +894,24 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
898 894
899 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 895 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
900 brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs); 896 brcms_c_ampdu_dotxstatus(wlc->ampdu, scb, p, txs);
901 return false; 897 fatal = false;
898 goto out;
902 } 899 }
903 900
901 /*
902 * brcms_c_ampdu_dotxstatus() will trace tx descriptors for AMPDU
903 * frames; this traces them for the rest.
904 */
905 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh, sizeof(*txh));
906
904 supr_status = txs->status & TX_STATUS_SUPR_MASK; 907 supr_status = txs->status & TX_STATUS_SUPR_MASK;
905 if (supr_status == TX_STATUS_SUPR_BADCH) 908 if (supr_status == TX_STATUS_SUPR_BADCH) {
906 BCMMSG(wlc->wiphy, 909 unsigned xfts = le16_to_cpu(txh->XtraFrameTypes);
907 "%s: Pkt tx suppressed, possibly channel %d\n", 910 brcms_dbg_tx(wlc->hw->d11core,
908 __func__, CHSPEC_CHANNEL(wlc->default_bss->chanspec)); 911 "Pkt tx suppressed, dest chan %u, current %d\n",
912 (xfts >> XFTS_CHANNEL_SHIFT) & 0xff,
913 CHSPEC_CHANNEL(wlc->default_bss->chanspec));
914 }
909 915
910 tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS; 916 tx_rts = le16_to_cpu(txh->MacTxControlLow) & TXC_SENDRTS;
911 tx_frame_count = 917 tx_frame_count =
@@ -916,7 +922,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
916 lastframe = !ieee80211_has_morefrags(h->frame_control); 922 lastframe = !ieee80211_has_morefrags(h->frame_control);
917 923
918 if (!lastframe) { 924 if (!lastframe) {
919 wiphy_err(wlc->wiphy, "Not last frame!\n"); 925 brcms_err(wlc->hw->d11core, "Not last frame!\n");
920 } else { 926 } else {
921 /* 927 /*
922 * Set information to be consumed by Minstrel ht. 928 * Set information to be consumed by Minstrel ht.
@@ -982,26 +988,37 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
982 totlen = p->len; 988 totlen = p->len;
983 free_pdu = true; 989 free_pdu = true;
984 990
985 brcms_c_txfifo_complete(wlc, queue, 1);
986
987 if (lastframe) { 991 if (lastframe) {
988 /* remove PLCP & Broadcom tx descriptor header */ 992 /* remove PLCP & Broadcom tx descriptor header */
989 skb_pull(p, D11_PHY_HDR_LEN); 993 skb_pull(p, D11_PHY_HDR_LEN);
990 skb_pull(p, D11_TXH_LEN); 994 skb_pull(p, D11_TXH_LEN);
991 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p); 995 ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, p);
992 } else { 996 } else {
993 wiphy_err(wlc->wiphy, "%s: Not last frame => not calling " 997 brcms_err(wlc->hw->d11core,
994 "tx_status\n", __func__); 998 "%s: Not last frame => not calling tx_status\n",
999 __func__);
995 } 1000 }
996 1001
997 return false; 1002 fatal = false;
998 1003
999 fatal: 1004 out:
1000 if (p) 1005 if (fatal) {
1001 brcmu_pkt_buf_free_skb(p); 1006 if (txh)
1007 trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
1008 sizeof(*txh));
1009 if (p)
1010 brcmu_pkt_buf_free_skb(p);
1011 }
1002 1012
1003 return true; 1013 if (dma && queue < NFIFO) {
1014 u16 ac_queue = brcms_fifo_to_ac(queue);
1015 if (dma->txavail > TX_HEADROOM && queue < TX_BCMC_FIFO &&
1016 ieee80211_queue_stopped(wlc->pub->ieee_hw, ac_queue))
1017 ieee80211_wake_queue(wlc->pub->ieee_hw, ac_queue);
1018 dma_kick_tx(dma);
1019 }
1004 1020
1021 return fatal;
1005} 1022}
1006 1023
1007/* process tx completion events in BMAC 1024/* process tx completion events in BMAC
@@ -1011,7 +1028,6 @@ static bool
1011brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal) 1028brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1012{ 1029{
1013 bool morepending = false; 1030 bool morepending = false;
1014 struct brcms_c_info *wlc = wlc_hw->wlc;
1015 struct bcma_device *core; 1031 struct bcma_device *core;
1016 struct tx_status txstatus, *txs; 1032 struct tx_status txstatus, *txs;
1017 u32 s1, s2; 1033 u32 s1, s2;
@@ -1022,19 +1038,23 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1022 */ 1038 */
1023 uint max_tx_num = bound ? TXSBND : -1; 1039 uint max_tx_num = bound ? TXSBND : -1;
1024 1040
1025 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
1026
1027 txs = &txstatus; 1041 txs = &txstatus;
1028 core = wlc_hw->d11core; 1042 core = wlc_hw->d11core;
1029 *fatal = false; 1043 *fatal = false;
1030 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1044 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1031 while (!(*fatal) 1045 while (!(*fatal)
1032 && (s1 & TXS_V)) { 1046 && (s1 & TXS_V)) {
1047 /* !give others some time to run! */
1048 if (n >= max_tx_num) {
1049 morepending = true;
1050 break;
1051 }
1033 1052
1034 if (s1 == 0xffffffff) { 1053 if (s1 == 0xffffffff) {
1035 wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", 1054 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
1036 wlc_hw->unit, __func__); 1055 __func__);
1037 return morepending; 1056 *fatal = true;
1057 return false;
1038 } 1058 }
1039 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2)); 1059 s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
1040 1060
@@ -1046,20 +1066,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
1046 1066
1047 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs); 1067 *fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
1048 1068
1049 /* !give others some time to run! */
1050 if (++n >= max_tx_num)
1051 break;
1052 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus)); 1069 s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
1070 n++;
1053 } 1071 }
1054 1072
1055 if (*fatal) 1073 if (*fatal)
1056 return 0; 1074 return false;
1057
1058 if (n >= max_tx_num)
1059 morepending = true;
1060
1061 if (!pktq_empty(&wlc->pkt_queue->q))
1062 brcms_c_send_q(wlc);
1063 1075
1064 return morepending; 1076 return morepending;
1065} 1077}
@@ -1112,7 +1124,6 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1112 u16 pio_mhf2 = 0; 1124 u16 pio_mhf2 = 0;
1113 struct brcms_hardware *wlc_hw = wlc->hw; 1125 struct brcms_hardware *wlc_hw = wlc->hw;
1114 uint unit = wlc_hw->unit; 1126 uint unit = wlc_hw->unit;
1115 struct wiphy *wiphy = wlc->wiphy;
1116 1127
1117 /* name and offsets for dma_attach */ 1128 /* name and offsets for dma_attach */
1118 snprintf(name, sizeof(name), "wl%d", unit); 1129 snprintf(name, sizeof(name), "wl%d", unit);
@@ -1125,12 +1136,12 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1125 * TX: TX_AC_BK_FIFO (TX AC Background data packets) 1136 * TX: TX_AC_BK_FIFO (TX AC Background data packets)
1126 * RX: RX_FIFO (RX data packets) 1137 * RX: RX_FIFO (RX data packets)
1127 */ 1138 */
1128 wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1139 wlc_hw->di[0] = dma_attach(name, wlc,
1129 (wme ? dmareg(DMA_TX, 0) : 0), 1140 (wme ? dmareg(DMA_TX, 0) : 0),
1130 dmareg(DMA_RX, 0), 1141 dmareg(DMA_RX, 0),
1131 (wme ? NTXD : 0), NRXD, 1142 (wme ? NTXD : 0), NRXD,
1132 RXBUFSZ, -1, NRXBUFPOST, 1143 RXBUFSZ, -1, NRXBUFPOST,
1133 BRCMS_HWRXOFF, &brcm_msg_level); 1144 BRCMS_HWRXOFF);
1134 dma_attach_err |= (NULL == wlc_hw->di[0]); 1145 dma_attach_err |= (NULL == wlc_hw->di[0]);
1135 1146
1136 /* 1147 /*
@@ -1139,10 +1150,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1139 * (legacy) TX_DATA_FIFO (TX data packets) 1150 * (legacy) TX_DATA_FIFO (TX data packets)
1140 * RX: UNUSED 1151 * RX: UNUSED
1141 */ 1152 */
1142 wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1153 wlc_hw->di[1] = dma_attach(name, wlc,
1143 dmareg(DMA_TX, 1), 0, 1154 dmareg(DMA_TX, 1), 0,
1144 NTXD, 0, 0, -1, 0, 0, 1155 NTXD, 0, 0, -1, 0, 0);
1145 &brcm_msg_level);
1146 dma_attach_err |= (NULL == wlc_hw->di[1]); 1156 dma_attach_err |= (NULL == wlc_hw->di[1]);
1147 1157
1148 /* 1158 /*
@@ -1150,26 +1160,26 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
1150 * TX: TX_AC_VI_FIFO (TX AC Video data packets) 1160 * TX: TX_AC_VI_FIFO (TX AC Video data packets)
1151 * RX: UNUSED 1161 * RX: UNUSED
1152 */ 1162 */
1153 wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1163 wlc_hw->di[2] = dma_attach(name, wlc,
1154 dmareg(DMA_TX, 2), 0, 1164 dmareg(DMA_TX, 2), 0,
1155 NTXD, 0, 0, -1, 0, 0, 1165 NTXD, 0, 0, -1, 0, 0);
1156 &brcm_msg_level);
1157 dma_attach_err |= (NULL == wlc_hw->di[2]); 1166 dma_attach_err |= (NULL == wlc_hw->di[2]);
1158 /* 1167 /*
1159 * FIFO 3 1168 * FIFO 3
1160 * TX: TX_AC_VO_FIFO (TX AC Voice data packets) 1169 * TX: TX_AC_VO_FIFO (TX AC Voice data packets)
1161 * (legacy) TX_CTL_FIFO (TX control & mgmt packets) 1170 * (legacy) TX_CTL_FIFO (TX control & mgmt packets)
1162 */ 1171 */
1163 wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core, 1172 wlc_hw->di[3] = dma_attach(name, wlc,
1164 dmareg(DMA_TX, 3), 1173 dmareg(DMA_TX, 3),
1165 0, NTXD, 0, 0, -1, 1174 0, NTXD, 0, 0, -1,
1166 0, 0, &brcm_msg_level); 1175 0, 0);
1167 dma_attach_err |= (NULL == wlc_hw->di[3]); 1176 dma_attach_err |= (NULL == wlc_hw->di[3]);
1168/* Cleaner to leave this as if with AP defined */ 1177/* Cleaner to leave this as if with AP defined */
1169 1178
1170 if (dma_attach_err) { 1179 if (dma_attach_err) {
1171 wiphy_err(wiphy, "wl%d: wlc_attach: dma_attach failed" 1180 brcms_err(wlc_hw->d11core,
1172 "\n", unit); 1181 "wl%d: wlc_attach: dma_attach failed\n",
1182 unit);
1173 return false; 1183 return false;
1174 } 1184 }
1175 1185
@@ -1503,8 +1513,7 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
1503 u16 mac_m; 1513 u16 mac_m;
1504 u16 mac_h; 1514 u16 mac_h;
1505 1515
1506 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n", 1516 brcms_dbg_rx(core, "wl%d: brcms_b_set_addrmatch\n", wlc_hw->unit);
1507 wlc_hw->unit);
1508 1517
1509 mac_l = addr[0] | (addr[1] << 8); 1518 mac_l = addr[0] | (addr[1] << 8);
1510 mac_m = addr[2] | (addr[3] << 8); 1519 mac_m = addr[2] | (addr[3] << 8);
@@ -1527,7 +1536,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
1527 __le32 word_le; 1536 __le32 word_le;
1528 __be32 word_be; 1537 __be32 word_be;
1529 bool be_bit; 1538 bool be_bit;
1530 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 1539 brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
1531 1540
1532 bcma_write32(core, D11REGOFFS(tplatewrptr), offset); 1541 bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
1533 1542
@@ -1700,8 +1709,8 @@ static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
1700{ 1709{
1701 struct brcms_hardware *wlc_hw = wlc->hw; 1710 struct brcms_hardware *wlc_hw = wlc->hw;
1702 1711
1703 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, 1712 brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: bandunit %d\n", wlc_hw->unit,
1704 wlc_hw->band->bandunit); 1713 wlc_hw->band->bandunit);
1705 1714
1706 brcms_c_ucode_bsinit(wlc_hw); 1715 brcms_c_ucode_bsinit(wlc_hw);
1707 1716
@@ -1736,8 +1745,6 @@ static void brcms_b_bsinit(struct brcms_c_info *wlc, u16 chanspec)
1736/* Perform a soft reset of the PHY PLL */ 1745/* Perform a soft reset of the PHY PLL */
1737void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw) 1746void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
1738{ 1747{
1739 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
1740
1741 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr), 1748 ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
1742 ~0, 0); 1749 ~0, 0);
1743 udelay(1); 1750 udelay(1);
@@ -1782,7 +1789,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
1782 u32 phy_bw_clkbits; 1789 u32 phy_bw_clkbits;
1783 bool phy_in_reset = false; 1790 bool phy_in_reset = false;
1784 1791
1785 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 1792 brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
1786 1793
1787 if (pih == NULL) 1794 if (pih == NULL)
1788 return; 1795 return;
@@ -1916,7 +1923,7 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
1916/* power both the pll and external oscillator on/off */ 1923/* power both the pll and external oscillator on/off */
1917static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want) 1924static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
1918{ 1925{
1919 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: want %d\n", wlc_hw->unit, want); 1926 brcms_dbg_info(wlc_hw->d11core, "wl%d: want %d\n", wlc_hw->unit, want);
1920 1927
1921 /* 1928 /*
1922 * dont power down if plldown is false or 1929 * dont power down if plldown is false or
@@ -2005,7 +2012,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
2005 if (flags == BRCMS_USE_COREFLAGS) 2012 if (flags == BRCMS_USE_COREFLAGS)
2006 flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0); 2013 flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
2007 2014
2008 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2015 brcms_dbg_info(wlc_hw->d11core, "wl%d: core reset\n", wlc_hw->unit);
2009 2016
2010 /* request FAST clock if not on */ 2017 /* request FAST clock if not on */
2011 fastclk = wlc_hw->forcefastclk; 2018 fastclk = wlc_hw->forcefastclk;
@@ -2016,13 +2023,13 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
2016 if (bcma_core_is_enabled(wlc_hw->d11core)) { 2023 if (bcma_core_is_enabled(wlc_hw->d11core)) {
2017 for (i = 0; i < NFIFO; i++) 2024 for (i = 0; i < NFIFO; i++)
2018 if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i]))) 2025 if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
2019 wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: " 2026 brcms_err(wlc_hw->d11core, "wl%d: %s: "
2020 "dma_txreset[%d]: cannot stop dma\n", 2027 "dma_txreset[%d]: cannot stop dma\n",
2021 wlc_hw->unit, __func__, i); 2028 wlc_hw->unit, __func__, i);
2022 2029
2023 if ((wlc_hw->di[RX_FIFO]) 2030 if ((wlc_hw->di[RX_FIFO])
2024 && (!wlc_dma_rxreset(wlc_hw, RX_FIFO))) 2031 && (!wlc_dma_rxreset(wlc_hw, RX_FIFO)))
2025 wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: dma_rxreset" 2032 brcms_err(wlc_hw->d11core, "wl%d: %s: dma_rxreset"
2026 "[%d]: cannot stop dma\n", 2033 "[%d]: cannot stop dma\n",
2027 wlc_hw->unit, __func__, RX_FIFO); 2034 wlc_hw->unit, __func__, RX_FIFO);
2028 } 2035 }
@@ -2235,7 +2242,7 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
2235 uint i; 2242 uint i;
2236 uint count; 2243 uint count;
2237 2244
2238 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2245 brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
2239 2246
2240 count = (nbytes / sizeof(u32)); 2247 count = (nbytes / sizeof(u32));
2241 2248
@@ -2257,14 +2264,14 @@ static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
2257 if (wlc_hw->ucode_loaded) 2264 if (wlc_hw->ucode_loaded)
2258 return; 2265 return;
2259 2266
2260 if (D11REV_IS(wlc_hw->corerev, 23)) { 2267 if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
2261 if (BRCMS_ISNPHY(wlc_hw->band)) { 2268 if (BRCMS_ISNPHY(wlc_hw->band)) {
2262 brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo, 2269 brcms_ucode_write(wlc_hw, ucode->bcm43xx_16_mimo,
2263 ucode->bcm43xx_16_mimosz); 2270 ucode->bcm43xx_16_mimosz);
2264 wlc_hw->ucode_loaded = true; 2271 wlc_hw->ucode_loaded = true;
2265 } else 2272 } else
2266 wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in " 2273 brcms_err(wlc_hw->d11core,
2267 "corerev %d\n", 2274 "%s: wl%d: unsupported phy in corerev %d\n",
2268 __func__, wlc_hw->unit, wlc_hw->corerev); 2275 __func__, wlc_hw->unit, wlc_hw->corerev);
2269 } else if (D11REV_IS(wlc_hw->corerev, 24)) { 2276 } else if (D11REV_IS(wlc_hw->corerev, 24)) {
2270 if (BRCMS_ISLCNPHY(wlc_hw->band)) { 2277 if (BRCMS_ISLCNPHY(wlc_hw->band)) {
@@ -2272,8 +2279,8 @@ static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
2272 ucode->bcm43xx_24_lcnsz); 2279 ucode->bcm43xx_24_lcnsz);
2273 wlc_hw->ucode_loaded = true; 2280 wlc_hw->ucode_loaded = true;
2274 } else { 2281 } else {
2275 wiphy_err(wlc->wiphy, "%s: wl%d: unsupported phy in " 2282 brcms_err(wlc_hw->d11core,
2276 "corerev %d\n", 2283 "%s: wl%d: unsupported phy in corerev %d\n",
2277 __func__, wlc_hw->unit, wlc_hw->corerev); 2284 __func__, wlc_hw->unit, wlc_hw->corerev);
2278 } 2285 }
2279 } 2286 }
@@ -2310,7 +2317,6 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
2310 uint unit; 2317 uint unit;
2311 uint intstatus, idx; 2318 uint intstatus, idx;
2312 struct bcma_device *core = wlc_hw->d11core; 2319 struct bcma_device *core = wlc_hw->d11core;
2313 struct wiphy *wiphy = wlc_hw->wlc->wiphy;
2314 2320
2315 unit = wlc_hw->unit; 2321 unit = wlc_hw->unit;
2316 2322
@@ -2323,39 +2329,39 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
2323 if (!intstatus) 2329 if (!intstatus)
2324 continue; 2330 continue;
2325 2331
2326 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: intstatus%d 0x%x\n", 2332 brcms_dbg_int(core, "wl%d: intstatus%d 0x%x\n",
2327 unit, idx, intstatus); 2333 unit, idx, intstatus);
2328 2334
2329 if (intstatus & I_RO) { 2335 if (intstatus & I_RO) {
2330 wiphy_err(wiphy, "wl%d: fifo %d: receive fifo " 2336 brcms_err(core, "wl%d: fifo %d: receive fifo "
2331 "overflow\n", unit, idx); 2337 "overflow\n", unit, idx);
2332 fatal = true; 2338 fatal = true;
2333 } 2339 }
2334 2340
2335 if (intstatus & I_PC) { 2341 if (intstatus & I_PC) {
2336 wiphy_err(wiphy, "wl%d: fifo %d: descriptor error\n", 2342 brcms_err(core, "wl%d: fifo %d: descriptor error\n",
2337 unit, idx); 2343 unit, idx);
2338 fatal = true; 2344 fatal = true;
2339 } 2345 }
2340 2346
2341 if (intstatus & I_PD) { 2347 if (intstatus & I_PD) {
2342 wiphy_err(wiphy, "wl%d: fifo %d: data error\n", unit, 2348 brcms_err(core, "wl%d: fifo %d: data error\n", unit,
2343 idx); 2349 idx);
2344 fatal = true; 2350 fatal = true;
2345 } 2351 }
2346 2352
2347 if (intstatus & I_DE) { 2353 if (intstatus & I_DE) {
2348 wiphy_err(wiphy, "wl%d: fifo %d: descriptor protocol " 2354 brcms_err(core, "wl%d: fifo %d: descriptor protocol "
2349 "error\n", unit, idx); 2355 "error\n", unit, idx);
2350 fatal = true; 2356 fatal = true;
2351 } 2357 }
2352 2358
2353 if (intstatus & I_RU) 2359 if (intstatus & I_RU)
2354 wiphy_err(wiphy, "wl%d: fifo %d: receive descriptor " 2360 brcms_err(core, "wl%d: fifo %d: receive descriptor "
2355 "underflow\n", idx, unit); 2361 "underflow\n", idx, unit);
2356 2362
2357 if (intstatus & I_XU) { 2363 if (intstatus & I_XU) {
2358 wiphy_err(wiphy, "wl%d: fifo %d: transmit fifo " 2364 brcms_err(core, "wl%d: fifo %d: transmit fifo "
2359 "underflow\n", idx, unit); 2365 "underflow\n", idx, unit);
2360 fatal = true; 2366 fatal = true;
2361 } 2367 }
@@ -2516,13 +2522,13 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
2516{ 2522{
2517 struct brcms_hardware *wlc_hw = wlc->hw; 2523 struct brcms_hardware *wlc_hw = wlc->hw;
2518 struct bcma_device *core = wlc_hw->d11core; 2524 struct bcma_device *core = wlc_hw->d11core;
2519 u32 macintstatus; 2525 u32 macintstatus, mask;
2520 2526
2521 /* macintstatus includes a DMA interrupt summary bit */ 2527 /* macintstatus includes a DMA interrupt summary bit */
2522 macintstatus = bcma_read32(core, D11REGOFFS(macintstatus)); 2528 macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
2529 mask = in_isr ? wlc->macintmask : wlc->defmacintmask;
2523 2530
2524 BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit, 2531 trace_brcms_macintstatus(&core->dev, in_isr, macintstatus, mask);
2525 macintstatus);
2526 2532
2527 /* detect cardbus removed, in power down(suspend) and in reset */ 2533 /* detect cardbus removed, in power down(suspend) and in reset */
2528 if (brcms_deviceremoved(wlc)) 2534 if (brcms_deviceremoved(wlc))
@@ -2535,16 +2541,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
2535 return 0; 2541 return 0;
2536 2542
2537 /* defer unsolicited interrupts */ 2543 /* defer unsolicited interrupts */
2538 macintstatus &= (in_isr ? wlc->macintmask : wlc->defmacintmask); 2544 macintstatus &= mask;
2539 2545
2540 /* if not for us */ 2546 /* if not for us */
2541 if (macintstatus == 0) 2547 if (macintstatus == 0)
2542 return 0; 2548 return 0;
2543 2549
2544 /* interrupts are already turned off for CFE build
2545 * Caution: For CFE Turning off the interrupts again has some undesired
2546 * consequences
2547 */
2548 /* turn off the interrupts */ 2550 /* turn off the interrupts */
2549 bcma_write32(core, D11REGOFFS(macintmask), 0); 2551 bcma_write32(core, D11REGOFFS(macintmask), 0);
2550 (void)bcma_read32(core, D11REGOFFS(macintmask)); 2552 (void)bcma_read32(core, D11REGOFFS(macintmask));
@@ -2587,33 +2589,31 @@ bool brcms_c_intrsupd(struct brcms_c_info *wlc)
2587 2589
2588/* 2590/*
2589 * First-level interrupt processing. 2591 * First-level interrupt processing.
2590 * Return true if this was our interrupt, false otherwise. 2592 * Return true if this was our interrupt
2591 * *wantdpc will be set to true if further brcms_c_dpc() processing is required, 2593 * and if further brcms_c_dpc() processing is required,
2592 * false otherwise. 2594 * false otherwise.
2593 */ 2595 */
2594bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc) 2596bool brcms_c_isr(struct brcms_c_info *wlc)
2595{ 2597{
2596 struct brcms_hardware *wlc_hw = wlc->hw; 2598 struct brcms_hardware *wlc_hw = wlc->hw;
2597 u32 macintstatus; 2599 u32 macintstatus;
2598 2600
2599 *wantdpc = false;
2600
2601 if (!wlc_hw->up || !wlc->macintmask) 2601 if (!wlc_hw->up || !wlc->macintmask)
2602 return false; 2602 return false;
2603 2603
2604 /* read and clear macintstatus and intstatus registers */ 2604 /* read and clear macintstatus and intstatus registers */
2605 macintstatus = wlc_intstatus(wlc, true); 2605 macintstatus = wlc_intstatus(wlc, true);
2606 2606
2607 if (macintstatus == 0xffffffff) 2607 if (macintstatus == 0xffffffff) {
2608 wiphy_err(wlc->wiphy, "DEVICEREMOVED detected in the ISR code" 2608 brcms_err(wlc_hw->d11core,
2609 " path\n"); 2609 "DEVICEREMOVED detected in the ISR code path\n");
2610 return false;
2611 }
2610 2612
2611 /* it is not for us */ 2613 /* it is not for us */
2612 if (macintstatus == 0) 2614 if (macintstatus == 0)
2613 return false; 2615 return false;
2614 2616
2615 *wantdpc = true;
2616
2617 /* save interrupt status bits */ 2617 /* save interrupt status bits */
2618 wlc->macintstatus = macintstatus; 2618 wlc->macintstatus = macintstatus;
2619 2619
@@ -2626,10 +2626,9 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2626 struct brcms_hardware *wlc_hw = wlc->hw; 2626 struct brcms_hardware *wlc_hw = wlc->hw;
2627 struct bcma_device *core = wlc_hw->d11core; 2627 struct bcma_device *core = wlc_hw->d11core;
2628 u32 mc, mi; 2628 u32 mc, mi;
2629 struct wiphy *wiphy = wlc->wiphy;
2630 2629
2631 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, 2630 brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
2632 wlc_hw->band->bandunit); 2631 wlc_hw->band->bandunit);
2633 2632
2634 /* 2633 /*
2635 * Track overlapping suspend requests 2634 * Track overlapping suspend requests
@@ -2644,7 +2643,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2644 mc = bcma_read32(core, D11REGOFFS(maccontrol)); 2643 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2645 2644
2646 if (mc == 0xffffffff) { 2645 if (mc == 0xffffffff) {
2647 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2646 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
2648 __func__); 2647 __func__);
2649 brcms_down(wlc->wl); 2648 brcms_down(wlc->wl);
2650 return; 2649 return;
@@ -2655,7 +2654,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2655 2654
2656 mi = bcma_read32(core, D11REGOFFS(macintstatus)); 2655 mi = bcma_read32(core, D11REGOFFS(macintstatus));
2657 if (mi == 0xffffffff) { 2656 if (mi == 0xffffffff) {
2658 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2657 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
2659 __func__); 2658 __func__);
2660 brcms_down(wlc->wl); 2659 brcms_down(wlc->wl);
2661 return; 2660 return;
@@ -2668,10 +2667,10 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2668 BRCMS_MAX_MAC_SUSPEND); 2667 BRCMS_MAX_MAC_SUSPEND);
2669 2668
2670 if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) { 2669 if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
2671 wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS" 2670 brcms_err(core, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
2672 " and MI_MACSSPNDD is still not on.\n", 2671 " and MI_MACSSPNDD is still not on.\n",
2673 wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND); 2672 wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
2674 wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, " 2673 brcms_err(core, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
2675 "psm_brc 0x%04x\n", wlc_hw->unit, 2674 "psm_brc 0x%04x\n", wlc_hw->unit,
2676 bcma_read32(core, D11REGOFFS(psmdebug)), 2675 bcma_read32(core, D11REGOFFS(psmdebug)),
2677 bcma_read32(core, D11REGOFFS(phydebug)), 2676 bcma_read32(core, D11REGOFFS(phydebug)),
@@ -2680,7 +2679,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
2680 2679
2681 mc = bcma_read32(core, D11REGOFFS(maccontrol)); 2680 mc = bcma_read32(core, D11REGOFFS(maccontrol));
2682 if (mc == 0xffffffff) { 2681 if (mc == 0xffffffff) {
2683 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 2682 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
2684 __func__); 2683 __func__);
2685 brcms_down(wlc->wl); 2684 brcms_down(wlc->wl);
2686 return; 2685 return;
@@ -2696,8 +2695,8 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
2696 struct bcma_device *core = wlc_hw->d11core; 2695 struct bcma_device *core = wlc_hw->d11core;
2697 u32 mc, mi; 2696 u32 mc, mi;
2698 2697
2699 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit, 2698 brcms_dbg_mac80211(core, "wl%d: bandunit %d\n", wlc_hw->unit,
2700 wlc->band->bandunit); 2699 wlc->band->bandunit);
2701 2700
2702 /* 2701 /*
2703 * Track overlapping suspend requests 2702 * Track overlapping suspend requests
@@ -2740,8 +2739,6 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
2740 u32 w, val; 2739 u32 w, val;
2741 struct wiphy *wiphy = wlc_hw->wlc->wiphy; 2740 struct wiphy *wiphy = wlc_hw->wlc->wiphy;
2742 2741
2743 BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
2744
2745 /* Validate dchip register access */ 2742 /* Validate dchip register access */
2746 2743
2747 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0); 2744 bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
@@ -2802,7 +2799,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2802 struct bcma_device *core = wlc_hw->d11core; 2799 struct bcma_device *core = wlc_hw->d11core;
2803 u32 tmp; 2800 u32 tmp;
2804 2801
2805 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2802 brcms_dbg_info(core, "wl%d\n", wlc_hw->unit);
2806 2803
2807 tmp = 0; 2804 tmp = 0;
2808 2805
@@ -2818,8 +2815,8 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2818 2815
2819 tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st)); 2816 tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
2820 if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT) 2817 if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
2821 wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY" 2818 brcms_err(core, "%s: turn on PHY PLL failed\n",
2822 " PLL failed\n", __func__); 2819 __func__);
2823 } else { 2820 } else {
2824 bcma_set32(core, D11REGOFFS(clk_ctl_st), 2821 bcma_set32(core, D11REGOFFS(clk_ctl_st),
2825 tmp | CCS_ERSRC_REQ_D11PLL | 2822 tmp | CCS_ERSRC_REQ_D11PLL |
@@ -2835,8 +2832,8 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
2835 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) 2832 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
2836 != 2833 !=
2837 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL)) 2834 (CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
2838 wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on " 2835 brcms_err(core, "%s: turn on PHY PLL failed\n",
2839 "PHY PLL failed\n", __func__); 2836 __func__);
2840 } 2837 }
2841 } else { 2838 } else {
2842 /* 2839 /*
@@ -2854,7 +2851,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
2854{ 2851{
2855 bool dev_gone; 2852 bool dev_gone;
2856 2853
2857 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 2854 brcms_dbg_info(wlc_hw->d11core, "wl%d: disable core\n", wlc_hw->unit);
2858 2855
2859 dev_gone = brcms_deviceremoved(wlc_hw->wlc); 2856 dev_gone = brcms_deviceremoved(wlc_hw->wlc);
2860 2857
@@ -2884,12 +2881,14 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
2884 uint i; 2881 uint i;
2885 2882
2886 /* free any posted tx packets */ 2883 /* free any posted tx packets */
2887 for (i = 0; i < NFIFO; i++) 2884 for (i = 0; i < NFIFO; i++) {
2888 if (wlc_hw->di[i]) { 2885 if (wlc_hw->di[i]) {
2889 dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL); 2886 dma_txreclaim(wlc_hw->di[i], DMA_RANGE_ALL);
2890 wlc->core->txpktpend[i] = 0; 2887 if (i < TX_BCMC_FIFO)
2891 BCMMSG(wlc->wiphy, "pktpend fifo %d clrd\n", i); 2888 ieee80211_wake_queue(wlc->pub->ieee_hw,
2889 brcms_fifo_to_ac(i));
2892 } 2890 }
2891 }
2893 2892
2894 /* free any posted rx packets */ 2893 /* free any posted rx packets */
2895 dma_rxreclaim(wlc_hw->di[RX_FIFO]); 2894 dma_rxreclaim(wlc_hw->di[RX_FIFO]);
@@ -2921,7 +2920,7 @@ brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
2921 if (offset & 2) 2920 if (offset & 2)
2922 objoff += 2; 2921 objoff += 2;
2923 2922
2924 bcma_write16(core, objoff, v); 2923 bcma_wflush16(core, objoff, v);
2925} 2924}
2926 2925
2927/* 2926/*
@@ -3109,7 +3108,7 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
3109 /* check for rx fifo 0 overflow */ 3108 /* check for rx fifo 0 overflow */
3110 delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl); 3109 delta = (u16) (wlc->core->macstat_snapshot->rxf0ovfl - rxf0ovfl);
3111 if (delta) 3110 if (delta)
3112 wiphy_err(wlc->wiphy, "wl%d: %u rx fifo 0 overflows!\n", 3111 brcms_err(wlc->hw->d11core, "wl%d: %u rx fifo 0 overflows!\n",
3113 wlc->pub->unit, delta); 3112 wlc->pub->unit, delta);
3114 3113
3115 /* check for tx fifo underflows */ 3114 /* check for tx fifo underflows */
@@ -3118,8 +3117,9 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
3118 (u16) (wlc->core->macstat_snapshot->txfunfl[i] - 3117 (u16) (wlc->core->macstat_snapshot->txfunfl[i] -
3119 txfunfl[i]); 3118 txfunfl[i]);
3120 if (delta) 3119 if (delta)
3121 wiphy_err(wlc->wiphy, "wl%d: %u tx fifo %d underflows!" 3120 brcms_err(wlc->hw->d11core,
3122 "\n", wlc->pub->unit, delta, i); 3121 "wl%d: %u tx fifo %d underflows!\n",
3122 wlc->pub->unit, delta, i);
3123 } 3123 }
3124#endif /* DEBUG */ 3124#endif /* DEBUG */
3125 3125
@@ -3132,8 +3132,6 @@ static void brcms_c_statsupd(struct brcms_c_info *wlc)
3132 3132
3133static void brcms_b_reset(struct brcms_hardware *wlc_hw) 3133static void brcms_b_reset(struct brcms_hardware *wlc_hw)
3134{ 3134{
3135 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
3136
3137 /* reset the core */ 3135 /* reset the core */
3138 if (!brcms_deviceremoved(wlc_hw->wlc)) 3136 if (!brcms_deviceremoved(wlc_hw->wlc))
3139 brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS); 3137 brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -3144,7 +3142,7 @@ static void brcms_b_reset(struct brcms_hardware *wlc_hw)
3144 3142
3145void brcms_c_reset(struct brcms_c_info *wlc) 3143void brcms_c_reset(struct brcms_c_info *wlc)
3146{ 3144{
3147 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 3145 brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
3148 3146
3149 /* slurp up hw mac counters before core reset */ 3147 /* slurp up hw mac counters before core reset */
3150 brcms_c_statsupd(wlc); 3148 brcms_c_statsupd(wlc);
@@ -3189,10 +3187,9 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3189 bool fifosz_fixup = false; 3187 bool fifosz_fixup = false;
3190 int err = 0; 3188 int err = 0;
3191 u16 buf[NFIFO]; 3189 u16 buf[NFIFO];
3192 struct wiphy *wiphy = wlc->wiphy;
3193 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode; 3190 struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
3194 3191
3195 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit); 3192 brcms_dbg_info(core, "wl%d: core init\n", wlc_hw->unit);
3196 3193
3197 /* reset PSM */ 3194 /* reset PSM */
3198 brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE)); 3195 brcms_b_mctrl(wlc_hw, ~0, (MCTL_IHR_EN | MCTL_PSM_JMP_0 | MCTL_WAKE));
@@ -3212,29 +3209,29 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3212 SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) & 3209 SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
3213 MI_MACSSPNDD) == 0), 1000 * 1000); 3210 MI_MACSSPNDD) == 0), 1000 * 1000);
3214 if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0) 3211 if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
3215 wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-" 3212 brcms_err(core, "wl%d: wlc_coreinit: ucode did not self-"
3216 "suspend!\n", wlc_hw->unit); 3213 "suspend!\n", wlc_hw->unit);
3217 3214
3218 brcms_c_gpio_init(wlc); 3215 brcms_c_gpio_init(wlc);
3219 3216
3220 sflags = bcma_aread32(core, BCMA_IOST); 3217 sflags = bcma_aread32(core, BCMA_IOST);
3221 3218
3222 if (D11REV_IS(wlc_hw->corerev, 23)) { 3219 if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
3223 if (BRCMS_ISNPHY(wlc_hw->band)) 3220 if (BRCMS_ISNPHY(wlc_hw->band))
3224 brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16); 3221 brcms_c_write_inits(wlc_hw, ucode->d11n0initvals16);
3225 else 3222 else
3226 wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev" 3223 brcms_err(core, "%s: wl%d: unsupported phy in corerev"
3227 " %d\n", __func__, wlc_hw->unit, 3224 " %d\n", __func__, wlc_hw->unit,
3228 wlc_hw->corerev); 3225 wlc_hw->corerev);
3229 } else if (D11REV_IS(wlc_hw->corerev, 24)) { 3226 } else if (D11REV_IS(wlc_hw->corerev, 24)) {
3230 if (BRCMS_ISLCNPHY(wlc_hw->band)) 3227 if (BRCMS_ISLCNPHY(wlc_hw->band))
3231 brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24); 3228 brcms_c_write_inits(wlc_hw, ucode->d11lcn0initvals24);
3232 else 3229 else
3233 wiphy_err(wiphy, "%s: wl%d: unsupported phy in corerev" 3230 brcms_err(core, "%s: wl%d: unsupported phy in corerev"
3234 " %d\n", __func__, wlc_hw->unit, 3231 " %d\n", __func__, wlc_hw->unit,
3235 wlc_hw->corerev); 3232 wlc_hw->corerev);
3236 } else { 3233 } else {
3237 wiphy_err(wiphy, "%s: wl%d: unsupported corerev %d\n", 3234 brcms_err(core, "%s: wl%d: unsupported corerev %d\n",
3238 __func__, wlc_hw->unit, wlc_hw->corerev); 3235 __func__, wlc_hw->unit, wlc_hw->corerev);
3239 } 3236 }
3240 3237
@@ -3276,7 +3273,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
3276 err = -1; 3273 err = -1;
3277 } 3274 }
3278 if (err != 0) 3275 if (err != 0)
3279 wiphy_err(wiphy, "wlc_coreinit: txfifo mismatch: ucode size %d" 3276 brcms_err(core, "wlc_coreinit: txfifo mismatch: ucode size %d"
3280 " driver size %d index %d\n", buf[i], 3277 " driver size %d index %d\n", buf[i],
3281 wlc_hw->xmtfifo_sz[i], i); 3278 wlc_hw->xmtfifo_sz[i], i);
3282 3279
@@ -3359,8 +3356,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
3359 bool fastclk; 3356 bool fastclk;
3360 struct brcms_c_info *wlc = wlc_hw->wlc; 3357 struct brcms_c_info *wlc = wlc_hw->wlc;
3361 3358
3362 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
3363
3364 /* request FAST clock if not on */ 3359 /* request FAST clock if not on */
3365 fastclk = wlc_hw->forcefastclk; 3360 fastclk = wlc_hw->forcefastclk;
3366 if (!fastclk) 3361 if (!fastclk)
@@ -3453,7 +3448,7 @@ static void brcms_c_rate_lookup_init(struct brcms_c_info *wlc,
3453 rate = (rateset->rates[i] & BRCMS_RATE_MASK); 3448 rate = (rateset->rates[i] & BRCMS_RATE_MASK);
3454 3449
3455 if (rate > BRCM_MAXRATE) { 3450 if (rate > BRCM_MAXRATE) {
3456 wiphy_err(wlc->wiphy, "brcms_c_rate_lookup_init: " 3451 brcms_err(wlc->hw->d11core, "brcms_c_rate_lookup_init: "
3457 "invalid rate 0x%X in rate set\n", 3452 "invalid rate 0x%X in rate set\n",
3458 rateset->rates[i]); 3453 rateset->rates[i]);
3459 continue; 3454 continue;
@@ -3529,7 +3524,6 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
3529 uint parkband; 3524 uint parkband;
3530 uint i, band_order[2]; 3525 uint i, band_order[2];
3531 3526
3532 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
3533 /* 3527 /*
3534 * We might have been bandlocked during down and the chip 3528 * We might have been bandlocked during down and the chip
3535 * power-cycled (hibernate). Figure out the right band to park on 3529 * power-cycled (hibernate). Figure out the right band to park on
@@ -3710,8 +3704,8 @@ static void brcms_c_set_ratetable(struct brcms_c_info *wlc)
3710/* band-specific init */ 3704/* band-specific init */
3711static void brcms_c_bsinit(struct brcms_c_info *wlc) 3705static void brcms_c_bsinit(struct brcms_c_info *wlc)
3712{ 3706{
3713 BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", 3707 brcms_dbg_info(wlc->hw->d11core, "wl%d: bandunit %d\n",
3714 wlc->pub->unit, wlc->band->bandunit); 3708 wlc->pub->unit, wlc->band->bandunit);
3715 3709
3716 /* write ucode ACK/CTS rate table */ 3710 /* write ucode ACK/CTS rate table */
3717 brcms_c_set_ratetable(wlc); 3711 brcms_c_set_ratetable(wlc);
@@ -3734,7 +3728,8 @@ brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
3734 isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM : 3728 isOFDM ? M_TX_IDLE_BUSY_RATIO_X_16_OFDM :
3735 M_TX_IDLE_BUSY_RATIO_X_16_CCK; 3729 M_TX_IDLE_BUSY_RATIO_X_16_CCK;
3736 if (duty_cycle > 100 || duty_cycle < 0) { 3730 if (duty_cycle > 100 || duty_cycle < 0) {
3737 wiphy_err(wlc->wiphy, "wl%d: duty cycle value off limit\n", 3731 brcms_err(wlc->hw->d11core,
3732 "wl%d: duty cycle value off limit\n",
3738 wlc->pub->unit); 3733 wlc->pub->unit);
3739 return -EINVAL; 3734 return -EINVAL;
3740 } 3735 }
@@ -3752,40 +3747,6 @@ brcms_c_duty_cycle_set(struct brcms_c_info *wlc, int duty_cycle, bool isOFDM,
3752 return 0; 3747 return 0;
3753} 3748}
3754 3749
3755/*
3756 * Initialize the base precedence map for dequeueing
3757 * from txq based on WME settings
3758 */
3759static void brcms_c_tx_prec_map_init(struct brcms_c_info *wlc)
3760{
3761 wlc->tx_prec_map = BRCMS_PREC_BMP_ALL;
3762 memset(wlc->fifo2prec_map, 0, NFIFO * sizeof(u16));
3763
3764 wlc->fifo2prec_map[TX_AC_BK_FIFO] = BRCMS_PREC_BMP_AC_BK;
3765 wlc->fifo2prec_map[TX_AC_BE_FIFO] = BRCMS_PREC_BMP_AC_BE;
3766 wlc->fifo2prec_map[TX_AC_VI_FIFO] = BRCMS_PREC_BMP_AC_VI;
3767 wlc->fifo2prec_map[TX_AC_VO_FIFO] = BRCMS_PREC_BMP_AC_VO;
3768}
3769
3770static void
3771brcms_c_txflowcontrol_signal(struct brcms_c_info *wlc,
3772 struct brcms_txq_info *qi, bool on, int prio)
3773{
3774 /* transmit flowcontrol is not yet implemented */
3775}
3776
3777static void brcms_c_txflowcontrol_reset(struct brcms_c_info *wlc)
3778{
3779 struct brcms_txq_info *qi;
3780
3781 for (qi = wlc->tx_queues; qi != NULL; qi = qi->next) {
3782 if (qi->stopped) {
3783 brcms_c_txflowcontrol_signal(wlc, qi, OFF, ALLPRIO);
3784 qi->stopped = 0;
3785 }
3786 }
3787}
3788
3789/* push sw hps and wake state through hardware */ 3750/* push sw hps and wake state through hardware */
3790static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc) 3751static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
3791{ 3752{
@@ -3795,7 +3756,8 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
3795 3756
3796 hps = brcms_c_ps_allowed(wlc); 3757 hps = brcms_c_ps_allowed(wlc);
3797 3758
3798 BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps); 3759 brcms_dbg_mac80211(wlc->hw->d11core, "wl%d: hps %d\n", wlc->pub->unit,
3760 hps);
3799 3761
3800 v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol)); 3762 v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
3801 v2 = MCTL_WAKE; 3763 v2 = MCTL_WAKE;
@@ -3881,7 +3843,8 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
3881{ 3843{
3882 uint bandunit; 3844 uint bandunit;
3883 3845
3884 BCMMSG(wlc_hw->wlc->wiphy, "wl%d: 0x%x\n", wlc_hw->unit, chanspec); 3846 brcms_dbg_mac80211(wlc_hw->d11core, "wl%d: 0x%x\n", wlc_hw->unit,
3847 chanspec);
3885 3848
3886 wlc_hw->chanspec = chanspec; 3849 wlc_hw->chanspec = chanspec;
3887 3850
@@ -3942,7 +3905,7 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
3942 u16 old_chanspec = wlc->chanspec; 3905 u16 old_chanspec = wlc->chanspec;
3943 3906
3944 if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) { 3907 if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
3945 wiphy_err(wlc->wiphy, "wl%d: %s: Bad channel %d\n", 3908 brcms_err(wlc->hw->d11core, "wl%d: %s: Bad channel %d\n",
3946 wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec)); 3909 wlc->pub->unit, __func__, CHSPEC_CHANNEL(chanspec));
3947 return; 3910 return;
3948 } 3911 }
@@ -3953,8 +3916,8 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
3953 if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) { 3916 if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
3954 switchband = true; 3917 switchband = true;
3955 if (wlc->bandlocked) { 3918 if (wlc->bandlocked) {
3956 wiphy_err(wlc->wiphy, "wl%d: %s: chspec %d " 3919 brcms_err(wlc->hw->d11core,
3957 "band is locked!\n", 3920 "wl%d: %s: chspec %d band is locked!\n",
3958 wlc->pub->unit, __func__, 3921 wlc->pub->unit, __func__,
3959 CHSPEC_CHANNEL(chanspec)); 3922 CHSPEC_CHANNEL(chanspec));
3960 return; 3923 return;
@@ -4018,6 +3981,10 @@ void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
4018 */ 3981 */
4019void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val) 3982void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val)
4020{ 3983{
3984 /*
3985 * Cannot use brcms_dbg_* here because this function is called
3986 * before wlc is sufficiently initialized.
3987 */
4021 BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val); 3988 BCMMSG(wlc->wiphy, "idx %d, val %d\n", idx, val);
4022 3989
4023 switch (idx) { 3990 switch (idx) {
@@ -4090,8 +4057,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
4090 4057
4091 /* Only apply params if the core is out of reset and has clocks */ 4058 /* Only apply params if the core is out of reset and has clocks */
4092 if (!wlc->clk) { 4059 if (!wlc->clk) {
4093 wiphy_err(wlc->wiphy, "wl%d: %s : no-clock\n", wlc->pub->unit, 4060 brcms_err(wlc->hw->d11core, "wl%d: %s : no-clock\n",
4094 __func__); 4061 wlc->pub->unit, __func__);
4095 return; 4062 return;
4096 } 4063 }
4097 4064
@@ -4109,7 +4076,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
4109 4076
4110 if (acp_shm.aifs < EDCF_AIFSN_MIN 4077 if (acp_shm.aifs < EDCF_AIFSN_MIN
4111 || acp_shm.aifs > EDCF_AIFSN_MAX) { 4078 || acp_shm.aifs > EDCF_AIFSN_MAX) {
4112 wiphy_err(wlc->wiphy, "wl%d: edcf_setparams: bad " 4079 brcms_err(wlc->hw->d11core, "wl%d: edcf_setparams: bad "
4113 "aifs %d\n", wlc->pub->unit, acp_shm.aifs); 4080 "aifs %d\n", wlc->pub->unit, acp_shm.aifs);
4114 } else { 4081 } else {
4115 acp_shm.cwmin = params->cw_min; 4082 acp_shm.cwmin = params->cw_min;
@@ -4224,8 +4191,8 @@ static void brcms_c_radio_timer(void *arg)
4224 struct brcms_c_info *wlc = (struct brcms_c_info *) arg; 4191 struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
4225 4192
4226 if (brcms_deviceremoved(wlc)) { 4193 if (brcms_deviceremoved(wlc)) {
4227 wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit, 4194 brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
4228 __func__); 4195 wlc->pub->unit, __func__);
4229 brcms_down(wlc->wl); 4196 brcms_down(wlc->wl);
4230 return; 4197 return;
4231 } 4198 }
@@ -4238,8 +4205,6 @@ static void brcms_b_watchdog(struct brcms_c_info *wlc)
4238{ 4205{
4239 struct brcms_hardware *wlc_hw = wlc->hw; 4206 struct brcms_hardware *wlc_hw = wlc->hw;
4240 4207
4241 BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
4242
4243 if (!wlc_hw->up) 4208 if (!wlc_hw->up)
4244 return; 4209 return;
4245 4210
@@ -4258,14 +4223,14 @@ static void brcms_b_watchdog(struct brcms_c_info *wlc)
4258/* common watchdog code */ 4223/* common watchdog code */
4259static void brcms_c_watchdog(struct brcms_c_info *wlc) 4224static void brcms_c_watchdog(struct brcms_c_info *wlc)
4260{ 4225{
4261 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 4226 brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
4262 4227
4263 if (!wlc->pub->up) 4228 if (!wlc->pub->up)
4264 return; 4229 return;
4265 4230
4266 if (brcms_deviceremoved(wlc)) { 4231 if (brcms_deviceremoved(wlc)) {
4267 wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n", wlc->pub->unit, 4232 brcms_err(wlc->hw->d11core, "wl%d: %s: dead chip\n",
4268 __func__); 4233 wlc->pub->unit, __func__);
4269 brcms_down(wlc->wl); 4234 brcms_down(wlc->wl);
4270 return; 4235 return;
4271 } 4236 }
@@ -4437,13 +4402,13 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4437 struct ssb_sprom *sprom = &core->bus->sprom; 4402 struct ssb_sprom *sprom = &core->bus->sprom;
4438 4403
4439 if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) 4404 if (core->bus->hosttype == BCMA_HOSTTYPE_PCI)
4440 BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, 4405 brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
4441 pcidev->vendor, 4406 pcidev->vendor,
4442 pcidev->device); 4407 pcidev->device);
4443 else 4408 else
4444 BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, 4409 brcms_dbg_info(core, "wl%d: vendor 0x%x device 0x%x\n", unit,
4445 core->bus->boardinfo.vendor, 4410 core->bus->boardinfo.vendor,
4446 core->bus->boardinfo.type); 4411 core->bus->boardinfo.type);
4447 4412
4448 wme = true; 4413 wme = true;
4449 4414
@@ -4535,7 +4500,8 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4535 4500
4536 /* check device id(srom, nvram etc.) to set bands */ 4501 /* check device id(srom, nvram etc.) to set bands */
4537 if (wlc_hw->deviceid == BCM43224_D11N_ID || 4502 if (wlc_hw->deviceid == BCM43224_D11N_ID ||
4538 wlc_hw->deviceid == BCM43224_D11N_ID_VEN1) 4503 wlc_hw->deviceid == BCM43224_D11N_ID_VEN1 ||
4504 wlc_hw->deviceid == BCM43224_CHIP_ID)
4539 /* Dualband boards */ 4505 /* Dualband boards */
4540 wlc_hw->_nbands = 2; 4506 wlc_hw->_nbands = 2;
4541 else 4507 else
@@ -4715,8 +4681,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
4715 goto fail; 4681 goto fail;
4716 } 4682 }
4717 4683
4718 BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x\n", 4684 brcms_dbg_info(wlc_hw->d11core, "deviceid 0x%x nbands %d board 0x%x\n",
4719 wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih)); 4685 wlc_hw->deviceid, wlc_hw->_nbands,
4686 ai_get_boardtype(wlc_hw->sih));
4720 4687
4721 return err; 4688 return err;
4722 4689
@@ -4836,56 +4803,6 @@ static void brcms_c_bss_default_init(struct brcms_c_info *wlc)
4836 bi->flags |= BRCMS_BSS_HT; 4803 bi->flags |= BRCMS_BSS_HT;
4837} 4804}
4838 4805
4839static struct brcms_txq_info *brcms_c_txq_alloc(struct brcms_c_info *wlc)
4840{
4841 struct brcms_txq_info *qi, *p;
4842
4843 qi = kzalloc(sizeof(struct brcms_txq_info), GFP_ATOMIC);
4844 if (qi != NULL) {
4845 /*
4846 * Have enough room for control packets along with HI watermark
4847 * Also, add room to txq for total psq packets if all the SCBs
4848 * leave PS mode. The watermark for flowcontrol to OS packets
4849 * will remain the same
4850 */
4851 brcmu_pktq_init(&qi->q, BRCMS_PREC_COUNT,
4852 2 * BRCMS_DATAHIWAT + PKTQ_LEN_DEFAULT);
4853
4854 /* add this queue to the the global list */
4855 p = wlc->tx_queues;
4856 if (p == NULL) {
4857 wlc->tx_queues = qi;
4858 } else {
4859 while (p->next != NULL)
4860 p = p->next;
4861 p->next = qi;
4862 }
4863 }
4864 return qi;
4865}
4866
4867static void brcms_c_txq_free(struct brcms_c_info *wlc,
4868 struct brcms_txq_info *qi)
4869{
4870 struct brcms_txq_info *p;
4871
4872 if (qi == NULL)
4873 return;
4874
4875 /* remove the queue from the linked list */
4876 p = wlc->tx_queues;
4877 if (p == qi)
4878 wlc->tx_queues = p->next;
4879 else {
4880 while (p != NULL && p->next != qi)
4881 p = p->next;
4882 if (p != NULL)
4883 p->next = p->next->next;
4884 }
4885
4886 kfree(qi);
4887}
4888
4889static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap) 4806static void brcms_c_update_mimo_band_bwcap(struct brcms_c_info *wlc, u8 bwcap)
4890{ 4807{
4891 uint i; 4808 uint i;
@@ -4991,8 +4908,6 @@ uint brcms_c_detach(struct brcms_c_info *wlc)
4991 if (wlc == NULL) 4908 if (wlc == NULL)
4992 return 0; 4909 return 0;
4993 4910
4994 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
4995
4996 callbacks += brcms_b_detach(wlc); 4911 callbacks += brcms_b_detach(wlc);
4997 4912
4998 /* delete software timers */ 4913 /* delete software timers */
@@ -5005,10 +4920,6 @@ uint brcms_c_detach(struct brcms_c_info *wlc)
5005 4920
5006 brcms_c_detach_module(wlc); 4921 brcms_c_detach_module(wlc);
5007 4922
5008
5009 while (wlc->tx_queues != NULL)
5010 brcms_c_txq_free(wlc, wlc->tx_queues);
5011
5012 brcms_c_detach_mfree(wlc); 4923 brcms_c_detach_mfree(wlc);
5013 return callbacks; 4924 return callbacks;
5014} 4925}
@@ -5026,7 +4937,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
5026 if (wlc_hw->wlc->pub->hw_up) 4937 if (wlc_hw->wlc->pub->hw_up)
5027 return; 4938 return;
5028 4939
5029 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 4940 brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
5030 4941
5031 /* 4942 /*
5032 * Enable pll and xtal, initialize the power control registers, 4943 * Enable pll and xtal, initialize the power control registers,
@@ -5063,7 +4974,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
5063 4974
5064static int brcms_b_up_prep(struct brcms_hardware *wlc_hw) 4975static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
5065{ 4976{
5066 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit); 4977 brcms_dbg_info(wlc_hw->d11core, "wl%d\n", wlc_hw->unit);
5067 4978
5068 /* 4979 /*
5069 * Enable pll and xtal, initialize the power control registers, 4980 * Enable pll and xtal, initialize the power control registers,
@@ -5077,7 +4988,7 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
5077 * Configure pci/pcmcia here instead of in brcms_c_attach() 4988 * Configure pci/pcmcia here instead of in brcms_c_attach()
5078 * to allow mfg hotswap: down, hotswap (chip power cycle), up. 4989 * to allow mfg hotswap: down, hotswap (chip power cycle), up.
5079 */ 4990 */
5080 bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci, wlc_hw->d11core, 4991 bcma_core_pci_irq_ctl(&wlc_hw->d11core->bus->drv_pci[0], wlc_hw->d11core,
5081 true); 4992 true);
5082 4993
5083 /* 4994 /*
@@ -5102,8 +5013,6 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
5102 5013
5103static int brcms_b_up_finish(struct brcms_hardware *wlc_hw) 5014static int brcms_b_up_finish(struct brcms_hardware *wlc_hw)
5104{ 5015{
5105 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
5106
5107 wlc_hw->up = true; 5016 wlc_hw->up = true;
5108 wlc_phy_hw_state_upd(wlc_hw->band->pi, true); 5017 wlc_phy_hw_state_upd(wlc_hw->band->pi, true);
5109 5018
@@ -5135,7 +5044,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
5135{ 5044{
5136 struct ieee80211_channel *ch; 5045 struct ieee80211_channel *ch;
5137 5046
5138 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 5047 brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
5139 5048
5140 /* HW is turned off so don't try to access it */ 5049 /* HW is turned off so don't try to access it */
5141 if (wlc->pub->hw_off || brcms_deviceremoved(wlc)) 5050 if (wlc->pub->hw_off || brcms_deviceremoved(wlc))
@@ -5176,8 +5085,8 @@ int brcms_c_up(struct brcms_c_info *wlc)
5176 WL_RADIO_HW_DISABLE); 5085 WL_RADIO_HW_DISABLE);
5177 5086
5178 if (bsscfg->enable && bsscfg->BSS) 5087 if (bsscfg->enable && bsscfg->BSS)
5179 wiphy_err(wlc->wiphy, "wl%d: up" 5088 brcms_err(wlc->hw->d11core,
5180 ": rfdisable -> " 5089 "wl%d: up: rfdisable -> "
5181 "bsscfg_disable()\n", 5090 "bsscfg_disable()\n",
5182 wlc->pub->unit); 5091 wlc->pub->unit);
5183 } 5092 }
@@ -5237,8 +5146,6 @@ static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
5237 bool dev_gone; 5146 bool dev_gone;
5238 uint callbacks = 0; 5147 uint callbacks = 0;
5239 5148
5240 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
5241
5242 if (!wlc_hw->up) 5149 if (!wlc_hw->up)
5243 return callbacks; 5150 return callbacks;
5244 5151
@@ -5265,8 +5172,6 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
5265 uint callbacks = 0; 5172 uint callbacks = 0;
5266 bool dev_gone; 5173 bool dev_gone;
5267 5174
5268 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
5269
5270 if (!wlc_hw->up) 5175 if (!wlc_hw->up)
5271 return callbacks; 5176 return callbacks;
5272 5177
@@ -5314,14 +5219,14 @@ uint brcms_c_down(struct brcms_c_info *wlc)
5314 uint callbacks = 0; 5219 uint callbacks = 0;
5315 int i; 5220 int i;
5316 bool dev_gone = false; 5221 bool dev_gone = false;
5317 struct brcms_txq_info *qi;
5318 5222
5319 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 5223 brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
5320 5224
5321 /* check if we are already in the going down path */ 5225 /* check if we are already in the going down path */
5322 if (wlc->going_down) { 5226 if (wlc->going_down) {
5323 wiphy_err(wlc->wiphy, "wl%d: %s: Driver going down so return" 5227 brcms_err(wlc->hw->d11core,
5324 "\n", wlc->pub->unit, __func__); 5228 "wl%d: %s: Driver going down so return\n",
5229 wlc->pub->unit, __func__);
5325 return 0; 5230 return 0;
5326 } 5231 }
5327 if (!wlc->pub->up) 5232 if (!wlc->pub->up)
@@ -5353,13 +5258,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
5353 5258
5354 wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL); 5259 wlc_phy_mute_upd(wlc->band->pi, false, PHY_MUTE_ALL);
5355 5260
5356 /* clear txq flow control */
5357 brcms_c_txflowcontrol_reset(wlc);
5358
5359 /* flush tx queues */
5360 for (qi = wlc->tx_queues; qi != NULL; qi = qi->next)
5361 brcmu_pktq_flush(&qi->q, true, NULL, NULL);
5362
5363 callbacks += brcms_b_down_finish(wlc->hw); 5261 callbacks += brcms_b_down_finish(wlc->hw);
5364 5262
5365 /* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */ 5263 /* brcms_b_down_finish has done brcms_c_coredisable(). so clk is off */
@@ -5441,7 +5339,7 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
5441 5339
5442 default: 5340 default:
5443 /* Error */ 5341 /* Error */
5444 wiphy_err(wlc->wiphy, "wl%d: %s: invalid gmode %d\n", 5342 brcms_err(wlc->hw->d11core, "wl%d: %s: invalid gmode %d\n",
5445 wlc->pub->unit, __func__, gmode); 5343 wlc->pub->unit, __func__, gmode);
5446 return -ENOTSUPP; 5344 return -ENOTSUPP;
5447 } 5345 }
@@ -5745,45 +5643,6 @@ int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
5745 return -ENODATA; 5643 return -ENODATA;
5746} 5644}
5747 5645
5748void brcms_c_print_txstatus(struct tx_status *txs)
5749{
5750 pr_debug("\ntxpkt (MPDU) Complete\n");
5751
5752 pr_debug("FrameID: %04x TxStatus: %04x\n", txs->frameid, txs->status);
5753
5754 pr_debug("[15:12] %d frame attempts\n",
5755 (txs->status & TX_STATUS_FRM_RTX_MASK) >>
5756 TX_STATUS_FRM_RTX_SHIFT);
5757 pr_debug(" [11:8] %d rts attempts\n",
5758 (txs->status & TX_STATUS_RTS_RTX_MASK) >>
5759 TX_STATUS_RTS_RTX_SHIFT);
5760 pr_debug(" [7] %d PM mode indicated\n",
5761 txs->status & TX_STATUS_PMINDCTD ? 1 : 0);
5762 pr_debug(" [6] %d intermediate status\n",
5763 txs->status & TX_STATUS_INTERMEDIATE ? 1 : 0);
5764 pr_debug(" [5] %d AMPDU\n",
5765 txs->status & TX_STATUS_AMPDU ? 1 : 0);
5766 pr_debug(" [4:2] %d Frame Suppressed Reason (%s)\n",
5767 (txs->status & TX_STATUS_SUPR_MASK) >> TX_STATUS_SUPR_SHIFT,
5768 (const char *[]) {
5769 "None",
5770 "PMQ Entry",
5771 "Flush request",
5772 "Previous frag failure",
5773 "Channel mismatch",
5774 "Lifetime Expiry",
5775 "Underflow"
5776 } [(txs->status & TX_STATUS_SUPR_MASK) >>
5777 TX_STATUS_SUPR_SHIFT]);
5778 pr_debug(" [1] %d acked\n",
5779 txs->status & TX_STATUS_ACK_RCV ? 1 : 0);
5780
5781 pr_debug("LastTxTime: %04x Seq: %04x PHYTxStatus: %04x RxAckRSSI: %04x RxAckSQ: %04x\n",
5782 txs->lasttxtime, txs->sequence, txs->phyerr,
5783 (txs->ackphyrxsh & PRXS1_JSSI_MASK) >> PRXS1_JSSI_SHIFT,
5784 (txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
5785}
5786
5787static bool brcms_c_chipmatch_pci(struct bcma_device *core) 5646static bool brcms_c_chipmatch_pci(struct bcma_device *core)
5788{ 5647{
5789 struct pci_dev *pcidev = core->bus->host_pci; 5648 struct pci_dev *pcidev = core->bus->host_pci;
@@ -5795,7 +5654,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
5795 return false; 5654 return false;
5796 } 5655 }
5797 5656
5798 if (device == BCM43224_D11N_ID_VEN1) 5657 if (device == BCM43224_D11N_ID_VEN1 || device == BCM43224_CHIP_ID)
5799 return true; 5658 return true;
5800 if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID)) 5659 if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
5801 return true; 5660 return true;
@@ -5832,184 +5691,6 @@ bool brcms_c_chipmatch(struct bcma_device *core)
5832 } 5691 }
5833} 5692}
5834 5693
#if defined(DEBUG)
/*
 * Dump a d11 transmit descriptor (and the 48-byte plcp header that
 * follows it) to the debug log: first as a raw hex dump, then field by
 * field with the little-endian members converted to host order.
 */
void brcms_c_print_txdesc(struct d11txh *txh)
{
	u16 ptcw = le16_to_cpu(txh->PhyTxControlWord);

	/* raw descriptor plus plcp header */
	brcmu_dbg_hex_dump(txh, sizeof(struct d11txh) + 48,
			   "Raw TxDesc + plcp header:\n");

	pr_debug("TxCtlLow: %04x ", le16_to_cpu(txh->MacTxControlLow));
	pr_debug("TxCtlHigh: %04x ", le16_to_cpu(txh->MacTxControlHigh));
	pr_debug("FC: %04x ", le16_to_cpu(txh->MacFrameControl));
	pr_debug("FES Time: %04x\n", le16_to_cpu(txh->TxFesTimeNormal));
	pr_debug("PhyCtl: %04x%s ", ptcw,
		 (ptcw & PHY_TXC_SHORT_HDR) ? " short" : "");
	pr_debug("PhyCtl_1: %04x ", le16_to_cpu(txh->PhyTxControlWord_1));
	pr_debug("PhyCtl_1_Fbr: %04x\n",
		 le16_to_cpu(txh->PhyTxControlWord_1_Fbr));
	pr_debug("PhyCtl_1_Rts: %04x ",
		 le16_to_cpu(txh->PhyTxControlWord_1_Rts));
	pr_debug("PhyCtl_1_Fbr_Rts: %04x\n",
		 le16_to_cpu(txh->PhyTxControlWord_1_FbrRts));
	pr_debug("MainRates: %04x ", le16_to_cpu(txh->MainRates));
	pr_debug("XtraFrameTypes: %04x ", le16_to_cpu(txh->XtraFrameTypes));
	pr_debug("\n");

	print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, txh->IV,
			     sizeof(txh->IV));
	print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET, txh->TxFrameRA,
			     sizeof(txh->TxFrameRA));

	pr_debug("Fb FES Time: %04x ", le16_to_cpu(txh->TxFesTimeFallback));
	print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
			     txh->RTSPLCPFallback,
			     sizeof(txh->RTSPLCPFallback));
	pr_debug("RTS DUR: %04x ", le16_to_cpu(txh->RTSDurFallback));
	print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
			     txh->FragPLCPFallback,
			     sizeof(txh->FragPLCPFallback));
	pr_debug("DUR: %04x", le16_to_cpu(txh->FragDurFallback));
	pr_debug("\n");

	pr_debug("MModeLen: %04x ", le16_to_cpu(txh->MModeLen));
	pr_debug("MModeFbrLen: %04x\n", le16_to_cpu(txh->MModeFbrLen));

	pr_debug("FrameID: %04x\n", le16_to_cpu(txh->TxFrameID));
	pr_debug("TxStatus: %04x\n", le16_to_cpu(txh->TxStatus));

	pr_debug("MaxNumMpdu: %04x\n", le16_to_cpu(txh->MaxNMpdus));
	pr_debug("MaxAggbyte: %04x\n", le16_to_cpu(txh->MaxABytes_MRT));
	pr_debug("MaxAggbyte_fb: %04x\n", le16_to_cpu(txh->MaxABytes_FBR));
	pr_debug("MinByte: %04x\n", le16_to_cpu(txh->MinMBytes));

	print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
			     txh->RTSPhyHeader, sizeof(txh->RTSPhyHeader));
	print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
			     (u8 *)&txh->rts_frame, sizeof(txh->rts_frame));
	pr_debug("\n");
}
#endif /* defined(DEBUG) */
5917
#if defined(DEBUG)
/*
 * Render the set bits of @flags into @buf as a space-separated list of
 * names taken from the descriptor table @bd (terminated by a 0 bit).
 * Any bits without a table entry are emitted once as a hex literal.
 * When the buffer is too small the output is truncated and the last
 * character is replaced by '>'.
 *
 * Returns the number of characters written, excluding the NUL.
 */
static int
brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
		     int len)
{
	int i;
	char *p = buf;
	char hexstr[16];
	int nlen, need;
	u32 bit;
	const char *name;

	if (len < 2 || !buf)
		return 0;

	buf[0] = '\0';

	for (i = 0; flags != 0; i++) {
		bit = bd[i].bit;
		name = bd[i].name;
		if (bit == 0 && flags != 0) {
			/* print any unnamed bits */
			snprintf(hexstr, 16, "0x%X", flags);
			name = hexstr;
			flags = 0;	/* exit loop */
		} else if ((flags & bit) == 0)
			continue;
		flags &= ~bit;

		/*
		 * Space consumed by THIS flag alone: its name plus a
		 * separating blank when more flags follow.  The old code
		 * accumulated this across iterations while also
		 * subtracting it from len each pass, double-counting
		 * already-consumed space and truncating far too early.
		 */
		nlen = strlen(name);
		need = nlen + (flags != 0 ? 1 : 0);

		/* need room for the NUL terminator as well */
		if (len <= need)
			break;

		memcpy(p, name, nlen);
		p += nlen;
		if (flags != 0)
			*p++ = ' ';
		*p = '\0';
		len -= need;
	}

	/* indicate the str was too short */
	if (flags != 0) {
		if (len < 2)
			p -= 2 - len;	/* overwrite last char */
		p += snprintf(p, 2, ">");
	}

	return (int)(p - buf);
}
#endif /* defined(DEBUG) */
5973
#if defined(DEBUG)
/*
 * Dump a d11 receive descriptor to the debug log: the raw bytes
 * followed by the decoded frame size, PHY status words, MAC status
 * flags (symbolic), aggregation type and TSF timestamp.
 */
void brcms_c_print_rxh(struct d11rxhdr *rxh)
{
	static const struct brcms_c_bit_desc macstat_flags[] = {
		{RXS_FCSERR, "FCSErr"},
		{RXS_RESPFRAMETX, "Reply"},
		{RXS_PBPRES, "PADDING"},
		{RXS_DECATMPT, "DeCr"},
		{RXS_DECERR, "DeCrErr"},
		{RXS_BCNSENT, "Bcn"},
		{0, NULL}
	};
	char flagstr[64];
	char lenbuf[20];
	u16 len = rxh->RxFrameSize;

	brcmu_dbg_hex_dump(rxh, sizeof(struct d11rxhdr), "Raw RxDesc:\n");

	brcms_c_format_flags(macstat_flags, rxh->RxStatus1, flagstr, 64);
	snprintf(lenbuf, sizeof(lenbuf), "0x%x", len);

	pr_debug("RxFrameSize: %6s (%d)%s\n", lenbuf, len,
		 (rxh->PhyRxStatus_0 & PRXS0_SHORTH) ? " short preamble" : "");
	pr_debug("RxPHYStatus: %04x %04x %04x %04x\n",
		 rxh->PhyRxStatus_0, rxh->PhyRxStatus_1,
		 rxh->PhyRxStatus_2, rxh->PhyRxStatus_3);
	pr_debug("RxMACStatus: %x %s\n", rxh->RxStatus1, flagstr);
	pr_debug("RXMACaggtype: %x\n",
		 (rxh->RxStatus2 & RXS_AGGTYPE_MASK));
	pr_debug("RxTSFTime: %04x\n", rxh->RxTSFTime);
}
#endif /* defined(DEBUG) */
6012
6013u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate) 5694u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
6014{ 5695{
6015 u16 table_ptr; 5696 u16 table_ptr;
@@ -6033,86 +5714,6 @@ u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate)
6033 return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2)); 5714 return 2 * brcms_b_read_shm(wlc_hw, table_ptr + (index * 2));
6034} 5715}
6035 5716
6036static bool
6037brcms_c_prec_enq_head(struct brcms_c_info *wlc, struct pktq *q,
6038 struct sk_buff *pkt, int prec, bool head)
6039{
6040 struct sk_buff *p;
6041 int eprec = -1; /* precedence to evict from */
6042
6043 /* Determine precedence from which to evict packet, if any */
6044 if (pktq_pfull(q, prec))
6045 eprec = prec;
6046 else if (pktq_full(q)) {
6047 p = brcmu_pktq_peek_tail(q, &eprec);
6048 if (eprec > prec) {
6049 wiphy_err(wlc->wiphy, "%s: Failing: eprec %d > prec %d"
6050 "\n", __func__, eprec, prec);
6051 return false;
6052 }
6053 }
6054
6055 /* Evict if needed */
6056 if (eprec >= 0) {
6057 bool discard_oldest;
6058
6059 discard_oldest = ac_bitmap_tst(0, eprec);
6060
6061 /* Refuse newer packet unless configured to discard oldest */
6062 if (eprec == prec && !discard_oldest) {
6063 wiphy_err(wlc->wiphy, "%s: No where to go, prec == %d"
6064 "\n", __func__, prec);
6065 return false;
6066 }
6067
6068 /* Evict packet according to discard policy */
6069 p = discard_oldest ? brcmu_pktq_pdeq(q, eprec) :
6070 brcmu_pktq_pdeq_tail(q, eprec);
6071 brcmu_pkt_buf_free_skb(p);
6072 }
6073
6074 /* Enqueue */
6075 if (head)
6076 p = brcmu_pktq_penq_head(q, prec, pkt);
6077 else
6078 p = brcmu_pktq_penq(q, prec, pkt);
6079
6080 return true;
6081}
6082
6083/*
6084 * Attempts to queue a packet onto a multiple-precedence queue,
6085 * if necessary evicting a lower precedence packet from the queue.
6086 *
6087 * 'prec' is the precedence number that has already been mapped
6088 * from the packet priority.
6089 *
6090 * Returns true if packet consumed (queued), false if not.
6091 */
6092static bool brcms_c_prec_enq(struct brcms_c_info *wlc, struct pktq *q,
6093 struct sk_buff *pkt, int prec)
6094{
6095 return brcms_c_prec_enq_head(wlc, q, pkt, prec, false);
6096}
6097
6098void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
6099 struct sk_buff *sdu, uint prec)
6100{
6101 struct brcms_txq_info *qi = wlc->pkt_queue; /* Check me */
6102 struct pktq *q = &qi->q;
6103 int prio;
6104
6105 prio = sdu->priority;
6106
6107 if (!brcms_c_prec_enq(wlc, q, sdu, prec)) {
6108 /*
6109 * we might hit this condtion in case
6110 * packet flooding from mac80211 stack
6111 */
6112 brcmu_pkt_buf_free_skb(sdu);
6113 }
6114}
6115
6116/* 5717/*
6117 * bcmc_fid_generate: 5718 * bcmc_fid_generate:
6118 * Generate frame ID for a BCMC packet. The frag field is not used 5719 * Generate frame ID for a BCMC packet. The frag field is not used
@@ -6140,8 +5741,6 @@ brcms_c_calc_ack_time(struct brcms_c_info *wlc, u32 rspec,
6140{ 5741{
6141 uint dur = 0; 5742 uint dur = 0;
6142 5743
6143 BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d\n",
6144 wlc->pub->unit, rspec, preamble_type);
6145 /* 5744 /*
6146 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that 5745 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
6147 * is less than or equal to the rate of the immediately previous 5746 * is less than or equal to the rate of the immediately previous
@@ -6159,8 +5758,6 @@ static uint
6159brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec, 5758brcms_c_calc_cts_time(struct brcms_c_info *wlc, u32 rspec,
6160 u8 preamble_type) 5759 u8 preamble_type)
6161{ 5760{
6162 BCMMSG(wlc->wiphy, "wl%d: ratespec 0x%x, preamble_type %d\n",
6163 wlc->pub->unit, rspec, preamble_type);
6164 return brcms_c_calc_ack_time(wlc, rspec, preamble_type); 5761 return brcms_c_calc_ack_time(wlc, rspec, preamble_type);
6165} 5762}
6166 5763
@@ -6168,8 +5765,6 @@ static uint
6168brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec, 5765brcms_c_calc_ba_time(struct brcms_c_info *wlc, u32 rspec,
6169 u8 preamble_type) 5766 u8 preamble_type)
6170{ 5767{
6171 BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, "
6172 "preamble_type %d\n", wlc->pub->unit, rspec, preamble_type);
6173 /* 5768 /*
6174 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that 5769 * Spec 9.6: ack rate is the highest rate in BSSBasicRateSet that
6175 * is less than or equal to the rate of the immediately previous 5770 * is less than or equal to the rate of the immediately previous
@@ -6223,9 +5818,6 @@ brcms_c_calc_frame_len(struct brcms_c_info *wlc, u32 ratespec,
6223 uint nsyms, mac_len, Ndps, kNdps; 5818 uint nsyms, mac_len, Ndps, kNdps;
6224 uint rate = rspec2rate(ratespec); 5819 uint rate = rspec2rate(ratespec);
6225 5820
6226 BCMMSG(wlc->wiphy, "wl%d: rspec 0x%x, preamble_type %d, dur %d\n",
6227 wlc->pub->unit, ratespec, preamble_type, dur);
6228
6229 if (is_mcs_rate(ratespec)) { 5821 if (is_mcs_rate(ratespec)) {
6230 uint mcs = ratespec & RSPEC_RATE_MASK; 5822 uint mcs = ratespec & RSPEC_RATE_MASK;
6231 int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec); 5823 int tot_streams = mcs_2_txstreams(mcs) + rspec_stc(ratespec);
@@ -6292,7 +5884,7 @@ static bool brcms_c_valid_rate(struct brcms_c_info *wlc, u32 rspec, int band,
6292 return true; 5884 return true;
6293 error: 5885 error:
6294 if (verbose) 5886 if (verbose)
6295 wiphy_err(wlc->wiphy, "wl%d: valid_rate: rate spec 0x%x " 5887 brcms_err(wlc->hw->d11core, "wl%d: valid_rate: rate spec 0x%x "
6296 "not in hw_rateset\n", wlc->pub->unit, rspec); 5888 "not in hw_rateset\n", wlc->pub->unit, rspec);
6297 5889
6298 return false; 5890 return false;
@@ -6302,6 +5894,7 @@ static u32
6302mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band, 5894mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6303 u32 int_val) 5895 u32 int_val)
6304{ 5896{
5897 struct bcma_device *core = wlc->hw->d11core;
6305 u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT; 5898 u8 stf = (int_val & NRATE_STF_MASK) >> NRATE_STF_SHIFT;
6306 u8 rate = int_val & NRATE_RATE_MASK; 5899 u8 rate = int_val & NRATE_RATE_MASK;
6307 u32 rspec; 5900 u32 rspec;
@@ -6318,7 +5911,7 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6318 if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) { 5911 if ((wlc->pub->_n_enab & SUPPORT_11N) && ismcs) {
6319 /* mcs only allowed when nmode */ 5912 /* mcs only allowed when nmode */
6320 if (stf > PHY_TXC1_MODE_SDM) { 5913 if (stf > PHY_TXC1_MODE_SDM) {
6321 wiphy_err(wlc->wiphy, "wl%d: %s: Invalid stf\n", 5914 brcms_err(core, "wl%d: %s: Invalid stf\n",
6322 wlc->pub->unit, __func__); 5915 wlc->pub->unit, __func__);
6323 bcmerror = -EINVAL; 5916 bcmerror = -EINVAL;
6324 goto done; 5917 goto done;
@@ -6329,8 +5922,8 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6329 if (!CHSPEC_IS40(wlc->home_chanspec) || 5922 if (!CHSPEC_IS40(wlc->home_chanspec) ||
6330 ((stf != PHY_TXC1_MODE_SISO) 5923 ((stf != PHY_TXC1_MODE_SISO)
6331 && (stf != PHY_TXC1_MODE_CDD))) { 5924 && (stf != PHY_TXC1_MODE_CDD))) {
6332 wiphy_err(wlc->wiphy, "wl%d: %s: Invalid mcs " 5925 brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
6333 "32\n", wlc->pub->unit, __func__); 5926 wlc->pub->unit, __func__);
6334 bcmerror = -EINVAL; 5927 bcmerror = -EINVAL;
6335 goto done; 5928 goto done;
6336 } 5929 }
@@ -6338,9 +5931,9 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6338 } else if (rate > HIGHEST_SINGLE_STREAM_MCS) { 5931 } else if (rate > HIGHEST_SINGLE_STREAM_MCS) {
6339 /* mcs > 7 must use stf SDM */ 5932 /* mcs > 7 must use stf SDM */
6340 if (stf != PHY_TXC1_MODE_SDM) { 5933 if (stf != PHY_TXC1_MODE_SDM) {
6341 BCMMSG(wlc->wiphy, "wl%d: enabling " 5934 brcms_dbg_mac80211(core, "wl%d: enabling "
6342 "SDM mode for mcs %d\n", 5935 "SDM mode for mcs %d\n",
6343 wlc->pub->unit, rate); 5936 wlc->pub->unit, rate);
6344 stf = PHY_TXC1_MODE_SDM; 5937 stf = PHY_TXC1_MODE_SDM;
6345 } 5938 }
6346 } else { 5939 } else {
@@ -6351,15 +5944,15 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6351 if ((stf > PHY_TXC1_MODE_STBC) || 5944 if ((stf > PHY_TXC1_MODE_STBC) ||
6352 (!BRCMS_STBC_CAP_PHY(wlc) 5945 (!BRCMS_STBC_CAP_PHY(wlc)
6353 && (stf == PHY_TXC1_MODE_STBC))) { 5946 && (stf == PHY_TXC1_MODE_STBC))) {
6354 wiphy_err(wlc->wiphy, "wl%d: %s: Invalid STBC" 5947 brcms_err(core, "wl%d: %s: Invalid STBC\n",
6355 "\n", wlc->pub->unit, __func__); 5948 wlc->pub->unit, __func__);
6356 bcmerror = -EINVAL; 5949 bcmerror = -EINVAL;
6357 goto done; 5950 goto done;
6358 } 5951 }
6359 } 5952 }
6360 } else if (is_ofdm_rate(rate)) { 5953 } else if (is_ofdm_rate(rate)) {
6361 if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) { 5954 if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
6362 wiphy_err(wlc->wiphy, "wl%d: %s: Invalid OFDM\n", 5955 brcms_err(core, "wl%d: %s: Invalid OFDM\n",
6363 wlc->pub->unit, __func__); 5956 wlc->pub->unit, __func__);
6364 bcmerror = -EINVAL; 5957 bcmerror = -EINVAL;
6365 goto done; 5958 goto done;
@@ -6367,20 +5960,20 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
6367 } else if (is_cck_rate(rate)) { 5960 } else if (is_cck_rate(rate)) {
6368 if ((cur_band->bandtype != BRCM_BAND_2G) 5961 if ((cur_band->bandtype != BRCM_BAND_2G)
6369 || (stf != PHY_TXC1_MODE_SISO)) { 5962 || (stf != PHY_TXC1_MODE_SISO)) {
6370 wiphy_err(wlc->wiphy, "wl%d: %s: Invalid CCK\n", 5963 brcms_err(core, "wl%d: %s: Invalid CCK\n",
6371 wlc->pub->unit, __func__); 5964 wlc->pub->unit, __func__);
6372 bcmerror = -EINVAL; 5965 bcmerror = -EINVAL;
6373 goto done; 5966 goto done;
6374 } 5967 }
6375 } else { 5968 } else {
6376 wiphy_err(wlc->wiphy, "wl%d: %s: Unknown rate type\n", 5969 brcms_err(core, "wl%d: %s: Unknown rate type\n",
6377 wlc->pub->unit, __func__); 5970 wlc->pub->unit, __func__);
6378 bcmerror = -EINVAL; 5971 bcmerror = -EINVAL;
6379 goto done; 5972 goto done;
6380 } 5973 }
6381 /* make sure multiple antennae are available for non-siso rates */ 5974 /* make sure multiple antennae are available for non-siso rates */
6382 if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) { 5975 if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
6383 wiphy_err(wlc->wiphy, "wl%d: %s: SISO antenna but !SISO " 5976 brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
6384 "request\n", wlc->pub->unit, __func__); 5977 "request\n", wlc->pub->unit, __func__);
6385 bcmerror = -EINVAL; 5978 bcmerror = -EINVAL;
6386 goto done; 5979 goto done;
@@ -6449,7 +6042,7 @@ static void brcms_c_cck_plcp_set(struct brcms_c_info *wlc, int rate_500,
6449 break; 6042 break;
6450 6043
6451 default: 6044 default:
6452 wiphy_err(wlc->wiphy, 6045 brcms_err(wlc->hw->d11core,
6453 "brcms_c_cck_plcp_set: unsupported rate %d\n", 6046 "brcms_c_cck_plcp_set: unsupported rate %d\n",
6454 rate_500); 6047 rate_500);
6455 rate_500 = BRCM_RATE_1M; 6048 rate_500 = BRCM_RATE_1M;
@@ -6582,7 +6175,7 @@ static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
6582 bw = rspec_get_bw(rspec); 6175 bw = rspec_get_bw(rspec);
6583 /* 10Mhz is not supported yet */ 6176 /* 10Mhz is not supported yet */
6584 if (bw < PHY_TXC1_BW_20MHZ) { 6177 if (bw < PHY_TXC1_BW_20MHZ) {
6585 wiphy_err(wlc->wiphy, "phytxctl1_calc: bw %d is " 6178 brcms_err(wlc->hw->d11core, "phytxctl1_calc: bw %d is "
6586 "not supported yet, set to 20L\n", bw); 6179 "not supported yet, set to 20L\n", bw);
6587 bw = PHY_TXC1_BW_20MHZ; 6180 bw = PHY_TXC1_BW_20MHZ;
6588 } 6181 }
@@ -6609,7 +6202,7 @@ static u16 brcms_c_phytxctl1_calc(struct brcms_c_info *wlc, u32 rspec)
6609 /* get the phyctl byte from rate phycfg table */ 6202 /* get the phyctl byte from rate phycfg table */
6610 phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec)); 6203 phycfg = brcms_c_rate_legacy_phyctl(rspec2rate(rspec));
6611 if (phycfg == -1) { 6204 if (phycfg == -1) {
6612 wiphy_err(wlc->wiphy, "phytxctl1_calc: wrong " 6205 brcms_err(wlc->hw->d11core, "phytxctl1_calc: wrong "
6613 "legacy OFDM/CCK rate\n"); 6206 "legacy OFDM/CCK rate\n");
6614 phycfg = 0; 6207 phycfg = 0;
6615 } 6208 }
@@ -6689,8 +6282,9 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
6689 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { 6282 if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
6690 /* non-AP STA should never use BCMC queue */ 6283 /* non-AP STA should never use BCMC queue */
6691 if (queue == TX_BCMC_FIFO) { 6284 if (queue == TX_BCMC_FIFO) {
6692 wiphy_err(wlc->wiphy, "wl%d: %s: ASSERT queue == " 6285 brcms_err(wlc->hw->d11core,
6693 "TX_BCMC!\n", wlc->pub->unit, __func__); 6286 "wl%d: %s: ASSERT queue == TX_BCMC!\n",
6287 wlc->pub->unit, __func__);
6694 frameid = bcmc_fid_generate(wlc, NULL, txh); 6288 frameid = bcmc_fid_generate(wlc, NULL, txh);
6695 } else { 6289 } else {
6696 /* Increment the counter for first fragment */ 6290 /* Increment the counter for first fragment */
@@ -6860,7 +6454,8 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
6860 6454
6861 if ((txrate[k]->flags & IEEE80211_TX_RC_MCS) 6455 if ((txrate[k]->flags & IEEE80211_TX_RC_MCS)
6862 && (!is_mcs_rate(rspec[k]))) { 6456 && (!is_mcs_rate(rspec[k]))) {
6863 wiphy_err(wlc->wiphy, "wl%d: %s: IEEE80211_TX_" 6457 brcms_err(wlc->hw->d11core,
6458 "wl%d: %s: IEEE80211_TX_"
6864 "RC_MCS != is_mcs_rate(rspec)\n", 6459 "RC_MCS != is_mcs_rate(rspec)\n",
6865 wlc->pub->unit, __func__); 6460 wlc->pub->unit, __func__);
6866 } 6461 }
@@ -7254,14 +6849,16 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
7254 wlc->fragthresh[queue] = 6849 wlc->fragthresh[queue] =
7255 (u16) newfragthresh; 6850 (u16) newfragthresh;
7256 } else { 6851 } else {
7257 wiphy_err(wlc->wiphy, "wl%d: %s txop invalid " 6852 brcms_err(wlc->hw->d11core,
6853 "wl%d: %s txop invalid "
7258 "for rate %d\n", 6854 "for rate %d\n",
7259 wlc->pub->unit, fifo_names[queue], 6855 wlc->pub->unit, fifo_names[queue],
7260 rspec2rate(rspec[0])); 6856 rspec2rate(rspec[0]));
7261 } 6857 }
7262 6858
7263 if (dur > wlc->edcf_txop[ac]) 6859 if (dur > wlc->edcf_txop[ac])
7264 wiphy_err(wlc->wiphy, "wl%d: %s: %s txop " 6860 brcms_err(wlc->hw->d11core,
6861 "wl%d: %s: %s txop "
7265 "exceeded phylen %d/%d dur %d/%d\n", 6862 "exceeded phylen %d/%d dur %d/%d\n",
7266 wlc->pub->unit, __func__, 6863 wlc->pub->unit, __func__,
7267 fifo_names[queue], 6864 fifo_names[queue],
@@ -7273,79 +6870,33 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
7273 return 0; 6870 return 0;
7274} 6871}
7275 6872
7276void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu, 6873static int brcms_c_tx(struct brcms_c_info *wlc, struct sk_buff *skb)
7277 struct ieee80211_hw *hw)
7278{ 6874{
7279 u8 prio; 6875 struct dma_pub *dma;
7280 uint fifo; 6876 int fifo, ret = -ENOSPC;
7281 struct scb *scb = &wlc->pri_scb; 6877 struct d11txh *txh;
7282 struct ieee80211_hdr *d11_header = (struct ieee80211_hdr *)(sdu->data); 6878 u16 frameid = INVALIDFID;
7283
7284 /*
7285 * 802.11 standard requires management traffic
7286 * to go at highest priority
7287 */
7288 prio = ieee80211_is_data(d11_header->frame_control) ? sdu->priority :
7289 MAXPRIO;
7290 fifo = prio2fifo[prio];
7291 if (brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0))
7292 return;
7293 brcms_c_txq_enq(wlc, scb, sdu, BRCMS_PRIO_TO_PREC(prio));
7294 brcms_c_send_q(wlc);
7295}
7296
7297void brcms_c_send_q(struct brcms_c_info *wlc)
7298{
7299 struct sk_buff *pkt[DOT11_MAXNUMFRAGS];
7300 int prec;
7301 u16 prec_map;
7302 int err = 0, i, count;
7303 uint fifo;
7304 struct brcms_txq_info *qi = wlc->pkt_queue;
7305 struct pktq *q = &qi->q;
7306 struct ieee80211_tx_info *tx_info;
7307 6879
7308 prec_map = wlc->tx_prec_map; 6880 fifo = brcms_ac_to_fifo(skb_get_queue_mapping(skb));
6881 dma = wlc->hw->di[fifo];
6882 txh = (struct d11txh *)(skb->data);
7309 6883
7310 /* Send all the enq'd pkts that we can. 6884 if (dma->txavail == 0) {
7311 * Dequeue packets with precedence with empty HW fifo only 6885 /*
7312 */ 6886 * We sometimes get a frame from mac80211 after stopping
7313 while (prec_map && (pkt[0] = brcmu_pktq_mdeq(q, prec_map, &prec))) { 6887 * the queues. This only ever seems to be a single frame
7314 tx_info = IEEE80211_SKB_CB(pkt[0]); 6888 * and is seems likely to be a race. TX_HEADROOM should
7315 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 6889 * ensure that we have enough space to handle these stray
7316 err = brcms_c_sendampdu(wlc->ampdu, qi, pkt, prec); 6890 * packets, so warn if there isn't. If we're out of space
7317 } else { 6891 * in the tx ring and the tx queue isn't stopped then
7318 count = 1; 6892 * we've really got a bug; warn loudly if that happens.
7319 err = brcms_c_prep_pdu(wlc, pkt[0], &fifo); 6893 */
7320 if (!err) { 6894 brcms_warn(wlc->hw->d11core,
7321 for (i = 0; i < count; i++) 6895 "Received frame for tx with no space in DMA ring\n");
7322 brcms_c_txfifo(wlc, fifo, pkt[i], true, 6896 WARN_ON(!ieee80211_queue_stopped(wlc->pub->ieee_hw,
7323 1); 6897 skb_get_queue_mapping(skb)));
7324 } 6898 return -ENOSPC;
7325 }
7326
7327 if (err == -EBUSY) {
7328 brcmu_pktq_penq_head(q, prec, pkt[0]);
7329 /*
7330 * If send failed due to any other reason than a
7331 * change in HW FIFO condition, quit. Otherwise,
7332 * read the new prec_map!
7333 */
7334 if (prec_map == wlc->tx_prec_map)
7335 break;
7336 prec_map = wlc->tx_prec_map;
7337 }
7338 } 6899 }
7339}
7340
7341void
7342brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
7343 bool commit, s8 txpktpend)
7344{
7345 u16 frameid = INVALIDFID;
7346 struct d11txh *txh;
7347
7348 txh = (struct d11txh *) (p->data);
7349 6900
7350 /* When a BC/MC frame is being committed to the BCMC fifo 6901 /* When a BC/MC frame is being committed to the BCMC fifo
7351 * via DMA (NOT PIO), update ucode or BSS info as appropriate. 6902 * via DMA (NOT PIO), update ucode or BSS info as appropriate.
@@ -7353,16 +6904,6 @@ brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
7353 if (fifo == TX_BCMC_FIFO) 6904 if (fifo == TX_BCMC_FIFO)
7354 frameid = le16_to_cpu(txh->TxFrameID); 6905 frameid = le16_to_cpu(txh->TxFrameID);
7355 6906
7356 /*
7357 * Bump up pending count for if not using rpc. If rpc is
7358 * used, this will be handled in brcms_b_txfifo()
7359 */
7360 if (commit) {
7361 wlc->core->txpktpend[fifo] += txpktpend;
7362 BCMMSG(wlc->wiphy, "pktpend inc %d to %d\n",
7363 txpktpend, wlc->core->txpktpend[fifo]);
7364 }
7365
7366 /* Commit BCMC sequence number in the SHM frame ID location */ 6907 /* Commit BCMC sequence number in the SHM frame ID location */
7367 if (frameid != INVALIDFID) { 6908 if (frameid != INVALIDFID) {
7368 /* 6909 /*
@@ -7372,8 +6913,55 @@ brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p,
7372 brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid); 6913 brcms_b_write_shm(wlc->hw, M_BCMC_FID, frameid);
7373 } 6914 }
7374 6915
7375 if (dma_txfast(wlc->hw->di[fifo], p, commit) < 0) 6916 ret = brcms_c_txfifo(wlc, fifo, skb);
6917 /*
6918 * The only reason for brcms_c_txfifo to fail is because
6919 * there weren't any DMA descriptors, but we've already
6920 * checked for that. So if it does fail yell loudly.
6921 */
6922 WARN_ON_ONCE(ret);
6923
6924 return ret;
6925}
6926
6927bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
6928 struct ieee80211_hw *hw)
6929{
6930 uint fifo;
6931 struct scb *scb = &wlc->pri_scb;
6932
6933 fifo = brcms_ac_to_fifo(skb_get_queue_mapping(sdu));
6934 brcms_c_d11hdrs_mac80211(wlc, hw, sdu, scb, 0, 1, fifo, 0);
6935 if (!brcms_c_tx(wlc, sdu))
6936 return true;
6937
6938 /* packet discarded */
6939 dev_kfree_skb_any(sdu);
6940 return false;
6941}
6942
6943int
6944brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p)
6945{
6946 struct dma_pub *dma = wlc->hw->di[fifo];
6947 int ret;
6948 u16 queue;
6949
6950 ret = dma_txfast(wlc, dma, p);
6951 if (ret < 0)
7376 wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n"); 6952 wiphy_err(wlc->wiphy, "txfifo: fatal, toss frames !!!\n");
6953
6954 /*
6955 * Stop queue if DMA ring is full. Reserve some free descriptors,
6956 * as we sometimes receive a frame from mac80211 after the queues
6957 * are stopped.
6958 */
6959 queue = skb_get_queue_mapping(p);
6960 if (dma->txavail <= TX_HEADROOM && fifo < TX_BCMC_FIFO &&
6961 !ieee80211_queue_stopped(wlc->pub->ieee_hw, queue))
6962 ieee80211_stop_queue(wlc->pub->ieee_hw, queue);
6963
6964 return ret;
7377} 6965}
7378 6966
7379u32 6967u32
@@ -7423,19 +7011,6 @@ brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
7423 return rts_rspec; 7011 return rts_rspec;
7424} 7012}
7425 7013
7426void
7427brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo, s8 txpktpend)
7428{
7429 wlc->core->txpktpend[fifo] -= txpktpend;
7430 BCMMSG(wlc->wiphy, "pktpend dec %d to %d\n", txpktpend,
7431 wlc->core->txpktpend[fifo]);
7432
7433 /* There is more room; mark precedences related to this FIFO sendable */
7434 wlc->tx_prec_map |= wlc->fifo2prec_map[fifo];
7435
7436 /* figure out which bsscfg is being worked on... */
7437}
7438
7439/* Update beacon listen interval in shared memory */ 7014/* Update beacon listen interval in shared memory */
7440static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc) 7015static void brcms_c_bcn_li_upd(struct brcms_c_info *wlc)
7441{ 7016{
@@ -7508,7 +7083,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7508 7083
7509 /* fill in TSF and flag its presence */ 7084 /* fill in TSF and flag its presence */
7510 rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh); 7085 rx_status->mactime = brcms_c_recover_tsf64(wlc, rxh);
7511 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 7086 rx_status->flag |= RX_FLAG_MACTIME_START;
7512 7087
7513 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan); 7088 channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
7514 7089
@@ -7571,7 +7146,8 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7571 rx_status->rate_idx = 11; 7146 rx_status->rate_idx = 11;
7572 break; 7147 break;
7573 default: 7148 default:
7574 wiphy_err(wlc->wiphy, "%s: Unknown rate\n", __func__); 7149 brcms_err(wlc->hw->d11core,
7150 "%s: Unknown rate\n", __func__);
7575 } 7151 }
7576 7152
7577 /* 7153 /*
@@ -7590,7 +7166,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7590 } else if (is_ofdm_rate(rspec)) { 7166 } else if (is_ofdm_rate(rspec)) {
7591 rx_status->flag |= RX_FLAG_SHORTPRE; 7167 rx_status->flag |= RX_FLAG_SHORTPRE;
7592 } else { 7168 } else {
7593 wiphy_err(wlc->wiphy, "%s: Unknown modulation\n", 7169 brcms_err(wlc->hw->d11core, "%s: Unknown modulation\n",
7594 __func__); 7170 __func__);
7595 } 7171 }
7596 } 7172 }
@@ -7600,12 +7176,12 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
7600 7176
7601 if (rxh->RxStatus1 & RXS_DECERR) { 7177 if (rxh->RxStatus1 & RXS_DECERR) {
7602 rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; 7178 rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
7603 wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_PLCP_CRC\n", 7179 brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_PLCP_CRC\n",
7604 __func__); 7180 __func__);
7605 } 7181 }
7606 if (rxh->RxStatus1 & RXS_FCSERR) { 7182 if (rxh->RxStatus1 & RXS_FCSERR) {
7607 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; 7183 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
7608 wiphy_err(wlc->wiphy, "%s: RX_FLAG_FAILED_FCS_CRC\n", 7184 brcms_err(wlc->hw->d11core, "%s: RX_FLAG_FAILED_FCS_CRC\n",
7609 __func__); 7185 __func__);
7610 } 7186 }
7611} 7187}
@@ -7649,9 +7225,6 @@ brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
7649{ 7225{
7650 uint nsyms, len = 0, kNdps; 7226 uint nsyms, len = 0, kNdps;
7651 7227
7652 BCMMSG(wlc->wiphy, "wl%d: rate %d, len%d\n",
7653 wlc->pub->unit, rspec2rate(ratespec), mac_len);
7654
7655 if (is_mcs_rate(ratespec)) { 7228 if (is_mcs_rate(ratespec)) {
7656 uint mcs = ratespec & RSPEC_RATE_MASK; 7229 uint mcs = ratespec & RSPEC_RATE_MASK;
7657 int tot_streams = (mcs_2_txstreams(mcs) + 1) + 7230 int tot_streams = (mcs_2_txstreams(mcs) + 1) +
@@ -7883,35 +7456,6 @@ void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend)
7883 brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend); 7456 brcms_c_bss_update_probe_resp(wlc, bsscfg, suspend);
7884} 7457}
7885 7458
7886/* prepares pdu for transmission. returns BCM error codes */
7887int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu, uint *fifop)
7888{
7889 uint fifo;
7890 struct d11txh *txh;
7891 struct ieee80211_hdr *h;
7892 struct scb *scb;
7893
7894 txh = (struct d11txh *) (pdu->data);
7895 h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
7896
7897 /* get the pkt queue info. This was put at brcms_c_sendctl or
7898 * brcms_c_send for PDU */
7899 fifo = le16_to_cpu(txh->TxFrameID) & TXFID_QUEUE_MASK;
7900
7901 scb = NULL;
7902
7903 *fifop = fifo;
7904
7905 /* return if insufficient dma resources */
7906 if (*wlc->core->txavail[fifo] < MAX_DMA_SEGS) {
7907 /* Mark precedences related to this FIFO, unsendable */
7908 /* A fifo is full. Clear precedences related to that FIFO */
7909 wlc->tx_prec_map &= ~(wlc->fifo2prec_map[fifo]);
7910 return -EBUSY;
7911 }
7912 return 0;
7913}
7914
7915int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo, 7459int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
7916 uint *blocks) 7460 uint *blocks)
7917{ 7461{
@@ -7977,13 +7521,15 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
7977void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop) 7521void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
7978{ 7522{
7979 int timeout = 20; 7523 int timeout = 20;
7524 int i;
7980 7525
7981 /* flush packet queue when requested */ 7526 /* Kick DMA to send any pending AMPDU */
7982 if (drop) 7527 for (i = 0; i < ARRAY_SIZE(wlc->hw->di); i++)
7983 brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL); 7528 if (wlc->hw->di[i])
7529 dma_txflush(wlc->hw->di[i]);
7984 7530
7985 /* wait for queue and DMA fifos to run dry */ 7531 /* wait for queue and DMA fifos to run dry */
7986 while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) { 7532 while (brcms_txpktpendtot(wlc) > 0) {
7987 brcms_msleep(wlc->wl, 1); 7533 brcms_msleep(wlc->wl, 1);
7988 7534
7989 if (--timeout == 0) 7535 if (--timeout == 0)
@@ -8032,8 +7578,6 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
8032 uint len; 7578 uint len;
8033 bool is_amsdu; 7579 bool is_amsdu;
8034 7580
8035 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
8036
8037 /* frame starts with rxhdr */ 7581 /* frame starts with rxhdr */
8038 rxh = (struct d11rxhdr *) (p->data); 7582 rxh = (struct d11rxhdr *) (p->data);
8039 7583
@@ -8043,8 +7587,9 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
8043 /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */ 7587 /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU subframes */
8044 if (rxh->RxStatus1 & RXS_PBPRES) { 7588 if (rxh->RxStatus1 & RXS_PBPRES) {
8045 if (p->len < 2) { 7589 if (p->len < 2) {
8046 wiphy_err(wlc->wiphy, "wl%d: recv: rcvd runt of " 7590 brcms_err(wlc->hw->d11core,
8047 "len %d\n", wlc->pub->unit, p->len); 7591 "wl%d: recv: rcvd runt of len %d\n",
7592 wlc->pub->unit, p->len);
8048 goto toss; 7593 goto toss;
8049 } 7594 }
8050 skb_pull(p, 2); 7595 skb_pull(p, 2);
@@ -8088,17 +7633,19 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
8088 7633
8089 uint n = 0; 7634 uint n = 0;
8090 uint bound_limit = bound ? RXBND : -1; 7635 uint bound_limit = bound ? RXBND : -1;
7636 bool morepending;
8091 7637
8092 BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
8093 skb_queue_head_init(&recv_frames); 7638 skb_queue_head_init(&recv_frames);
8094 7639
8095 /* gather received frames */ 7640 /* gather received frames */
8096 while (dma_rx(wlc_hw->di[fifo], &recv_frames)) { 7641 do {
8097
8098 /* !give others some time to run! */ 7642 /* !give others some time to run! */
8099 if (++n >= bound_limit) 7643 if (n >= bound_limit)
8100 break; 7644 break;
8101 } 7645
7646 morepending = dma_rx(wlc_hw->di[fifo], &recv_frames);
7647 n++;
7648 } while (morepending);
8102 7649
8103 /* post more rbufs */ 7650 /* post more rbufs */
8104 dma_rxfill(wlc_hw->di[fifo]); 7651 dma_rxfill(wlc_hw->di[fifo]);
@@ -8128,7 +7675,7 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
8128 brcms_c_recv(wlc_hw->wlc, p); 7675 brcms_c_recv(wlc_hw->wlc, p);
8129 } 7676 }
8130 7677
8131 return n >= bound_limit; 7678 return morepending;
8132} 7679}
8133 7680
8134/* second-level interrupt processing 7681/* second-level interrupt processing
@@ -8140,10 +7687,9 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8140 u32 macintstatus; 7687 u32 macintstatus;
8141 struct brcms_hardware *wlc_hw = wlc->hw; 7688 struct brcms_hardware *wlc_hw = wlc->hw;
8142 struct bcma_device *core = wlc_hw->d11core; 7689 struct bcma_device *core = wlc_hw->d11core;
8143 struct wiphy *wiphy = wlc->wiphy;
8144 7690
8145 if (brcms_deviceremoved(wlc)) { 7691 if (brcms_deviceremoved(wlc)) {
8146 wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit, 7692 brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
8147 __func__); 7693 __func__);
8148 brcms_down(wlc->wl); 7694 brcms_down(wlc->wl);
8149 return false; 7695 return false;
@@ -8153,8 +7699,8 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8153 macintstatus = wlc->macintstatus; 7699 macintstatus = wlc->macintstatus;
8154 wlc->macintstatus = 0; 7700 wlc->macintstatus = 0;
8155 7701
8156 BCMMSG(wlc->wiphy, "wl%d: macintstatus 0x%x\n", 7702 brcms_dbg_int(core, "wl%d: macintstatus 0x%x\n",
8157 wlc_hw->unit, macintstatus); 7703 wlc_hw->unit, macintstatus);
8158 7704
8159 WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */ 7705 WARN_ON(macintstatus & MI_PRQ); /* PRQ Interrupt in non-MBSS */
8160 7706
@@ -8164,7 +7710,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8164 if (brcms_b_txstatus(wlc->hw, bounded, &fatal)) 7710 if (brcms_b_txstatus(wlc->hw, bounded, &fatal))
8165 wlc->macintstatus |= MI_TFS; 7711 wlc->macintstatus |= MI_TFS;
8166 if (fatal) { 7712 if (fatal) {
8167 wiphy_err(wiphy, "MI_TFS: fatal\n"); 7713 brcms_err(core, "MI_TFS: fatal\n");
8168 goto fatal; 7714 goto fatal;
8169 } 7715 }
8170 } 7716 }
@@ -8174,7 +7720,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8174 7720
8175 /* ATIM window end */ 7721 /* ATIM window end */
8176 if (macintstatus & MI_ATIMWINEND) { 7722 if (macintstatus & MI_ATIMWINEND) {
8177 BCMMSG(wlc->wiphy, "end of ATIM window\n"); 7723 brcms_dbg_info(core, "end of ATIM window\n");
8178 bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid); 7724 bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
8179 wlc->qvalid = 0; 7725 wlc->qvalid = 0;
8180 } 7726 }
@@ -8192,7 +7738,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8192 wlc_phy_noise_sample_intr(wlc_hw->band->pi); 7738 wlc_phy_noise_sample_intr(wlc_hw->band->pi);
8193 7739
8194 if (macintstatus & MI_GP0) { 7740 if (macintstatus & MI_GP0) {
8195 wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d " 7741 brcms_err(core, "wl%d: PSM microcode watchdog fired at %d "
8196 "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now); 7742 "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
8197 7743
8198 printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n", 7744 printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
@@ -8206,15 +7752,11 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
8206 bcma_write32(core, D11REGOFFS(gptimer), 0); 7752 bcma_write32(core, D11REGOFFS(gptimer), 0);
8207 7753
8208 if (macintstatus & MI_RFDISABLE) { 7754 if (macintstatus & MI_RFDISABLE) {
8209 BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the" 7755 brcms_dbg_info(core, "wl%d: BMAC Detected a change on the"
8210 " RF Disable Input\n", wlc_hw->unit); 7756 " RF Disable Input\n", wlc_hw->unit);
8211 brcms_rfkill_set_hw_state(wlc->wl); 7757 brcms_rfkill_set_hw_state(wlc->wl);
8212 } 7758 }
8213 7759
8214 /* send any enq'd tx packets. Just makes sure to jump start tx */
8215 if (!pktq_empty(&wlc->pkt_queue->q))
8216 brcms_c_send_q(wlc);
8217
8218 /* it isn't done and needs to be resched if macintstatus is non-zero */ 7760 /* it isn't done and needs to be resched if macintstatus is non-zero */
8219 return wlc->macintstatus != 0; 7761 return wlc->macintstatus != 0;
8220 7762
@@ -8229,7 +7771,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8229 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; 7771 struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
8230 u16 chanspec; 7772 u16 chanspec;
8231 7773
8232 BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit); 7774 brcms_dbg_info(core, "wl%d\n", wlc->pub->unit);
8233 7775
8234 chanspec = ch20mhz_chspec(ch->hw_value); 7776 chanspec = ch20mhz_chspec(ch->hw_value);
8235 7777
@@ -8286,9 +7828,6 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8286 bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF); 7828 bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
8287 brcms_c_edcf_setparams(wlc, false); 7829 brcms_c_edcf_setparams(wlc, false);
8288 7830
8289 /* Init precedence maps for empty FIFOs */
8290 brcms_c_tx_prec_map_init(wlc);
8291
8292 /* read the ucode version if we have not yet done so */ 7831 /* read the ucode version if we have not yet done so */
8293 if (wlc->ucode_rev == 0) { 7832 if (wlc->ucode_rev == 0) {
8294 wlc->ucode_rev = 7833 wlc->ucode_rev =
@@ -8303,9 +7842,6 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
8303 if (mute_tx) 7842 if (mute_tx)
8304 brcms_b_mute(wlc->hw, true); 7843 brcms_b_mute(wlc->hw, true);
8305 7844
8306 /* clear tx flow control */
8307 brcms_c_txflowcontrol_reset(wlc);
8308
8309 /* enable the RF Disable Delay timer */ 7845 /* enable the RF Disable Delay timer */
8310 bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT); 7846 bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
8311 7847
@@ -8464,15 +8000,6 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
8464 * Complete the wlc default state initializations.. 8000 * Complete the wlc default state initializations..
8465 */ 8001 */
8466 8002
8467 /* allocate our initial queue */
8468 wlc->pkt_queue = brcms_c_txq_alloc(wlc);
8469 if (wlc->pkt_queue == NULL) {
8470 wiphy_err(wl->wiphy, "wl%d: %s: failed to malloc tx queue\n",
8471 unit, __func__);
8472 err = 100;
8473 goto fail;
8474 }
8475
8476 wlc->bsscfg->wlc = wlc; 8003 wlc->bsscfg->wlc = wlc;
8477 8004
8478 wlc->mimoft = FT_HT; 8005 wlc->mimoft = FT_HT;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index 8debc74c54e1..fb447747c2c6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -101,9 +101,6 @@
101 101
102#define DATA_BLOCK_TX_SUPR (1 << 4) 102#define DATA_BLOCK_TX_SUPR (1 << 4)
103 103
104/* 802.1D Priority to TX FIFO number for wme */
105extern const u8 prio2fifo[];
106
107/* Ucode MCTL_WAKE override bits */ 104/* Ucode MCTL_WAKE override bits */
108#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01 105#define BRCMS_WAKE_OVERRIDE_CLKCTL 0x01
109#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02 106#define BRCMS_WAKE_OVERRIDE_PHYREG 0x02
@@ -242,7 +239,6 @@ struct brcms_core {
242 239
243 /* fifo */ 240 /* fifo */
244 uint *txavail[NFIFO]; /* # tx descriptors available */ 241 uint *txavail[NFIFO]; /* # tx descriptors available */
245 s16 txpktpend[NFIFO]; /* tx admission control */
246 242
247 struct macstat *macstat_snapshot; /* mac hw prev read values */ 243 struct macstat *macstat_snapshot; /* mac hw prev read values */
248}; 244};
@@ -382,19 +378,6 @@ struct brcms_hardware {
382 */ 378 */
383}; 379};
384 380
385/* TX Queue information
386 *
387 * Each flow of traffic out of the device has a TX Queue with independent
388 * flow control. Several interfaces may be associated with a single TX Queue
389 * if they belong to the same flow of traffic from the device. For multi-channel
390 * operation there are independent TX Queues for each channel.
391 */
392struct brcms_txq_info {
393 struct brcms_txq_info *next;
394 struct pktq q;
395 uint stopped; /* tx flow control bits */
396};
397
398/* 381/*
399 * Principal common driver data structure. 382 * Principal common driver data structure.
400 * 383 *
@@ -435,11 +418,8 @@ struct brcms_txq_info {
435 * WDlast: last time wlc_watchdog() was called. 418 * WDlast: last time wlc_watchdog() was called.
436 * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac. 419 * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
437 * wme_retries: per-AC retry limits. 420 * wme_retries: per-AC retry limits.
438 * tx_prec_map: Precedence map based on HW FIFO space.
439 * fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
440 * bsscfg: set of BSS configurations, idx 0 is default and always valid. 421 * bsscfg: set of BSS configurations, idx 0 is default and always valid.
441 * cfg: the primary bsscfg (can be AP or STA). 422 * cfg: the primary bsscfg (can be AP or STA).
442 * tx_queues: common TX Queue list.
443 * modulecb: 423 * modulecb:
444 * mimoft: SIGN or 11N. 424 * mimoft: SIGN or 11N.
445 * cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode. 425 * cck_40txbw: 11N, cck tx b/w override when in 40MHZ mode.
@@ -469,7 +449,6 @@ struct brcms_txq_info {
469 * tempsense_lasttime; 449 * tempsense_lasttime;
470 * tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM. 450 * tx_duty_cycle_ofdm: maximum allowed duty cycle for OFDM.
471 * tx_duty_cycle_cck: maximum allowed duty cycle for CCK. 451 * tx_duty_cycle_cck: maximum allowed duty cycle for CCK.
472 * pkt_queue: txq for transmit packets.
473 * wiphy: 452 * wiphy:
474 * pri_scb: primary Station Control Block 453 * pri_scb: primary Station Control Block
475 */ 454 */
@@ -533,14 +512,9 @@ struct brcms_c_info {
533 u16 edcf_txop[IEEE80211_NUM_ACS]; 512 u16 edcf_txop[IEEE80211_NUM_ACS];
534 513
535 u16 wme_retries[IEEE80211_NUM_ACS]; 514 u16 wme_retries[IEEE80211_NUM_ACS];
536 u16 tx_prec_map;
537 u16 fifo2prec_map[NFIFO];
538 515
539 struct brcms_bss_cfg *bsscfg; 516 struct brcms_bss_cfg *bsscfg;
540 517
541 /* tx queue */
542 struct brcms_txq_info *tx_queues;
543
544 struct modulecb *modulecb; 518 struct modulecb *modulecb;
545 519
546 u8 mimoft; 520 u8 mimoft;
@@ -585,7 +559,6 @@ struct brcms_c_info {
585 u16 tx_duty_cycle_ofdm; 559 u16 tx_duty_cycle_ofdm;
586 u16 tx_duty_cycle_cck; 560 u16 tx_duty_cycle_cck;
587 561
588 struct brcms_txq_info *pkt_queue;
589 struct wiphy *wiphy; 562 struct wiphy *wiphy;
590 struct scb pri_scb; 563 struct scb pri_scb;
591}; 564};
@@ -637,30 +610,13 @@ struct brcms_bss_cfg {
637 struct brcms_bss_info *current_bss; 610 struct brcms_bss_info *current_bss;
638}; 611};
639 612
640extern void brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, 613extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
641 struct sk_buff *p, 614 struct sk_buff *p);
642 bool commit, s8 txpktpend);
643extern void brcms_c_txfifo_complete(struct brcms_c_info *wlc, uint fifo,
644 s8 txpktpend);
645extern void brcms_c_txq_enq(struct brcms_c_info *wlc, struct scb *scb,
646 struct sk_buff *sdu, uint prec);
647extern void brcms_c_print_txstatus(struct tx_status *txs);
648extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo, 615extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
649 uint *blocks); 616 uint *blocks);
650 617
651#if defined(DEBUG)
652extern void brcms_c_print_txdesc(struct d11txh *txh);
653#else
654static inline void brcms_c_print_txdesc(struct d11txh *txh)
655{
656}
657#endif
658
659extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config); 618extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
660extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags); 619extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
661extern void brcms_c_send_q(struct brcms_c_info *wlc);
662extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
663 uint *fifo);
664extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, 620extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
665 uint mac_len); 621 uint mac_len);
666extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, 622extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index abfd78822fb8..606b534347bc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
1137 gain0_15 = ((biq1 & 0xf) << 12) | 1137 gain0_15 = ((biq1 & 0xf) << 12) |
1138 ((tia & 0xf) << 8) | 1138 ((tia & 0xf) << 8) |
1139 ((lna2 & 0x3) << 6) | 1139 ((lna2 & 0x3) << 6) |
1140 ((lna2 & 1140 ((lna2 & 0x3) << 4) |
1141 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); 1141 ((lna1 & 0x3) << 2) |
1142 ((lna1 & 0x3) << 0);
1142 1143
1143 mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); 1144 mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
1144 mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); 1145 mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1156,6 +1157,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
1156 } 1157 }
1157 1158
1158 mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); 1159 mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
1160 mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
1161 mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
1159 1162
1160} 1163}
1161 1164
@@ -1328,6 +1331,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
1328 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; 1331 return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
1329} 1332}
1330 1333
1334static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
1335 u16 tia_gain, u16 lna2_gain)
1336{
1337 u32 i_thresh_l, q_thresh_l;
1338 u32 i_thresh_h, q_thresh_h;
1339 struct lcnphy_iq_est iq_est_h, iq_est_l;
1340
1341 wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
1342 lna2_gain, 0);
1343
1344 wlc_lcnphy_rx_gain_override_enable(pi, true);
1345 wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
1346 usleep_range(500, 500);
1347 write_radio_reg(pi, RADIO_2064_REG112, 0);
1348 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
1349 return false;
1350
1351 wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
1352 usleep_range(500, 500);
1353 write_radio_reg(pi, RADIO_2064_REG112, 0);
1354 if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
1355 return false;
1356
1357 i_thresh_l = (iq_est_l.i_pwr << 1);
1358 i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
1359
1360 q_thresh_l = (iq_est_l.q_pwr << 1);
1361 q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
1362 if ((iq_est_h.i_pwr > i_thresh_l) &&
1363 (iq_est_h.i_pwr < i_thresh_h) &&
1364 (iq_est_h.q_pwr > q_thresh_l) &&
1365 (iq_est_h.q_pwr < q_thresh_h))
1366 return true;
1367
1368 return false;
1369}
1370
1331static bool 1371static bool
1332wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, 1372wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1333 const struct lcnphy_rx_iqcomp *iqcomp, 1373 const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1382,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1342 RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, 1382 RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
1343 rfoverride3_old, rfoverride3val_old, rfoverride4_old, 1383 rfoverride3_old, rfoverride3val_old, rfoverride4_old,
1344 rfoverride4val_old, afectrlovr_old, afectrlovrval_old; 1384 rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
1345 int tia_gain; 1385 int tia_gain, lna2_gain, biq1_gain;
1346 u32 received_power, rx_pwr_threshold; 1386 bool set_gain;
1347 u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; 1387 u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
1348 u16 values_to_save[11]; 1388 u16 values_to_save[11];
1349 s16 *ptr; 1389 s16 *ptr;
@@ -1368,126 +1408,134 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
1368 goto cal_done; 1408 goto cal_done;
1369 } 1409 }
1370 1410
1371 if (module == 1) { 1411 WARN_ON(module != 1);
1412 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
1413 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
1372 1414
1373 tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); 1415 for (i = 0; i < 11; i++)
1374 wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); 1416 values_to_save[i] =
1417 read_radio_reg(pi, rxiq_cal_rf_reg[i]);
1418 Core1TxControl_old = read_phy_reg(pi, 0x631);
1419
1420 or_phy_reg(pi, 0x631, 0x0015);
1421
1422 RFOverride0_old = read_phy_reg(pi, 0x44c);
1423 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1424 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1425 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1426 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1427 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1428 rfoverride4_old = read_phy_reg(pi, 0x938);
1429 rfoverride4val_old = read_phy_reg(pi, 0x939);
1430 afectrlovr_old = read_phy_reg(pi, 0x43b);
1431 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1432 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1433 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1375 1434
1376 for (i = 0; i < 11; i++) 1435 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1377 values_to_save[i] = 1436 if (tx_gain_override_old) {
1378 read_radio_reg(pi, rxiq_cal_rf_reg[i]); 1437 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1379 Core1TxControl_old = read_phy_reg(pi, 0x631); 1438 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1380 1439 }
1381 or_phy_reg(pi, 0x631, 0x0015);
1382
1383 RFOverride0_old = read_phy_reg(pi, 0x44c);
1384 RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
1385 rfoverride2_old = read_phy_reg(pi, 0x4b0);
1386 rfoverride2val_old = read_phy_reg(pi, 0x4b1);
1387 rfoverride3_old = read_phy_reg(pi, 0x4f9);
1388 rfoverride3val_old = read_phy_reg(pi, 0x4fa);
1389 rfoverride4_old = read_phy_reg(pi, 0x938);
1390 rfoverride4val_old = read_phy_reg(pi, 0x939);
1391 afectrlovr_old = read_phy_reg(pi, 0x43b);
1392 afectrlovrval_old = read_phy_reg(pi, 0x43c);
1393 old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
1394 old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
1395
1396 tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
1397 if (tx_gain_override_old) {
1398 wlc_lcnphy_get_tx_gain(pi, &old_gains);
1399 tx_gain_index_old = pi_lcn->lcnphy_current_index;
1400 }
1401 1440
1402 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); 1441 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
1403 1442
1404 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); 1443 mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
1405 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); 1444 mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
1406 1445
1407 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); 1446 mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
1408 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); 1447 mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
1409 1448
1410 write_radio_reg(pi, RADIO_2064_REG116, 0x06); 1449 write_radio_reg(pi, RADIO_2064_REG116, 0x06);
1411 write_radio_reg(pi, RADIO_2064_REG12C, 0x07); 1450 write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
1412 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); 1451 write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
1413 write_radio_reg(pi, RADIO_2064_REG098, 0x03); 1452 write_radio_reg(pi, RADIO_2064_REG098, 0x03);
1414 write_radio_reg(pi, RADIO_2064_REG00B, 0x7); 1453 write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
1415 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); 1454 mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
1416 write_radio_reg(pi, RADIO_2064_REG01D, 0x01); 1455 write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
1417 write_radio_reg(pi, RADIO_2064_REG114, 0x01); 1456 write_radio_reg(pi, RADIO_2064_REG114, 0x01);
1418 write_radio_reg(pi, RADIO_2064_REG02E, 0x10); 1457 write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
1419 write_radio_reg(pi, RADIO_2064_REG12A, 0x08); 1458 write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
1420 1459
1421 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); 1460 mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
1422 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); 1461 mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
1423 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); 1462 mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
1424 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); 1463 mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
1425 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); 1464 mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
1426 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); 1465 mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
1427 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); 1466 mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
1428 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); 1467 mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
1429 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); 1468 mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
1430 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); 1469 mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
1431
1432 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
1433 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
1434
1435 wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
1436 write_phy_reg(pi, 0x6da, 0xffff);
1437 or_phy_reg(pi, 0x6db, 0x3);
1438 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
1439 wlc_lcnphy_rx_gain_override_enable(pi, true);
1440
1441 tia_gain = 8;
1442 rx_pwr_threshold = 950;
1443 while (tia_gain > 0) {
1444 tia_gain -= 1;
1445 wlc_lcnphy_set_rx_gain_by_distribution(pi,
1446 0, 0, 2, 2,
1447 (u16)
1448 tia_gain, 1, 0);
1449 udelay(500);
1450 1470
1451 received_power = 1471 mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
1452 wlc_lcnphy_measure_digital_power(pi, 2000); 1472 mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
1453 if (received_power < rx_pwr_threshold) 1473
1454 break; 1474 write_phy_reg(pi, 0x6da, 0xffff);
1475 or_phy_reg(pi, 0x6db, 0x3);
1476
1477 wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
1478 set_gain = false;
1479
1480 lna2_gain = 3;
1481 while ((lna2_gain >= 0) && !set_gain) {
1482 tia_gain = 4;
1483
1484 while ((tia_gain >= 0) && !set_gain) {
1485 biq1_gain = 6;
1486
1487 while ((biq1_gain >= 0) && !set_gain) {
1488 set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
1489 (u16)
1490 biq1_gain,
1491 (u16)
1492 tia_gain,
1493 (u16)
1494 lna2_gain);
1495 biq1_gain -= 1;
1496 }
1497 tia_gain -= 1;
1455 } 1498 }
1456 result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); 1499 lna2_gain -= 1;
1500 }
1457 1501
1458 wlc_lcnphy_stop_tx_tone(pi); 1502 if (set_gain)
1503 result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
1504 else
1505 result = false;
1459 1506
1460 write_phy_reg(pi, 0x631, Core1TxControl_old); 1507 wlc_lcnphy_stop_tx_tone(pi);
1461 1508
1462 write_phy_reg(pi, 0x44c, RFOverrideVal0_old); 1509 write_phy_reg(pi, 0x631, Core1TxControl_old);
1463 write_phy_reg(pi, 0x44d, RFOverrideVal0_old); 1510
1464 write_phy_reg(pi, 0x4b0, rfoverride2_old); 1511 write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
1465 write_phy_reg(pi, 0x4b1, rfoverride2val_old); 1512 write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
1466 write_phy_reg(pi, 0x4f9, rfoverride3_old); 1513 write_phy_reg(pi, 0x4b0, rfoverride2_old);
1467 write_phy_reg(pi, 0x4fa, rfoverride3val_old); 1514 write_phy_reg(pi, 0x4b1, rfoverride2val_old);
1468 write_phy_reg(pi, 0x938, rfoverride4_old); 1515 write_phy_reg(pi, 0x4f9, rfoverride3_old);
1469 write_phy_reg(pi, 0x939, rfoverride4val_old); 1516 write_phy_reg(pi, 0x4fa, rfoverride3val_old);
1470 write_phy_reg(pi, 0x43b, afectrlovr_old); 1517 write_phy_reg(pi, 0x938, rfoverride4_old);
1471 write_phy_reg(pi, 0x43c, afectrlovrval_old); 1518 write_phy_reg(pi, 0x939, rfoverride4val_old);
1472 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); 1519 write_phy_reg(pi, 0x43b, afectrlovr_old);
1473 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); 1520 write_phy_reg(pi, 0x43c, afectrlovrval_old);
1521 write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
1522 write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
1474 1523
1475 wlc_lcnphy_clear_trsw_override(pi); 1524 wlc_lcnphy_clear_trsw_override(pi);
1476 1525
1477 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); 1526 mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
1478 1527
1479 for (i = 0; i < 11; i++) 1528 for (i = 0; i < 11; i++)
1480 write_radio_reg(pi, rxiq_cal_rf_reg[i], 1529 write_radio_reg(pi, rxiq_cal_rf_reg[i],
1481 values_to_save[i]); 1530 values_to_save[i]);
1482 1531
1483 if (tx_gain_override_old) 1532 if (tx_gain_override_old)
1484 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); 1533 wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
1485 else 1534 else
1486 wlc_lcnphy_disable_tx_gain_override(pi); 1535 wlc_lcnphy_disable_tx_gain_override(pi);
1487 1536
1488 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); 1537 wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
1489 wlc_lcnphy_rx_gain_override_enable(pi, false); 1538 wlc_lcnphy_rx_gain_override_enable(pi, false);
1490 }
1491 1539
1492cal_done: 1540cal_done:
1493 kfree(ptr); 1541 kfree(ptr);
@@ -1781,6 +1829,17 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
1781 write_radio_reg(pi, RADIO_2064_REG038, 3); 1829 write_radio_reg(pi, RADIO_2064_REG038, 3);
1782 write_radio_reg(pi, RADIO_2064_REG091, 7); 1830 write_radio_reg(pi, RADIO_2064_REG091, 7);
1783 } 1831 }
1832
1833 if (!(pi->sh->boardflags & BFL_FEM)) {
1834 u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
1835 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
1836
1837 write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
1838 write_radio_reg(pi, RADIO_2064_REG091, 0x3);
1839 write_radio_reg(pi, RADIO_2064_REG038, 0x3);
1840
1841 write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
1842 }
1784} 1843}
1785 1844
1786static int 1845static int
@@ -1860,41 +1919,6 @@ wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
1860 return (filt_index != -1) ? 0 : -1; 1919 return (filt_index != -1) ? 0 : -1;
1861} 1920}
1862 1921
1863void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
1864{
1865 u8 channel = CHSPEC_CHANNEL(chanspec);
1866
1867 wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
1868
1869 wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
1870
1871 or_phy_reg(pi, 0x44a, 0x44);
1872 write_phy_reg(pi, 0x44a, 0x80);
1873
1874 wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
1875 udelay(1000);
1876
1877 wlc_lcnphy_toggle_afe_pwdn(pi);
1878
1879 write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20);
1880 write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor);
1881
1882 if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
1883 mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
1884
1885 wlc_lcnphy_load_tx_iir_filter(pi, false, 3);
1886 } else {
1887 mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
1888
1889 wlc_lcnphy_load_tx_iir_filter(pi, false, 2);
1890 }
1891
1892 wlc_lcnphy_load_tx_iir_filter(pi, true, 0);
1893
1894 mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
1895
1896}
1897
1898static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi) 1922static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi)
1899{ 1923{
1900 u16 pa_gain; 1924 u16 pa_gain;
@@ -1936,6 +1960,21 @@ static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
1936 wlc_lcnphy_enable_tx_gain_override(pi); 1960 wlc_lcnphy_enable_tx_gain_override(pi);
1937} 1961}
1938 1962
1963static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
1964{
1965 u16 m0m1;
1966 struct phytbl_info tab;
1967
1968 tab.tbl_ptr = &m0m1;
1969 tab.tbl_len = 1;
1970 tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
1971 tab.tbl_offset = 87;
1972 tab.tbl_width = 16;
1973 wlc_lcnphy_read_table(pi, &tab);
1974
1975 return (u8) ((m0m1 & 0xff00) >> 8);
1976}
1977
1939static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0) 1978static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0)
1940{ 1979{
1941 u16 m0m1 = (u16) m0 << 8; 1980 u16 m0m1 = (u16) m0 << 8;
@@ -1995,6 +2034,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
1995 } else { 2034 } else {
1996 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); 2035 mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
1997 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); 2036 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2037 mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
2038 mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
2039 mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
2040 mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
2041 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2042 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
2043 mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
2044 mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
2045 mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
2046 mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
1998 } 2047 }
1999 } else { 2048 } else {
2000 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); 2049 mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2081,12 +2130,14 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
2081 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); 2130 (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
2082 2131
2083 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); 2132 mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
2133 mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
2084} 2134}
2085 2135
2086static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) 2136static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2087{ 2137{
2088 struct phytbl_info tab; 2138 struct phytbl_info tab;
2089 u32 rfseq, ind; 2139 u32 rfseq, ind;
2140 u8 tssi_sel;
2090 2141
2091 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 2142 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
2092 tab.tbl_width = 32; 2143 tab.tbl_width = 32;
@@ -2108,7 +2159,13 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2108 2159
2109 mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); 2160 mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
2110 2161
2111 wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); 2162 if (pi->sh->boardflags & BFL_FEM) {
2163 tssi_sel = 0x1;
2164 wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
2165 } else {
2166 tssi_sel = 0xe;
2167 wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
2168 }
2112 mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); 2169 mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
2113 2170
2114 mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); 2171 mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2144,9 +2201,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2144 mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); 2201 mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
2145 2202
2146 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { 2203 if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
2147 mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); 2204 mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
2148 mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); 2205 mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
2149 } else { 2206 } else {
2207 mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
2150 mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); 2208 mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
2151 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); 2209 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
2152 } 2210 }
@@ -2193,6 +2251,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
2193 2251
2194 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); 2252 mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
2195 2253
2254 mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
2255 mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
2256 mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
2257
2196 wlc_lcnphy_pwrctrl_rssiparams(pi); 2258 wlc_lcnphy_pwrctrl_rssiparams(pi);
2197} 2259}
2198 2260
@@ -2811,6 +2873,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2811 read_radio_reg(pi, RADIO_2064_REG007) & 1; 2873 read_radio_reg(pi, RADIO_2064_REG007) & 1;
2812 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; 2874 u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
2813 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; 2875 u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
2876 u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
2877
2814 idleTssi = read_phy_reg(pi, 0x4ab); 2878 idleTssi = read_phy_reg(pi, 0x4ab);
2815 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & 2879 suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
2816 MCTL_EN_MAC)); 2880 MCTL_EN_MAC));
@@ -2828,6 +2892,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2828 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); 2892 mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
2829 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); 2893 mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
2830 wlc_lcnphy_tssi_setup(pi); 2894 wlc_lcnphy_tssi_setup(pi);
2895
2896 mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
2897 mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
2898
2899 wlc_lcnphy_set_bbmult(pi, 0x0);
2900
2831 wlc_phy_do_dummy_tx(pi, true, OFF); 2901 wlc_phy_do_dummy_tx(pi, true, OFF);
2832 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) 2902 idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
2833 >> 0); 2903 >> 0);
@@ -2849,6 +2919,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
2849 2919
2850 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); 2920 mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
2851 2921
2922 wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
2852 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); 2923 wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
2853 wlc_lcnphy_set_tx_gain(pi, &old_gains); 2924 wlc_lcnphy_set_tx_gain(pi, &old_gains);
2854 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); 2925 wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3062,6 +3133,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
3062 wlc_lcnphy_write_table(pi, &tab); 3133 wlc_lcnphy_write_table(pi, &tab);
3063 tab.tbl_offset++; 3134 tab.tbl_offset++;
3064 } 3135 }
3136 mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
3137 mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
3138 mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
3139 mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
3140 mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
3065 3141
3066 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); 3142 mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
3067 3143
@@ -3075,21 +3151,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
3075 wlapi_enable_mac(pi->sh->physhim); 3151 wlapi_enable_mac(pi->sh->physhim);
3076} 3152}
3077 3153
3078static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
3079{
3080 u16 m0m1;
3081 struct phytbl_info tab;
3082
3083 tab.tbl_ptr = &m0m1;
3084 tab.tbl_len = 1;
3085 tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
3086 tab.tbl_offset = 87;
3087 tab.tbl_width = 16;
3088 wlc_lcnphy_read_table(pi, &tab);
3089
3090 return (u8) ((m0m1 & 0xff00) >> 8);
3091}
3092
3093static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain) 3154static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain)
3094{ 3155{
3095 mod_phy_reg(pi, 0x4fb, 3156 mod_phy_reg(pi, 0x4fb,
@@ -3878,7 +3939,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
3878 target_gains.pad_gain = 21; 3939 target_gains.pad_gain = 21;
3879 target_gains.dac_gain = 0; 3940 target_gains.dac_gain = 0;
3880 wlc_lcnphy_set_tx_gain(pi, &target_gains); 3941 wlc_lcnphy_set_tx_gain(pi, &target_gains);
3881 wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
3882 3942
3883 if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { 3943 if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
3884 3944
@@ -3889,6 +3949,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
3889 lcnphy_recal ? LCNPHY_CAL_RECAL : 3949 lcnphy_recal ? LCNPHY_CAL_RECAL :
3890 LCNPHY_CAL_FULL), false); 3950 LCNPHY_CAL_FULL), false);
3891 } else { 3951 } else {
3952 wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
3892 wlc_lcnphy_tx_iqlo_soft_cal_full(pi); 3953 wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
3893 } 3954 }
3894 3955
@@ -4313,17 +4374,22 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
4313 if (CHSPEC_IS5G(pi->radio_chanspec)) 4374 if (CHSPEC_IS5G(pi->radio_chanspec))
4314 pa_gain = 0x70; 4375 pa_gain = 0x70;
4315 else 4376 else
4316 pa_gain = 0x70; 4377 pa_gain = 0x60;
4317 4378
4318 if (pi->sh->boardflags & BFL_FEM) 4379 if (pi->sh->boardflags & BFL_FEM)
4319 pa_gain = 0x10; 4380 pa_gain = 0x10;
4381
4320 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; 4382 tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
4321 tab.tbl_width = 32; 4383 tab.tbl_width = 32;
4322 tab.tbl_len = 1; 4384 tab.tbl_len = 1;
4323 tab.tbl_ptr = &val; 4385 tab.tbl_ptr = &val;
4324 4386
4325 for (j = 0; j < 128; j++) { 4387 for (j = 0; j < 128; j++) {
4326 gm_gain = gain_table[j].gm; 4388 if (pi->sh->boardflags & BFL_FEM)
4389 gm_gain = gain_table[j].gm;
4390 else
4391 gm_gain = 15;
4392
4327 val = (((u32) pa_gain << 24) | 4393 val = (((u32) pa_gain << 24) |
4328 (gain_table[j].pad << 16) | 4394 (gain_table[j].pad << 16) |
4329 (gain_table[j].pga << 8) | gm_gain); 4395 (gain_table[j].pga << 8) | gm_gain);
@@ -4534,7 +4600,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
4534 4600
4535 write_phy_reg(pi, 0x4ea, 0x4688); 4601 write_phy_reg(pi, 0x4ea, 0x4688);
4536 4602
4537 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); 4603 if (pi->sh->boardflags & BFL_FEM)
4604 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
4605 else
4606 mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
4538 4607
4539 mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); 4608 mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
4540 4609
@@ -4545,6 +4614,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
4545 wlc_lcnphy_rcal(pi); 4614 wlc_lcnphy_rcal(pi);
4546 4615
4547 wlc_lcnphy_rc_cal(pi); 4616 wlc_lcnphy_rc_cal(pi);
4617
4618 if (!(pi->sh->boardflags & BFL_FEM)) {
4619 write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
4620 write_radio_reg(pi, RADIO_2064_REG033, 0x19);
4621 write_radio_reg(pi, RADIO_2064_REG039, 0xe);
4622 }
4623
4548} 4624}
4549 4625
4550static void wlc_lcnphy_radio_init(struct brcms_phy *pi) 4626static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4574,22 +4650,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
4574 wlc_lcnphy_write_table(pi, &tab); 4650 wlc_lcnphy_write_table(pi, &tab);
4575 } 4651 }
4576 4652
4577 tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; 4653 if (!(pi->sh->boardflags & BFL_FEM)) {
4578 tab.tbl_width = 16; 4654 tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
4579 tab.tbl_ptr = &val; 4655 tab.tbl_width = 16;
4580 tab.tbl_len = 1; 4656 tab.tbl_ptr = &val;
4581 4657 tab.tbl_len = 1;
4582 val = 114;
4583 tab.tbl_offset = 0;
4584 wlc_lcnphy_write_table(pi, &tab);
4585 4658
4586 val = 130; 4659 val = 150;
4587 tab.tbl_offset = 1; 4660 tab.tbl_offset = 0;
4588 wlc_lcnphy_write_table(pi, &tab); 4661 wlc_lcnphy_write_table(pi, &tab);
4589 4662
4590 val = 6; 4663 val = 220;
4591 tab.tbl_offset = 8; 4664 tab.tbl_offset = 1;
4592 wlc_lcnphy_write_table(pi, &tab); 4665 wlc_lcnphy_write_table(pi, &tab);
4666 }
4593 4667
4594 if (CHSPEC_IS2G(pi->radio_chanspec)) { 4668 if (CHSPEC_IS2G(pi->radio_chanspec)) {
4595 if (pi->sh->boardflags & BFL_FEM) 4669 if (pi->sh->boardflags & BFL_FEM)
@@ -4946,6 +5020,44 @@ void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi)
4946 } 5020 }
4947} 5021}
4948 5022
5023void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
5024{
5025 u8 channel = CHSPEC_CHANNEL(chanspec);
5026
5027 wlc_phy_chanspec_radio_set((struct brcms_phy_pub *)pi, chanspec);
5028
5029 wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
5030
5031 or_phy_reg(pi, 0x44a, 0x44);
5032 write_phy_reg(pi, 0x44a, 0x80);
5033
5034 wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
5035 udelay(1000);
5036
5037 wlc_lcnphy_toggle_afe_pwdn(pi);
5038
5039 write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20);
5040 write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor);
5041
5042 if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
5043 mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
5044
5045 wlc_lcnphy_load_tx_iir_filter(pi, false, 3);
5046 } else {
5047 mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
5048
5049 wlc_lcnphy_load_tx_iir_filter(pi, false, 2);
5050 }
5051
5052 if (pi->sh->boardflags & BFL_FEM)
5053 wlc_lcnphy_load_tx_iir_filter(pi, true, 0);
5054 else
5055 wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
5056
5057 mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
5058 wlc_lcnphy_tssi_setup(pi);
5059}
5060
4949void wlc_phy_detach_lcnphy(struct brcms_phy *pi) 5061void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
4950{ 5062{
4951 kfree(pi->u.pi_lcnphy); 5063 kfree(pi->u.pi_lcnphy);
@@ -4982,8 +5094,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
4982 if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) 5094 if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
4983 return false; 5095 return false;
4984 5096
4985 if ((pi->sh->boardflags & BFL_FEM) && 5097 if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
4986 (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
4987 if (pi_lcn->lcnphy_tempsense_option == 3) { 5098 if (pi_lcn->lcnphy_tempsense_option == 3) {
4988 pi->hwpwrctrl = true; 5099 pi->hwpwrctrl = true;
4989 pi->hwpwrctrl_capable = true; 5100 pi->hwpwrctrl_capable = true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c5..b7e95acc2084 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
1992}; 1992};
1993 1993
1994static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { 1994static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
1995 0x000a,
1996 0x0009, 1995 0x0009,
1997 0x0006,
1998 0x0005,
1999 0x000a, 1996 0x000a,
2000 0x0009,
2001 0x0006,
2002 0x0005, 1997 0x0005,
2003 0x000a,
2004 0x0009,
2005 0x0006, 1998 0x0006,
2006 0x0005,
2007 0x000a,
2008 0x0009, 1999 0x0009,
2009 0x0006,
2010 0x0005,
2011 0x000a, 2000 0x000a,
2012 0x0009,
2013 0x0006,
2014 0x0005, 2001 0x0005,
2015 0x000a,
2016 0x0009,
2017 0x0006, 2002 0x0006,
2018 0x0005,
2019 0x000a,
2020 0x0009, 2003 0x0009,
2021 0x0006,
2022 0x0005,
2023 0x000a, 2004 0x000a,
2024 0x0009,
2025 0x0006,
2026 0x0005, 2005 0x0005,
2027 0x000a,
2028 0x0009,
2029 0x0006, 2006 0x0006,
2030 0x0005,
2031 0x000a,
2032 0x0009, 2007 0x0009,
2033 0x0006,
2034 0x0005,
2035 0x000a, 2008 0x000a,
2036 0x0009,
2037 0x0006,
2038 0x0005, 2009 0x0005,
2039 0x000a,
2040 0x0009,
2041 0x0006, 2010 0x0006,
2042 0x0005, 2011 0x0009,
2043 0x000a, 2012 0x000a,
2013 0x0005,
2014 0x0006,
2044 0x0009, 2015 0x0009,
2016 0x000a,
2017 0x0005,
2045 0x0006, 2018 0x0006,
2019 0x0009,
2020 0x000a,
2046 0x0005, 2021 0x0005,
2022 0x0006,
2023 0x0009,
2047 0x000a, 2024 0x000a,
2025 0x0005,
2026 0x0006,
2048 0x0009, 2027 0x0009,
2028 0x000a,
2029 0x0005,
2049 0x0006, 2030 0x0006,
2031 0x0009,
2032 0x000a,
2050 0x0005, 2033 0x0005,
2034 0x0006,
2035 0x0009,
2051 0x000a, 2036 0x000a,
2037 0x0005,
2038 0x0006,
2052 0x0009, 2039 0x0009,
2040 0x000a,
2041 0x0005,
2053 0x0006, 2042 0x0006,
2043 0x0009,
2044 0x000a,
2054 0x0005, 2045 0x0005,
2046 0x0006,
2047 0x0009,
2055 0x000a, 2048 0x000a,
2049 0x0005,
2050 0x0006,
2056 0x0009, 2051 0x0009,
2052 0x000a,
2053 0x0005,
2057 0x0006, 2054 0x0006,
2055 0x0009,
2056 0x000a,
2058 0x0005, 2057 0x0005,
2058 0x0006,
2059}; 2059};
2060 2060
2061static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { 2061static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 5855f4fd16dc..4fb2834f4e64 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -176,6 +176,7 @@ struct brcms_pub {
176 bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */ 176 bool phy_11ncapable; /* the PHY/HW is capable of 802.11N */
177 177
178 struct wl_cnt *_cnt; /* low-level counters in driver */ 178 struct wl_cnt *_cnt; /* low-level counters in driver */
179 struct dentry *dbgfs_dir;
179}; 180};
180 181
181enum wlc_par_id { 182enum wlc_par_id {
@@ -200,43 +201,6 @@ enum wlc_par_id {
200/* WL11N Support */ 201/* WL11N Support */
201#define AMPDU_AGG_HOST 1 202#define AMPDU_AGG_HOST 1
202 203
203/* pri is priority encoded in the packet. This maps the Packet priority to
204 * enqueue precedence as defined in wlc_prec_map
205 */
206extern const u8 wlc_prio2prec_map[];
207#define BRCMS_PRIO_TO_PREC(pri) wlc_prio2prec_map[(pri) & 7]
208
209#define BRCMS_PREC_COUNT 16 /* Max precedence level implemented */
210
211/* Mask to describe all precedence levels */
212#define BRCMS_PREC_BMP_ALL MAXBITVAL(BRCMS_PREC_COUNT)
213
214/*
215 * This maps priority to one precedence higher - Used by PS-Poll response
216 * packets to simulate enqueue-at-head operation, but still maintain the
217 * order on the queue
218 */
219#define BRCMS_PRIO_TO_HI_PREC(pri) min(BRCMS_PRIO_TO_PREC(pri) + 1,\
220 BRCMS_PREC_COUNT - 1)
221
222/* Define a bitmap of precedences comprised by each AC */
223#define BRCMS_PREC_BMP_AC_BE (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BE)) | \
224 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BE)) | \
225 NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_EE)) | \
226 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_EE)))
227#define BRCMS_PREC_BMP_AC_BK (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_BK)) | \
228 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_BK)) | \
229 NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NONE)) | \
230 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NONE)))
231#define BRCMS_PREC_BMP_AC_VI (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_CL)) | \
232 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_CL)) | \
233 NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VI)) | \
234 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VI)))
235#define BRCMS_PREC_BMP_AC_VO (NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_VO)) | \
236 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_VO)) | \
237 NBITVAL(BRCMS_PRIO_TO_PREC(PRIO_8021D_NC)) | \
238 NBITVAL(BRCMS_PRIO_TO_HI_PREC(PRIO_8021D_NC)))
239
240/* network protection config */ 204/* network protection config */
241#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */ 205#define BRCMS_PROT_G_SPEC 1 /* SPEC g protection */
242#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */ 206#define BRCMS_PROT_G_OVR 2 /* SPEC g prot override */
@@ -319,9 +283,9 @@ extern void brcms_c_intrson(struct brcms_c_info *wlc);
319extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc); 283extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
320extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask); 284extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
321extern bool brcms_c_intrsupd(struct brcms_c_info *wlc); 285extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
322extern bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc); 286extern bool brcms_c_isr(struct brcms_c_info *wlc);
323extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded); 287extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
324extern void brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, 288extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
325 struct sk_buff *sdu, 289 struct sk_buff *sdu,
326 struct ieee80211_hw *hw); 290 struct ieee80211_hw *hw);
327extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid); 291extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
index ed1d1aa71d2d..dd9162722495 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/stf.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/stf.c
@@ -23,6 +23,7 @@
23#include "channel.h" 23#include "channel.h"
24#include "main.h" 24#include "main.h"
25#include "stf.h" 25#include "stf.h"
26#include "debug.h"
26 27
27#define MIN_SPATIAL_EXPANSION 0 28#define MIN_SPATIAL_EXPANSION 0
28#define MAX_SPATIAL_EXPANSION 1 29#define MAX_SPATIAL_EXPANSION 1
@@ -160,8 +161,8 @@ bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val)
160static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts, 161static int brcms_c_stf_txcore_set(struct brcms_c_info *wlc, u8 Nsts,
161 u8 core_mask) 162 u8 core_mask)
162{ 163{
163 BCMMSG(wlc->wiphy, "wl%d: Nsts %d core_mask %x\n", 164 brcms_dbg_ht(wlc->hw->d11core, "wl%d: Nsts %d core_mask %x\n",
164 wlc->pub->unit, Nsts, core_mask); 165 wlc->pub->unit, Nsts, core_mask);
165 166
166 if (hweight8(core_mask) > wlc->stf->txstreams) 167 if (hweight8(core_mask) > wlc->stf->txstreams)
167 core_mask = 0; 168 core_mask = 0;
@@ -194,7 +195,8 @@ static int brcms_c_stf_spatial_policy_set(struct brcms_c_info *wlc, int val)
194 int i; 195 int i;
195 u8 core_mask = 0; 196 u8 core_mask = 0;
196 197
197 BCMMSG(wlc->wiphy, "wl%d: val %x\n", wlc->pub->unit, val); 198 brcms_dbg_ht(wlc->hw->d11core, "wl%d: val %x\n", wlc->pub->unit,
199 val);
198 200
199 wlc->stf->spatial_policy = (s8) val; 201 wlc->stf->spatial_policy = (s8) val;
200 for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) { 202 for (i = 1; i <= MAX_STREAMS_SUPPORTED; i++) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index e11ae83111e4..ae1f3ad40d45 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -246,7 +246,7 @@
246 246
247#define BCMMSG(dev, fmt, args...) \ 247#define BCMMSG(dev, fmt, args...) \
248do { \ 248do { \
249 if (brcm_msg_level & LOG_TRACE_VAL) \ 249 if (brcm_msg_level & BRCM_DL_INFO) \
250 wiphy_err(dev, "%s: " fmt, __func__, ##args); \ 250 wiphy_err(dev, "%s: " fmt, __func__, ##args); \
251} while (0) 251} while (0)
252 252
@@ -281,7 +281,6 @@ struct ieee80211_tx_queue_params;
281struct brcms_info; 281struct brcms_info;
282struct brcms_c_info; 282struct brcms_c_info;
283struct brcms_hardware; 283struct brcms_hardware;
284struct brcms_txq_info;
285struct brcms_band; 284struct brcms_band;
286struct dma_pub; 285struct dma_pub;
287struct si_pub; 286struct si_pub;
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index f0d8c04a9c8c..fb7cbcf81179 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -78,9 +78,14 @@
78#define PM_OFF 0 78#define PM_OFF 0
79#define PM_MAX 1 79#define PM_MAX 1
80 80
81/* Message levels */ 81/* Debug levels */
82#define LOG_ERROR_VAL 0x00000001 82#define BRCM_DL_INFO 0x00000001
83#define LOG_TRACE_VAL 0x00000002 83#define BRCM_DL_MAC80211 0x00000002
84#define BRCM_DL_RX 0x00000004
85#define BRCM_DL_TX 0x00000008
86#define BRCM_DL_INT 0x00000010
87#define BRCM_DL_DMA 0x00000020
88#define BRCM_DL_HT 0x00000040
84 89
85#define PM_OFF 0 90#define PM_OFF 0
86#define PM_MAX 1 91#define PM_MAX 1
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index df7050abe717..d39e3e24077b 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -415,7 +415,7 @@ static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
415 ssid = pos + 2; 415 ssid = pos + 2;
416 ssid_len = pos[1]; 416 ssid_len = pos[1];
417 break; 417 break;
418 case WLAN_EID_GENERIC: 418 case WLAN_EID_VENDOR_SPECIFIC:
419 if (pos[1] >= 4 && 419 if (pos[1] >= 4 &&
420 pos[2] == 0x00 && pos[3] == 0x50 && 420 pos[2] == 0x00 && pos[3] == 0x50 &&
421 pos[4] == 0xf2 && pos[5] == 1) { 421 pos[4] == 0xf2 && pos[5] == 1) {
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 29b8fa1adefd..d92b21a8e597 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1788,10 +1788,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1788 } 1788 }
1789 1789
1790 /* Initialize the geo */ 1790 /* Initialize the geo */
1791 if (libipw_set_geo(priv->ieee, &ipw_geos[0])) { 1791 libipw_set_geo(priv->ieee, &ipw_geos[0]);
1792 printk(KERN_WARNING DRV_NAME "Could not set geo\n");
1793 return 0;
1794 }
1795 priv->ieee->freq_band = LIBIPW_24GHZ_BAND; 1792 priv->ieee->freq_band = LIBIPW_24GHZ_BAND;
1796 1793
1797 lock = LOCK_NONE; 1794 lock = LOCK_NONE;
@@ -6413,7 +6410,7 @@ out:
6413 goto out; 6410 goto out;
6414} 6411}
6415 6412
6416static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) 6413static void ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6417{ 6414{
6418 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); 6415 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6419 struct net_device *dev = priv->net_dev; 6416 struct net_device *dev = priv->net_dev;
@@ -6609,7 +6606,7 @@ static struct pci_driver ipw2100_pci_driver = {
6609 .name = DRV_NAME, 6606 .name = DRV_NAME,
6610 .id_table = ipw2100_pci_id_table, 6607 .id_table = ipw2100_pci_id_table,
6611 .probe = ipw2100_pci_init_one, 6608 .probe = ipw2100_pci_init_one,
6612 .remove = __devexit_p(ipw2100_pci_remove_one), 6609 .remove = ipw2100_pci_remove_one,
6613#ifdef CONFIG_PM 6610#ifdef CONFIG_PM
6614 .suspend = ipw2100_suspend, 6611 .suspend = ipw2100_suspend,
6615 .resume = ipw2100_resume, 6612 .resume = ipw2100_resume,
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 768bf612533e..844f201b7b70 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -6812,7 +6812,6 @@ static int ipw_wx_get_auth(struct net_device *dev,
6812 struct libipw_device *ieee = priv->ieee; 6812 struct libipw_device *ieee = priv->ieee;
6813 struct lib80211_crypt_data *crypt; 6813 struct lib80211_crypt_data *crypt;
6814 struct iw_param *param = &wrqu->param; 6814 struct iw_param *param = &wrqu->param;
6815 int ret = 0;
6816 6815
6817 switch (param->flags & IW_AUTH_INDEX) { 6816 switch (param->flags & IW_AUTH_INDEX) {
6818 case IW_AUTH_WPA_VERSION: 6817 case IW_AUTH_WPA_VERSION:
@@ -6822,8 +6821,7 @@ static int ipw_wx_get_auth(struct net_device *dev,
6822 /* 6821 /*
6823 * wpa_supplicant will control these internally 6822 * wpa_supplicant will control these internally
6824 */ 6823 */
6825 ret = -EOPNOTSUPP; 6824 return -EOPNOTSUPP;
6826 break;
6827 6825
6828 case IW_AUTH_TKIP_COUNTERMEASURES: 6826 case IW_AUTH_TKIP_COUNTERMEASURES:
6829 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; 6827 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
@@ -10774,7 +10772,7 @@ static void ipw_bg_link_down(struct work_struct *work)
10774 mutex_unlock(&priv->mutex); 10772 mutex_unlock(&priv->mutex);
10775} 10773}
10776 10774
10777static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv) 10775static int ipw_setup_deferred_work(struct ipw_priv *priv)
10778{ 10776{
10779 int ret = 0; 10777 int ret = 0;
10780 10778
@@ -11269,10 +11267,31 @@ static const struct libipw_geo ipw_geos[] = {
11269 } 11267 }
11270}; 11268};
11271 11269
11270static void ipw_set_geo(struct ipw_priv *priv)
11271{
11272 int j;
11273
11274 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11275 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11276 ipw_geos[j].name, 3))
11277 break;
11278 }
11279
11280 if (j == ARRAY_SIZE(ipw_geos)) {
11281 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11282 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11283 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11284 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11285 j = 0;
11286 }
11287
11288 libipw_set_geo(priv->ieee, &ipw_geos[j]);
11289}
11290
11272#define MAX_HW_RESTARTS 5 11291#define MAX_HW_RESTARTS 5
11273static int ipw_up(struct ipw_priv *priv) 11292static int ipw_up(struct ipw_priv *priv)
11274{ 11293{
11275 int rc, i, j; 11294 int rc, i;
11276 11295
11277 /* Age scan list entries found before suspend */ 11296 /* Age scan list entries found before suspend */
11278 if (priv->suspend_time) { 11297 if (priv->suspend_time) {
@@ -11310,22 +11329,7 @@ static int ipw_up(struct ipw_priv *priv)
11310 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11329 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11311 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN); 11330 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11312 11331
11313 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11332 ipw_set_geo(priv);
11314 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11315 ipw_geos[j].name, 3))
11316 break;
11317 }
11318 if (j == ARRAY_SIZE(ipw_geos)) {
11319 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11320 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11321 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11322 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11323 j = 0;
11324 }
11325 if (libipw_set_geo(priv->ieee, &ipw_geos[j])) {
11326 IPW_WARNING("Could not set geography.");
11327 return 0;
11328 }
11329 11333
11330 if (priv->status & STATUS_RF_KILL_SW) { 11334 if (priv->status & STATUS_RF_KILL_SW) {
11331 IPW_WARNING("Radio disabled by module parameter.\n"); 11335 IPW_WARNING("Radio disabled by module parameter.\n");
@@ -11722,7 +11726,7 @@ static const struct net_device_ops ipw_netdev_ops = {
11722 .ndo_validate_addr = eth_validate_addr, 11726 .ndo_validate_addr = eth_validate_addr,
11723}; 11727};
11724 11728
11725static int __devinit ipw_pci_probe(struct pci_dev *pdev, 11729static int ipw_pci_probe(struct pci_dev *pdev,
11726 const struct pci_device_id *ent) 11730 const struct pci_device_id *ent)
11727{ 11731{
11728 int err = 0; 11732 int err = 0;
@@ -11895,7 +11899,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11895 return err; 11899 return err;
11896} 11900}
11897 11901
11898static void __devexit ipw_pci_remove(struct pci_dev *pdev) 11902static void ipw_pci_remove(struct pci_dev *pdev)
11899{ 11903{
11900 struct ipw_priv *priv = pci_get_drvdata(pdev); 11904 struct ipw_priv *priv = pci_get_drvdata(pdev);
11901 struct list_head *p, *q; 11905 struct list_head *p, *q;
@@ -12057,7 +12061,7 @@ static struct pci_driver ipw_driver = {
12057 .name = DRV_NAME, 12061 .name = DRV_NAME,
12058 .id_table = card_ids, 12062 .id_table = card_ids,
12059 .probe = ipw_pci_probe, 12063 .probe = ipw_pci_probe,
12060 .remove = __devexit_p(ipw_pci_remove), 12064 .remove = ipw_pci_remove,
12061#ifdef CONFIG_PM 12065#ifdef CONFIG_PM
12062 .suspend = ipw_pci_suspend, 12066 .suspend = ipw_pci_suspend,
12063 .resume = ipw_pci_resume, 12067 .resume = ipw_pci_resume,
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 0b22fb421735..6eede52ad8c0 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -978,7 +978,7 @@ extern void libipw_network_reset(struct libipw_network *network);
978/* libipw_geo.c */ 978/* libipw_geo.c */
979extern const struct libipw_geo *libipw_get_geo(struct libipw_device 979extern const struct libipw_geo *libipw_get_geo(struct libipw_device
980 *ieee); 980 *ieee);
981extern int libipw_set_geo(struct libipw_device *ieee, 981extern void libipw_set_geo(struct libipw_device *ieee,
982 const struct libipw_geo *geo); 982 const struct libipw_geo *geo);
983 983
984extern int libipw_is_valid_channel(struct libipw_device *ieee, 984extern int libipw_is_valid_channel(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_geo.c b/drivers/net/wireless/ipw2x00/libipw_geo.c
index c9fe3c99cb00..218f2a32de21 100644
--- a/drivers/net/wireless/ipw2x00/libipw_geo.c
+++ b/drivers/net/wireless/ipw2x00/libipw_geo.c
@@ -132,7 +132,7 @@ u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq)
132 return 0; 132 return 0;
133} 133}
134 134
135int libipw_set_geo(struct libipw_device *ieee, 135void libipw_set_geo(struct libipw_device *ieee,
136 const struct libipw_geo *geo) 136 const struct libipw_geo *geo)
137{ 137{
138 memcpy(ieee->geo.name, geo->name, 3); 138 memcpy(ieee->geo.name, geo->name, 3);
@@ -143,7 +143,6 @@ int libipw_set_geo(struct libipw_device *ieee,
143 sizeof(struct libipw_channel)); 143 sizeof(struct libipw_channel));
144 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels * 144 memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
145 sizeof(struct libipw_channel)); 145 sizeof(struct libipw_channel));
146 return 0;
147} 146}
148 147
149const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee) 148const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee)
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 02e057923236..95a1ca1e895c 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -1108,7 +1108,7 @@ static const char *get_info_element_string(u16 id)
1108 MFIE_STRING(ERP_INFO); 1108 MFIE_STRING(ERP_INFO);
1109 MFIE_STRING(RSN); 1109 MFIE_STRING(RSN);
1110 MFIE_STRING(EXT_SUPP_RATES); 1110 MFIE_STRING(EXT_SUPP_RATES);
1111 MFIE_STRING(GENERIC); 1111 MFIE_STRING(VENDOR_SPECIFIC);
1112 MFIE_STRING(QOS_PARAMETER); 1112 MFIE_STRING(QOS_PARAMETER);
1113 default: 1113 default:
1114 return "UNKNOWN"; 1114 return "UNKNOWN";
@@ -1248,8 +1248,8 @@ static int libipw_parse_info_param(struct libipw_info_element
1248 LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n"); 1248 LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n");
1249 break; 1249 break;
1250 1250
1251 case WLAN_EID_GENERIC: 1251 case WLAN_EID_VENDOR_SPECIFIC:
1252 LIBIPW_DEBUG_MGMT("WLAN_EID_GENERIC: %d bytes\n", 1252 LIBIPW_DEBUG_MGMT("WLAN_EID_VENDOR_SPECIFIC: %d bytes\n",
1253 info_element->len); 1253 info_element->len);
1254 if (!libipw_parse_qos_info_param_IE(info_element, 1254 if (!libipw_parse_qos_info_param_IE(info_element,
1255 network)) 1255 network))
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index e252acb9c862..d604b4036a76 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3794,7 +3794,7 @@ out:
3794 return err; 3794 return err;
3795} 3795}
3796 3796
3797static void __devexit 3797static void
3798il3945_pci_remove(struct pci_dev *pdev) 3798il3945_pci_remove(struct pci_dev *pdev)
3799{ 3799{
3800 struct il_priv *il = pci_get_drvdata(pdev); 3800 struct il_priv *il = pci_get_drvdata(pdev);
@@ -3884,7 +3884,7 @@ static struct pci_driver il3945_driver = {
3884 .name = DRV_NAME, 3884 .name = DRV_NAME,
3885 .id_table = il3945_hw_card_ids, 3885 .id_table = il3945_hw_card_ids,
3886 .probe = il3945_pci_probe, 3886 .probe = il3945_pci_probe,
3887 .remove = __devexit_p(il3945_pci_remove), 3887 .remove = il3945_pci_remove,
3888 .driver.pm = IL_LEGACY_PM_OPS, 3888 .driver.pm = IL_LEGACY_PM_OPS,
3889}; 3889};
3890 3890
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index 87e539894330..e0b9d7fa5de0 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -516,7 +516,7 @@ static void
516il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) 516il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
517{ 517{
518 struct ieee80211_hdr *header; 518 struct ieee80211_hdr *header;
519 struct ieee80211_rx_status rx_status; 519 struct ieee80211_rx_status rx_status = {};
520 struct il_rx_pkt *pkt = rxb_addr(rxb); 520 struct il_rx_pkt *pkt = rxb_addr(rxb);
521 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt); 521 struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
522 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt); 522 struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index eac4dc8bc879..c3fbf6717564 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -613,7 +613,7 @@ void
613il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) 613il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
614{ 614{
615 struct ieee80211_hdr *header; 615 struct ieee80211_hdr *header;
616 struct ieee80211_rx_status rx_status; 616 struct ieee80211_rx_status rx_status = {};
617 struct il_rx_pkt *pkt = rxb_addr(rxb); 617 struct il_rx_pkt *pkt = rxb_addr(rxb);
618 struct il_rx_phy_res *phy_res; 618 struct il_rx_phy_res *phy_res;
619 __le32 rx_pkt_status; 619 __le32 rx_pkt_status;
@@ -686,7 +686,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
686 686
687 /* TSF isn't reliable. In order to allow smooth user experience, 687 /* TSF isn't reliable. In order to allow smooth user experience,
688 * this W/A doesn't propagate it to the mac80211 */ 688 * this W/A doesn't propagate it to the mac80211 */
689 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */ 689 /*rx_status.flag |= RX_FLAG_MACTIME_START; */
690 690
691 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 691 il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
692 692
@@ -6664,7 +6664,7 @@ out:
6664 return err; 6664 return err;
6665} 6665}
6666 6666
6667static void __devexit 6667static void
6668il4965_pci_remove(struct pci_dev *pdev) 6668il4965_pci_remove(struct pci_dev *pdev)
6669{ 6669{
6670 struct il_priv *il = pci_get_drvdata(pdev); 6670 struct il_priv *il = pci_get_drvdata(pdev);
@@ -6772,7 +6772,7 @@ static struct pci_driver il4965_driver = {
6772 .name = DRV_NAME, 6772 .name = DRV_NAME,
6773 .id_table = il4965_hw_card_ids, 6773 .id_table = il4965_hw_card_ids,
6774 .probe = il4965_pci_probe, 6774 .probe = il4965_pci_probe,
6775 .remove = __devexit_p(il4965_pci_remove), 6775 .remove = il4965_pci_remove,
6776 .driver.pm = IL_LEGACY_PM_OPS, 6776 .driver.pm = IL_LEGACY_PM_OPS,
6777}; 6777};
6778 6778
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index b4bb813362bd..e254cba4557a 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -2919,9 +2919,8 @@ do { \
2919#define IL_DBG(level, fmt, args...) \ 2919#define IL_DBG(level, fmt, args...) \
2920do { \ 2920do { \
2921 if (il_get_debug_level(il) & level) \ 2921 if (il_get_debug_level(il) & level) \
2922 dev_printk(KERN_ERR, &il->hw->wiphy->dev, \ 2922 dev_err(&il->hw->wiphy->dev, "%c %s " fmt, \
2923 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 2923 in_interrupt() ? 'I' : 'U', __func__ , ##args); \
2924 __func__ , ## args); \
2925} while (0) 2924} while (0)
2926 2925
2927#define il_print_hex_dump(il, level, p, len) \ 2926#define il_print_hex_dump(il, level, p, len) \
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 727fbb5db9da..5cf43236421e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -133,12 +133,3 @@ config IWLWIFI_P2P
133 support when it is loaded. 133 support when it is loaded.
134 134
135 Say Y only if you want to experiment with P2P. 135 Say Y only if you want to experiment with P2P.
136
137config IWLWIFI_EXPERIMENTAL_MFP
138 bool "support MFP (802.11w) even if uCode doesn't advertise"
139 depends on IWLWIFI
140 help
141 This option enables experimental MFP (802.11W) support
142 even if the microcode doesn't advertise it.
143
144 Say Y only if you want to experiment with MFP.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 75e12f29d9eb..33b3ad2e546b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -176,8 +176,8 @@ int iwlagn_hw_valid_rtc_data_addr(u32 addr);
176/* lib */ 176/* lib */
177int iwlagn_send_tx_power(struct iwl_priv *priv); 177int iwlagn_send_tx_power(struct iwl_priv *priv);
178void iwlagn_temperature(struct iwl_priv *priv); 178void iwlagn_temperature(struct iwl_priv *priv);
179int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 179int iwlagn_txfifo_flush(struct iwl_priv *priv);
180void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 180void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
181int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 181int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
182int iwl_send_statistics_request(struct iwl_priv *priv, 182int iwl_send_statistics_request(struct iwl_priv *priv,
183 u8 flags, bool clear); 183 u8 flags, bool clear);
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index f2dd671d7dc8..de54713b680c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -833,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
833 * To be safe, simply mask out any chains that we know 833 * To be safe, simply mask out any chains that we know
834 * are not on the device. 834 * are not on the device.
835 */ 835 */
836 active_chains &= priv->eeprom_data->valid_rx_ant; 836 active_chains &= priv->nvm_data->valid_rx_ant;
837 837
838 num_tx_chains = 0; 838 num_tx_chains = 0;
839 for (i = 0; i < NUM_RX_CHAINS; i++) { 839 for (i = 0; i < NUM_RX_CHAINS; i++) {
840 /* loops on all the bits of 840 /* loops on all the bits of
841 * priv->hw_setting.valid_tx_ant */ 841 * priv->hw_setting.valid_tx_ant */
842 u8 ant_msk = (1 << i); 842 u8 ant_msk = (1 << i);
843 if (!(priv->eeprom_data->valid_tx_ant & ant_msk)) 843 if (!(priv->nvm_data->valid_tx_ant & ant_msk))
844 continue; 844 continue;
845 845
846 num_tx_chains++; 846 num_tx_chains++;
@@ -854,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
854 * connect the first valid tx chain 854 * connect the first valid tx chain
855 */ 855 */
856 first_chain = 856 first_chain =
857 find_first_chain(priv->eeprom_data->valid_tx_ant); 857 find_first_chain(priv->nvm_data->valid_tx_ant);
858 data->disconn_array[first_chain] = 0; 858 data->disconn_array[first_chain] = 0;
859 active_chains |= BIT(first_chain); 859 active_chains |= BIT(first_chain);
860 IWL_DEBUG_CALIB(priv, 860 IWL_DEBUG_CALIB(priv,
@@ -864,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
864 } 864 }
865 } 865 }
866 866
867 if (active_chains != priv->eeprom_data->valid_rx_ant && 867 if (active_chains != priv->nvm_data->valid_rx_ant &&
868 active_chains != priv->chain_noise_data.active_chains) 868 active_chains != priv->chain_noise_data.active_chains)
869 IWL_DEBUG_CALIB(priv, 869 IWL_DEBUG_CALIB(priv,
870 "Detected that not all antennas are connected! " 870 "Detected that not all antennas are connected! "
871 "Connected: %#x, valid: %#x.\n", 871 "Connected: %#x, valid: %#x.\n",
872 active_chains, 872 active_chains,
873 priv->eeprom_data->valid_rx_ant); 873 priv->nvm_data->valid_rx_ant);
874 874
875 /* Save for use within RXON, TX, SCAN commands, etc. */ 875 /* Save for use within RXON, TX, SCAN commands, etc. */
876 data->active_chains = active_chains; 876 data->active_chains = active_chains;
@@ -1055,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1055 priv->cfg->bt_params->advanced_bt_coexist) { 1055 priv->cfg->bt_params->advanced_bt_coexist) {
1056 /* Disable disconnected antenna algorithm for advanced 1056 /* Disable disconnected antenna algorithm for advanced
1057 bt coex, assuming valid antennas are connected */ 1057 bt coex, assuming valid antennas are connected */
1058 data->active_chains = priv->eeprom_data->valid_rx_ant; 1058 data->active_chains = priv->nvm_data->valid_rx_ant;
1059 for (i = 0; i < NUM_RX_CHAINS; i++) 1059 for (i = 0; i < NUM_RX_CHAINS; i++)
1060 if (!(data->active_chains & (1<<i))) 1060 if (!(data->active_chains & (1<<i)))
1061 data->disconn_array[i] = 1; 1061 data->disconn_array[i] = 1;
@@ -1086,7 +1086,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1086 1086
1087 iwlagn_gain_computation( 1087 iwlagn_gain_computation(
1088 priv, average_noise, 1088 priv, average_noise,
1089 find_first_chain(priv->eeprom_data->valid_rx_ant)); 1089 find_first_chain(priv->nvm_data->valid_rx_ant));
1090 1090
1091 /* Some power changes may have been made during the calibration. 1091 /* Some power changes may have been made during the calibration.
1092 * Update and commit the RXON 1092 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 01128c96b5d8..71ab76b2b39d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -986,8 +986,7 @@ struct iwl_rem_sta_cmd {
986 986
987#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) 987#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
988 988
989#define IWL_DROP_SINGLE 0 989#define IWL_DROP_ALL BIT(1)
990#define IWL_DROP_ALL (BIT(IWL_RXON_CTX_BSS) | BIT(IWL_RXON_CTX_PAN))
991 990
992/* 991/*
993 * REPLY_TXFIFO_FLUSH = 0x1e(command and response) 992 * REPLY_TXFIFO_FLUSH = 0x1e(command and response)
@@ -1004,14 +1003,14 @@ struct iwl_rem_sta_cmd {
1004 * the flush operation ends when both the scheduler DMA done and TXFIFO empty 1003 * the flush operation ends when both the scheduler DMA done and TXFIFO empty
1005 * are set. 1004 * are set.
1006 * 1005 *
1007 * @fifo_control: bit mask for which queues to flush 1006 * @queue_control: bit mask for which queues to flush
1008 * @flush_control: flush controls 1007 * @flush_control: flush controls
1009 * 0: Dump single MSDU 1008 * 0: Dump single MSDU
1010 * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. 1009 * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
1011 * 2: Dump all FIFO 1010 * 2: Dump all FIFO
1012 */ 1011 */
1013struct iwl_txfifo_flush_cmd { 1012struct iwl_txfifo_flush_cmd {
1014 __le32 fifo_control; 1013 __le32 queue_control;
1015 __le16 flush_control; 1014 __le16 flush_control;
1016 __le16 reserved; 1015 __le16 reserved;
1017} __packed; 1016} __packed;
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 1a98fa3ab06d..5b9533eef54d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -305,7 +305,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
305 int pos = 0, ofs = 0, buf_size = 0; 305 int pos = 0, ofs = 0, buf_size = 0;
306 const u8 *ptr; 306 const u8 *ptr;
307 char *buf; 307 char *buf;
308 u16 eeprom_ver; 308 u16 nvm_ver;
309 size_t eeprom_len = priv->eeprom_blob_size; 309 size_t eeprom_len = priv->eeprom_blob_size;
310 buf_size = 4 * eeprom_len + 256; 310 buf_size = 4 * eeprom_len + 256;
311 311
@@ -321,9 +321,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
321 if (!buf) 321 if (!buf)
322 return -ENOMEM; 322 return -ENOMEM;
323 323
324 eeprom_ver = priv->eeprom_data->eeprom_version; 324 nvm_ver = priv->nvm_data->nvm_version;
325 pos += scnprintf(buf + pos, buf_size - pos, 325 pos += scnprintf(buf + pos, buf_size - pos,
326 "NVM version: 0x%x\n", eeprom_ver); 326 "NVM version: 0x%x\n", nvm_ver);
327 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 327 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
328 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 328 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
329 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 329 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -1333,17 +1333,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1333 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { 1333 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1334 pos += scnprintf(buf + pos, bufsz - pos, 1334 pos += scnprintf(buf + pos, bufsz - pos,
1335 "tx power: (1/2 dB step)\n"); 1335 "tx power: (1/2 dB step)\n");
1336 if ((priv->eeprom_data->valid_tx_ant & ANT_A) && 1336 if ((priv->nvm_data->valid_tx_ant & ANT_A) &&
1337 tx->tx_power.ant_a) 1337 tx->tx_power.ant_a)
1338 pos += scnprintf(buf + pos, bufsz - pos, 1338 pos += scnprintf(buf + pos, bufsz - pos,
1339 fmt_hex, "antenna A:", 1339 fmt_hex, "antenna A:",
1340 tx->tx_power.ant_a); 1340 tx->tx_power.ant_a);
1341 if ((priv->eeprom_data->valid_tx_ant & ANT_B) && 1341 if ((priv->nvm_data->valid_tx_ant & ANT_B) &&
1342 tx->tx_power.ant_b) 1342 tx->tx_power.ant_b)
1343 pos += scnprintf(buf + pos, bufsz - pos, 1343 pos += scnprintf(buf + pos, bufsz - pos,
1344 fmt_hex, "antenna B:", 1344 fmt_hex, "antenna B:",
1345 tx->tx_power.ant_b); 1345 tx->tx_power.ant_b);
1346 if ((priv->eeprom_data->valid_tx_ant & ANT_C) && 1346 if ((priv->nvm_data->valid_tx_ant & ANT_C) &&
1347 tx->tx_power.ant_c) 1347 tx->tx_power.ant_c)
1348 pos += scnprintf(buf + pos, bufsz - pos, 1348 pos += scnprintf(buf + pos, bufsz - pos,
1349 fmt_hex, "antenna C:", 1349 fmt_hex, "antenna C:",
@@ -2101,7 +2101,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
2101 if (iwl_is_rfkill(priv)) 2101 if (iwl_is_rfkill(priv))
2102 return -EFAULT; 2102 return -EFAULT;
2103 2103
2104 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 2104 iwlagn_dev_txfifo_flush(priv);
2105 2105
2106 return count; 2106 return count;
2107} 2107}
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 8141f91c3725..2653a891cc7e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -789,7 +789,6 @@ struct iwl_priv {
789 /* remain-on-channel offload support */ 789 /* remain-on-channel offload support */
790 struct ieee80211_channel *hw_roc_channel; 790 struct ieee80211_channel *hw_roc_channel;
791 struct delayed_work hw_roc_disable_work; 791 struct delayed_work hw_roc_disable_work;
792 enum nl80211_channel_type hw_roc_chantype;
793 int hw_roc_duration; 792 int hw_roc_duration;
794 bool hw_roc_setup, hw_roc_start_notified; 793 bool hw_roc_setup, hw_roc_start_notified;
795 794
@@ -844,7 +843,7 @@ struct iwl_priv {
844 void *wowlan_sram; 843 void *wowlan_sram;
845#endif /* CONFIG_IWLWIFI_DEBUGFS */ 844#endif /* CONFIG_IWLWIFI_DEBUGFS */
846 845
847 struct iwl_eeprom_data *eeprom_data; 846 struct iwl_nvm_data *nvm_data;
848 /* eeprom blob for debugfs/testmode */ 847 /* eeprom blob for debugfs/testmode */
849 u8 *eeprom_blob; 848 u8 *eeprom_blob;
850 size_t eeprom_blob_size; 849 size_t eeprom_blob_size;
diff --git a/drivers/net/wireless/iwlwifi/dvm/devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index da5862064195..8c72be3f37c1 100644
--- a/drivers/net/wireless/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -305,8 +305,8 @@ static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
305{ 305{
306 u16 temperature, voltage; 306 u16 temperature, voltage;
307 307
308 temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature); 308 temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature);
309 voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage); 309 voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage);
310 310
311 /* offset = temp - volt / coeff */ 311 /* offset = temp - volt / coeff */
312 return (s32)(temperature - 312 return (s32)(temperature -
@@ -460,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
460 break; 460 break;
461 case IWL_DEVICE_FAMILY_6050: 461 case IWL_DEVICE_FAMILY_6050:
462 /* Indicate calibration version to uCode. */ 462 /* Indicate calibration version to uCode. */
463 if (priv->eeprom_data->calib_version >= 6) 463 if (priv->nvm_data->calib_version >= 6)
464 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 464 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
465 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 465 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
466 break; 466 break;
467 case IWL_DEVICE_FAMILY_6150: 467 case IWL_DEVICE_FAMILY_6150:
468 /* Indicate calibration version to uCode. */ 468 /* Indicate calibration version to uCode. */
469 if (priv->eeprom_data->calib_version >= 6) 469 if (priv->nvm_data->calib_version >= 6)
470 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 470 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
471 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 471 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
472 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG, 472 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index bef88c1a2c9b..6ff46605ad4f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -59,7 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
59 /* half dBm need to multiply */ 59 /* half dBm need to multiply */
60 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 60 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
61 61
62 if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) { 62 if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
63 /* 63 /*
64 * For the newer devices which using enhanced/extend tx power 64 * For the newer devices which using enhanced/extend tx power
65 * table in EEPROM, the format is in half dBm. driver need to 65 * table in EEPROM, the format is in half dBm. driver need to
@@ -72,7 +72,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
72 * half-dBm format), lower the tx power based on EEPROM 72 * half-dBm format), lower the tx power based on EEPROM
73 */ 73 */
74 tx_power_cmd.global_lmt = 74 tx_power_cmd.global_lmt =
75 priv->eeprom_data->max_tx_pwr_half_dbm; 75 priv->nvm_data->max_tx_pwr_half_dbm;
76 } 76 }
77 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED; 77 tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
78 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO; 78 tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -136,7 +136,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
136 * 1. acquire mutex before calling 136 * 1. acquire mutex before calling
137 * 2. make sure rf is on and not in exit state 137 * 2. make sure rf is on and not in exit state
138 */ 138 */
139int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) 139int iwlagn_txfifo_flush(struct iwl_priv *priv)
140{ 140{
141 struct iwl_txfifo_flush_cmd flush_cmd; 141 struct iwl_txfifo_flush_cmd flush_cmd;
142 struct iwl_host_cmd cmd = { 142 struct iwl_host_cmd cmd = {
@@ -146,35 +146,34 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
146 .data = { &flush_cmd, }, 146 .data = { &flush_cmd, },
147 }; 147 };
148 148
149 might_sleep();
150
151 memset(&flush_cmd, 0, sizeof(flush_cmd)); 149 memset(&flush_cmd, 0, sizeof(flush_cmd));
152 if (flush_control & BIT(IWL_RXON_CTX_BSS)) 150
153 flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | 151 flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
154 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | 152 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
155 IWL_SCD_MGMT_MSK; 153 IWL_SCD_MGMT_MSK;
156 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) && 154 if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
157 (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) 155 flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK |
158 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK | 156 IWL_PAN_SCD_VI_MSK |
159 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK | 157 IWL_PAN_SCD_BE_MSK |
160 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | 158 IWL_PAN_SCD_BK_MSK |
161 IWL_PAN_SCD_MULTICAST_MSK; 159 IWL_PAN_SCD_MGMT_MSK |
162 160 IWL_PAN_SCD_MULTICAST_MSK;
163 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE) 161
164 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 162 if (priv->nvm_data->sku_cap_11n_enable)
165 163 flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK;
166 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 164
167 flush_cmd.fifo_control); 165 IWL_DEBUG_INFO(priv, "queue control: 0x%x\n",
168 flush_cmd.flush_control = cpu_to_le16(flush_control); 166 flush_cmd.queue_control);
167 flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL);
169 168
170 return iwl_dvm_send_cmd(priv, &cmd); 169 return iwl_dvm_send_cmd(priv, &cmd);
171} 170}
172 171
173void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) 172void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
174{ 173{
175 mutex_lock(&priv->mutex); 174 mutex_lock(&priv->mutex);
176 ieee80211_stop_queues(priv->hw); 175 ieee80211_stop_queues(priv->hw);
177 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { 176 if (iwlagn_txfifo_flush(priv)) {
178 IWL_ERR(priv, "flush request fail\n"); 177 IWL_ERR(priv, "flush request fail\n");
179 goto done; 178 goto done;
180 } 179 }
@@ -826,7 +825,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
826 if (priv->chain_noise_data.active_chains) 825 if (priv->chain_noise_data.active_chains)
827 active_chains = priv->chain_noise_data.active_chains; 826 active_chains = priv->chain_noise_data.active_chains;
828 else 827 else
829 active_chains = priv->eeprom_data->valid_rx_ant; 828 active_chains = priv->nvm_data->valid_rx_ant;
830 829
831 if (priv->cfg->bt_params && 830 if (priv->cfg->bt_params &&
832 priv->cfg->bt_params->advanced_bt_coexist && 831 priv->cfg->bt_params->advanced_bt_coexist &&
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2d9eee93c743..3163e0f38c25 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -164,14 +164,17 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
164 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 164 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
165 */ 165 */
166 166
167 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE) 167 if (priv->nvm_data->sku_cap_11n_enable)
168 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 168 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
169 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 169 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
170 170
171#ifndef CONFIG_IWLWIFI_EXPERIMENTAL_MFP 171 /*
172 /* enable 11w if the uCode advertise */ 172 * Enable 11w if advertised by firmware and software crypto
173 if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP) 173 * is not enabled (as the firmware will interpret some mgmt
174#endif /* !CONFIG_IWLWIFI_EXPERIMENTAL_MFP */ 174 * packets, so enabling it with software crypto isn't safe)
175 */
176 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
177 !iwlwifi_mod_params.sw_crypto)
175 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 178 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
176 179
177 hw->sta_data_size = sizeof(struct iwl_station_priv); 180 hw->sta_data_size = sizeof(struct iwl_station_priv);
@@ -239,12 +242,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
239 242
240 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 243 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
241 244
242 if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels) 245 if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
243 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 246 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
244 &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ]; 247 &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
245 if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels) 248 if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
246 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 249 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
247 &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ]; 250 &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
248 251
249 hw->wiphy->hw_version = priv->trans->hw_id; 252 hw->wiphy->hw_version = priv->trans->hw_id;
250 253
@@ -651,7 +654,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
651 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 654 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
652 sta->addr, tid); 655 sta->addr, tid);
653 656
654 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)) 657 if (!(priv->nvm_data->sku_cap_11n_enable))
655 return -EACCES; 658 return -EACCES;
656 659
657 IWL_DEBUG_MAC80211(priv, "enter\n"); 660 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1019,7 +1022,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
1019 */ 1022 */
1020 if (drop) { 1023 if (drop) {
1021 IWL_DEBUG_MAC80211(priv, "send flush command\n"); 1024 IWL_DEBUG_MAC80211(priv, "send flush command\n");
1022 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { 1025 if (iwlagn_txfifo_flush(priv)) {
1023 IWL_ERR(priv, "flush request fail\n"); 1026 IWL_ERR(priv, "flush request fail\n");
1024 goto done; 1027 goto done;
1025 } 1028 }
@@ -1032,8 +1035,8 @@ done:
1032} 1035}
1033 1036
1034static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, 1037static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1038 struct ieee80211_vif *vif,
1035 struct ieee80211_channel *channel, 1039 struct ieee80211_channel *channel,
1036 enum nl80211_channel_type channel_type,
1037 int duration) 1040 int duration)
1038{ 1041{
1039 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1042 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1065,7 +1068,6 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1065 } 1068 }
1066 1069
1067 priv->hw_roc_channel = channel; 1070 priv->hw_roc_channel = channel;
1068 priv->hw_roc_chantype = channel_type;
1069 /* convert from ms to TU */ 1071 /* convert from ms to TU */
1070 priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024); 1072 priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
1071 priv->hw_roc_start_notified = false; 1073 priv->hw_roc_start_notified = false;
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 408132cf83c1..faa05932efae 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -185,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
185 rate = info->control.rates[0].idx; 185 rate = info->control.rates[0].idx;
186 186
187 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 187 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
188 priv->eeprom_data->valid_tx_ant); 188 priv->nvm_data->valid_tx_ant);
189 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 189 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
190 190
191 /* In mac80211, rates for 5 GHz start at 0 */ 191 /* In mac80211, rates for 5 GHz start at 0 */
@@ -511,7 +511,7 @@ static void iwl_bg_tx_flush(struct work_struct *work)
511 return; 511 return;
512 512
513 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); 513 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
514 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 514 iwlagn_dev_txfifo_flush(priv);
515} 515}
516 516
517/* 517/*
@@ -776,7 +776,7 @@ int iwl_alive_start(struct iwl_priv *priv)
776 ieee80211_wake_queues(priv->hw); 776 ieee80211_wake_queues(priv->hw);
777 777
778 /* Configure Tx antenna selection based on H/W config */ 778 /* Configure Tx antenna selection based on H/W config */
779 iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant); 779 iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
780 780
781 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 781 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
782 struct iwl_rxon_cmd *active_rxon = 782 struct iwl_rxon_cmd *active_rxon =
@@ -1191,36 +1191,38 @@ static void iwl_option_config(struct iwl_priv *priv)
1191 1191
1192static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 1192static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
1193{ 1193{
1194 u16 radio_cfg; 1194 struct iwl_nvm_data *data = priv->nvm_data;
1195 char *debug_msg;
1195 1196
1196 priv->eeprom_data->sku = priv->eeprom_data->sku; 1197 if (data->sku_cap_11n_enable &&
1197
1198 if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
1199 !priv->cfg->ht_params) { 1198 !priv->cfg->ht_params) {
1200 IWL_ERR(priv, "Invalid 11n configuration\n"); 1199 IWL_ERR(priv, "Invalid 11n configuration\n");
1201 return -EINVAL; 1200 return -EINVAL;
1202 } 1201 }
1203 1202
1204 if (!priv->eeprom_data->sku) { 1203 if (!data->sku_cap_11n_enable && !data->sku_cap_band_24GHz_enable &&
1204 !data->sku_cap_band_52GHz_enable) {
1205 IWL_ERR(priv, "Invalid device sku\n"); 1205 IWL_ERR(priv, "Invalid device sku\n");
1206 return -EINVAL; 1206 return -EINVAL;
1207 } 1207 }
1208 1208
1209 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku); 1209 debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
1210 1210 IWL_DEBUG_INFO(priv, debug_msg,
1211 radio_cfg = priv->eeprom_data->radio_cfg; 1211 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
1212 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
1213 data->sku_cap_11n_enable ? "" : "NOT", "enabled");
1212 1214
1213 priv->hw_params.tx_chains_num = 1215 priv->hw_params.tx_chains_num =
1214 num_of_ant(priv->eeprom_data->valid_tx_ant); 1216 num_of_ant(data->valid_tx_ant);
1215 if (priv->cfg->rx_with_siso_diversity) 1217 if (priv->cfg->rx_with_siso_diversity)
1216 priv->hw_params.rx_chains_num = 1; 1218 priv->hw_params.rx_chains_num = 1;
1217 else 1219 else
1218 priv->hw_params.rx_chains_num = 1220 priv->hw_params.rx_chains_num =
1219 num_of_ant(priv->eeprom_data->valid_rx_ant); 1221 num_of_ant(data->valid_rx_ant);
1220 1222
1221 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", 1223 IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
1222 priv->eeprom_data->valid_tx_ant, 1224 data->valid_tx_ant,
1223 priv->eeprom_data->valid_rx_ant); 1225 data->valid_rx_ant);
1224 1226
1225 return 0; 1227 return 0;
1226} 1228}
@@ -1235,7 +1237,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1235 struct iwl_op_mode *op_mode; 1237 struct iwl_op_mode *op_mode;
1236 u16 num_mac; 1238 u16 num_mac;
1237 u32 ucode_flags; 1239 u32 ucode_flags;
1238 struct iwl_trans_config trans_cfg; 1240 struct iwl_trans_config trans_cfg = {};
1239 static const u8 no_reclaim_cmds[] = { 1241 static const u8 no_reclaim_cmds[] = {
1240 REPLY_RX_PHY_CMD, 1242 REPLY_RX_PHY_CMD,
1241 REPLY_RX_MPDU_CMD, 1243 REPLY_RX_MPDU_CMD,
@@ -1334,6 +1336,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1334 /* Configure transport layer */ 1336 /* Configure transport layer */
1335 iwl_trans_configure(priv->trans, &trans_cfg); 1337 iwl_trans_configure(priv->trans, &trans_cfg);
1336 1338
1339 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
1340 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
1341
1337 /* At this point both hw and priv are allocated. */ 1342 /* At this point both hw and priv are allocated. */
1338 1343
1339 SET_IEEE80211_DEV(priv->hw, priv->trans->dev); 1344 SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
@@ -1377,24 +1382,24 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1377 /* Reset chip to save power until we load uCode during "up". */ 1382 /* Reset chip to save power until we load uCode during "up". */
1378 iwl_trans_stop_hw(priv->trans, false); 1383 iwl_trans_stop_hw(priv->trans, false);
1379 1384
1380 priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg, 1385 priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
1381 priv->eeprom_blob, 1386 priv->eeprom_blob,
1382 priv->eeprom_blob_size); 1387 priv->eeprom_blob_size);
1383 if (!priv->eeprom_data) 1388 if (!priv->nvm_data)
1384 goto out_free_eeprom_blob; 1389 goto out_free_eeprom_blob;
1385 1390
1386 if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans)) 1391 if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
1387 goto out_free_eeprom; 1392 goto out_free_eeprom;
1388 1393
1389 if (iwl_eeprom_init_hw_params(priv)) 1394 if (iwl_eeprom_init_hw_params(priv))
1390 goto out_free_eeprom; 1395 goto out_free_eeprom;
1391 1396
1392 /* extract MAC Address */ 1397 /* extract MAC Address */
1393 memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN); 1398 memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
1394 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1399 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1395 priv->hw->wiphy->addresses = priv->addresses; 1400 priv->hw->wiphy->addresses = priv->addresses;
1396 priv->hw->wiphy->n_addresses = 1; 1401 priv->hw->wiphy->n_addresses = 1;
1397 num_mac = priv->eeprom_data->n_hw_addrs; 1402 num_mac = priv->nvm_data->n_hw_addrs;
1398 if (num_mac > 1) { 1403 if (num_mac > 1) {
1399 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1404 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1400 ETH_ALEN); 1405 ETH_ALEN);
@@ -1407,7 +1412,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1407 ************************/ 1412 ************************/
1408 iwl_set_hw_params(priv); 1413 iwl_set_hw_params(priv);
1409 1414
1410 if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { 1415 if (!(priv->nvm_data->sku_cap_ipan_enable)) {
1411 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1416 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
1412 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1417 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1413 /* 1418 /*
@@ -1489,7 +1494,7 @@ out_destroy_workqueue:
1489out_free_eeprom_blob: 1494out_free_eeprom_blob:
1490 kfree(priv->eeprom_blob); 1495 kfree(priv->eeprom_blob);
1491out_free_eeprom: 1496out_free_eeprom:
1492 iwl_free_eeprom_data(priv->eeprom_data); 1497 iwl_free_nvm_data(priv->nvm_data);
1493out_free_hw: 1498out_free_hw:
1494 ieee80211_free_hw(priv->hw); 1499 ieee80211_free_hw(priv->hw);
1495out: 1500out:
@@ -1508,12 +1513,8 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1508 1513
1509 iwl_tt_exit(priv); 1514 iwl_tt_exit(priv);
1510 1515
1511 /*This will stop the queues, move the device to low power state */
1512 priv->ucode_loaded = false;
1513 iwl_trans_stop_device(priv->trans);
1514
1515 kfree(priv->eeprom_blob); 1516 kfree(priv->eeprom_blob);
1516 iwl_free_eeprom_data(priv->eeprom_data); 1517 iwl_free_nvm_data(priv->nvm_data);
1517 1518
1518 /*netif_stop_queue(dev); */ 1519 /*netif_stop_queue(dev); */
1519 flush_workqueue(priv->workqueue); 1520 flush_workqueue(priv->workqueue);
@@ -1927,8 +1928,6 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
1927 * commands by clearing the ready bit */ 1928 * commands by clearing the ready bit */
1928 clear_bit(STATUS_READY, &priv->status); 1929 clear_bit(STATUS_READY, &priv->status);
1929 1930
1930 wake_up(&priv->trans->wait_command_queue);
1931
1932 if (!ondemand) { 1931 if (!ondemand) {
1933 /* 1932 /*
1934 * If firmware keep reloading, then it indicate something 1933 * If firmware keep reloading, then it indicate something
@@ -1989,7 +1988,6 @@ static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1989static void iwl_nic_config(struct iwl_op_mode *op_mode) 1988static void iwl_nic_config(struct iwl_op_mode *op_mode)
1990{ 1989{
1991 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1990 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1992 u16 radio_cfg = priv->eeprom_data->radio_cfg;
1993 1991
1994 /* SKU Control */ 1992 /* SKU Control */
1995 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, 1993 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
@@ -2001,13 +1999,13 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
2001 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH)); 1999 CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
2002 2000
2003 /* write radio config values to register */ 2001 /* write radio config values to register */
2004 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { 2002 if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
2005 u32 reg_val = 2003 u32 reg_val =
2006 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) << 2004 priv->nvm_data->radio_cfg_type <<
2007 CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE | 2005 CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
2008 EEPROM_RF_CFG_STEP_MSK(radio_cfg) << 2006 priv->nvm_data->radio_cfg_step <<
2009 CSR_HW_IF_CONFIG_REG_POS_PHY_STEP | 2007 CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
2010 EEPROM_RF_CFG_DASH_MSK(radio_cfg) << 2008 priv->nvm_data->radio_cfg_dash <<
2011 CSR_HW_IF_CONFIG_REG_POS_PHY_DASH; 2009 CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
2012 2010
2013 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG, 2011 iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
@@ -2016,9 +2014,9 @@ static void iwl_nic_config(struct iwl_op_mode *op_mode)
2016 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val); 2014 CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
2017 2015
2018 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n", 2016 IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
2019 EEPROM_RF_CFG_TYPE_MSK(radio_cfg), 2017 priv->nvm_data->radio_cfg_type,
2020 EEPROM_RF_CFG_STEP_MSK(radio_cfg), 2018 priv->nvm_data->radio_cfg_step,
2021 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 2019 priv->nvm_data->radio_cfg_dash);
2022 } else { 2020 } else {
2023 WARN_ON(1); 2021 WARN_ON(1);
2024 } 2022 }
@@ -2152,8 +2150,6 @@ static int __init iwl_init(void)
2152{ 2150{
2153 2151
2154 int ret; 2152 int ret;
2155 pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
2156 pr_info(DRV_COPYRIGHT "\n");
2157 2153
2158 ret = iwlagn_rate_control_register(); 2154 ret = iwlagn_rate_control_register();
2159 if (ret) { 2155 if (ret) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index a82f46c10f5e..f3dd0da60d8a 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -820,7 +820,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
820 820
821 if (num_of_ant(tbl->ant_type) > 1) 821 if (num_of_ant(tbl->ant_type) > 1)
822 tbl->ant_type = 822 tbl->ant_type =
823 first_antenna(priv->eeprom_data->valid_tx_ant); 823 first_antenna(priv->nvm_data->valid_tx_ant);
824 824
825 tbl->is_ht40 = 0; 825 tbl->is_ht40 = 0;
826 tbl->is_SGI = 0; 826 tbl->is_SGI = 0;
@@ -1448,7 +1448,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1448 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1448 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1449 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1449 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1450 u8 start_action; 1450 u8 start_action;
1451 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 1451 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1452 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1452 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1453 int ret = 0; 1453 int ret = 0;
1454 u8 update_search_tbl_counter = 0; 1454 u8 update_search_tbl_counter = 0;
@@ -1466,7 +1466,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1466 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1466 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1467 /* avoid antenna B and MIMO */ 1467 /* avoid antenna B and MIMO */
1468 valid_tx_ant = 1468 valid_tx_ant =
1469 first_antenna(priv->eeprom_data->valid_tx_ant); 1469 first_antenna(priv->nvm_data->valid_tx_ant);
1470 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1470 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1471 tbl->action != IWL_LEGACY_SWITCH_SISO) 1471 tbl->action != IWL_LEGACY_SWITCH_SISO)
1472 tbl->action = IWL_LEGACY_SWITCH_SISO; 1472 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1490,7 +1490,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1490 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1490 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1491 tbl->action = IWL_LEGACY_SWITCH_SISO; 1491 tbl->action = IWL_LEGACY_SWITCH_SISO;
1492 valid_tx_ant = 1492 valid_tx_ant =
1493 first_antenna(priv->eeprom_data->valid_tx_ant); 1493 first_antenna(priv->nvm_data->valid_tx_ant);
1494 } 1494 }
1495 1495
1496 start_action = tbl->action; 1496 start_action = tbl->action;
@@ -1624,7 +1624,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1624 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1624 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1625 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1625 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1626 u8 start_action; 1626 u8 start_action;
1627 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 1627 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1628 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1628 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1629 u8 update_search_tbl_counter = 0; 1629 u8 update_search_tbl_counter = 0;
1630 int ret; 1630 int ret;
@@ -1642,7 +1642,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1642 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1642 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1643 /* avoid antenna B and MIMO */ 1643 /* avoid antenna B and MIMO */
1644 valid_tx_ant = 1644 valid_tx_ant =
1645 first_antenna(priv->eeprom_data->valid_tx_ant); 1645 first_antenna(priv->nvm_data->valid_tx_ant);
1646 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1646 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1647 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1647 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1648 break; 1648 break;
@@ -1660,7 +1660,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1660 /* configure as 1x1 if bt full concurrency */ 1660 /* configure as 1x1 if bt full concurrency */
1661 if (priv->bt_full_concurrent) { 1661 if (priv->bt_full_concurrent) {
1662 valid_tx_ant = 1662 valid_tx_ant =
1663 first_antenna(priv->eeprom_data->valid_tx_ant); 1663 first_antenna(priv->nvm_data->valid_tx_ant);
1664 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1664 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1665 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1665 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1666 } 1666 }
@@ -1796,7 +1796,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1796 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1796 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1797 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1797 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1798 u8 start_action; 1798 u8 start_action;
1799 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 1799 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1800 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1800 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1801 u8 update_search_tbl_counter = 0; 1801 u8 update_search_tbl_counter = 0;
1802 int ret; 1802 int ret;
@@ -1966,7 +1966,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1966 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1966 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1967 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1967 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1968 u8 start_action; 1968 u8 start_action;
1969 u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 1969 u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
1970 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1970 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1971 int ret; 1971 int ret;
1972 u8 update_search_tbl_counter = 0; 1972 u8 update_search_tbl_counter = 0;
@@ -2700,7 +2700,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2700 2700
2701 i = lq_sta->last_txrate_idx; 2701 i = lq_sta->last_txrate_idx;
2702 2702
2703 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 2703 valid_tx_ant = priv->nvm_data->valid_tx_ant;
2704 2704
2705 if (!lq_sta->search_better_tbl) 2705 if (!lq_sta->search_better_tbl)
2706 active_tbl = lq_sta->active_tbl; 2706 active_tbl = lq_sta->active_tbl;
@@ -2894,15 +2894,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2894 2894
2895 /* These values will be overridden later */ 2895 /* These values will be overridden later */
2896 lq_sta->lq.general_params.single_stream_ant_msk = 2896 lq_sta->lq.general_params.single_stream_ant_msk =
2897 first_antenna(priv->eeprom_data->valid_tx_ant); 2897 first_antenna(priv->nvm_data->valid_tx_ant);
2898 lq_sta->lq.general_params.dual_stream_ant_msk = 2898 lq_sta->lq.general_params.dual_stream_ant_msk =
2899 priv->eeprom_data->valid_tx_ant & 2899 priv->nvm_data->valid_tx_ant &
2900 ~first_antenna(priv->eeprom_data->valid_tx_ant); 2900 ~first_antenna(priv->nvm_data->valid_tx_ant);
2901 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2901 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2902 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2902 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2903 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) { 2903 } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
2904 lq_sta->lq.general_params.dual_stream_ant_msk = 2904 lq_sta->lq.general_params.dual_stream_ant_msk =
2905 priv->eeprom_data->valid_tx_ant; 2905 priv->nvm_data->valid_tx_ant;
2906 } 2906 }
2907 2907
2908 /* as default allow aggregation for all tids */ 2908 /* as default allow aggregation for all tids */
@@ -2948,7 +2948,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2948 if (priv && priv->bt_full_concurrent) { 2948 if (priv && priv->bt_full_concurrent) {
2949 /* 1x1 only */ 2949 /* 1x1 only */
2950 tbl_type.ant_type = 2950 tbl_type.ant_type =
2951 first_antenna(priv->eeprom_data->valid_tx_ant); 2951 first_antenna(priv->nvm_data->valid_tx_ant);
2952 } 2952 }
2953 2953
2954 /* How many times should we repeat the initial rate? */ 2954 /* How many times should we repeat the initial rate? */
@@ -2980,7 +2980,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2980 if (priv->bt_full_concurrent) 2980 if (priv->bt_full_concurrent)
2981 valid_tx_ant = ANT_A; 2981 valid_tx_ant = ANT_A;
2982 else 2982 else
2983 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 2983 valid_tx_ant = priv->nvm_data->valid_tx_ant;
2984 } 2984 }
2985 2985
2986 /* Fill rest of rate table */ 2986 /* Fill rest of rate table */
@@ -3014,7 +3014,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3014 if (priv && priv->bt_full_concurrent) { 3014 if (priv && priv->bt_full_concurrent) {
3015 /* 1x1 only */ 3015 /* 1x1 only */
3016 tbl_type.ant_type = 3016 tbl_type.ant_type =
3017 first_antenna(priv->eeprom_data->valid_tx_ant); 3017 first_antenna(priv->nvm_data->valid_tx_ant);
3018 } 3018 }
3019 3019
3020 /* Indicate to uCode which entries might be MIMO. 3020 /* Indicate to uCode which entries might be MIMO.
@@ -3101,7 +3101,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3101 u8 ant_sel_tx; 3101 u8 ant_sel_tx;
3102 3102
3103 priv = lq_sta->drv; 3103 priv = lq_sta->drv;
3104 valid_tx_ant = priv->eeprom_data->valid_tx_ant; 3104 valid_tx_ant = priv->nvm_data->valid_tx_ant;
3105 if (lq_sta->dbg_fixed_rate) { 3105 if (lq_sta->dbg_fixed_rate) {
3106 ant_sel_tx = 3106 ant_sel_tx =
3107 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3107 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3172,9 +3172,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3172 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3172 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3173 lq_sta->dbg_fixed_rate); 3173 lq_sta->dbg_fixed_rate);
3174 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3174 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3175 (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "", 3175 (priv->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
3176 (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "", 3176 (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
3177 (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3177 (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
3178 desc += sprintf(buff+desc, "lq type %s\n", 3178 desc += sprintf(buff+desc, "lq type %s\n",
3179 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3179 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3180 if (is_Ht(tbl->lq_type)) { 3180 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 5a9c325804f6..cac4f37cc427 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -631,8 +631,6 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
631 test_bit(STATUS_RF_KILL_HW, &priv->status))) 631 test_bit(STATUS_RF_KILL_HW, &priv->status)))
632 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 632 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
633 test_bit(STATUS_RF_KILL_HW, &priv->status)); 633 test_bit(STATUS_RF_KILL_HW, &priv->status));
634 else
635 wake_up(&priv->trans->wait_command_queue);
636 return 0; 634 return 0;
637} 635}
638 636
@@ -901,7 +899,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
901 struct iwl_device_cmd *cmd) 899 struct iwl_device_cmd *cmd)
902{ 900{
903 struct ieee80211_hdr *header; 901 struct ieee80211_hdr *header;
904 struct ieee80211_rx_status rx_status; 902 struct ieee80211_rx_status rx_status = {};
905 struct iwl_rx_packet *pkt = rxb_addr(rxb); 903 struct iwl_rx_packet *pkt = rxb_addr(rxb);
906 struct iwl_rx_phy_res *phy_res; 904 struct iwl_rx_phy_res *phy_res;
907 __le32 rx_pkt_status; 905 __le32 rx_pkt_status;
@@ -951,7 +949,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
951 949
952 /* TSF isn't reliable. In order to allow smooth user experience, 950 /* TSF isn't reliable. In order to allow smooth user experience,
953 * this W/A doesn't propagate it to the mac80211 */ 951 * this W/A doesn't propagate it to the mac80211 */
954 /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ 952 /*rx_status.flag |= RX_FLAG_MACTIME_START;*/
955 953
956 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 954 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
957 955
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 2830ea290502..9a891e6e60e8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -420,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
420 return -EINVAL; 420 return -EINVAL;
421 } 421 }
422 422
423 if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) { 423 if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
424 IWL_WARN(priv, 424 IWL_WARN(priv,
425 "Requested user TXPOWER %d above upper limit %d.\n", 425 "Requested user TXPOWER %d above upper limit %d.\n",
426 tx_power, priv->eeprom_data->max_tx_pwr_half_dbm); 426 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
427 return -EINVAL; 427 return -EINVAL;
428 } 428 }
429 429
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index bb9f6252d28f..610ed2204e1f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -660,12 +660,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
660 u16 rx_chain = 0; 660 u16 rx_chain = 0;
661 enum ieee80211_band band; 661 enum ieee80211_band band;
662 u8 n_probes = 0; 662 u8 n_probes = 0;
663 u8 rx_ant = priv->eeprom_data->valid_rx_ant; 663 u8 rx_ant = priv->nvm_data->valid_rx_ant;
664 u8 rate; 664 u8 rate;
665 bool is_active = false; 665 bool is_active = false;
666 int chan_mod; 666 int chan_mod;
667 u8 active_chains; 667 u8 active_chains;
668 u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant; 668 u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
669 int ret; 669 int ret;
670 int scan_cmd_size = sizeof(struct iwl_scan_cmd) + 670 int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
671 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) + 671 MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -673,8 +673,9 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
673 const u8 *ssid = NULL; 673 const u8 *ssid = NULL;
674 u8 ssid_len = 0; 674 u8 ssid_len = 0;
675 675
676 if (WARN_ON_ONCE(priv->scan_request && 676 if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL &&
677 priv->scan_request->n_channels > MAX_SCAN_CHANNEL)) 677 (!priv->scan_request ||
678 priv->scan_request->n_channels > MAX_SCAN_CHANNEL)))
678 return -EINVAL; 679 return -EINVAL;
679 680
680 lockdep_assert_held(&priv->mutex); 681 lockdep_assert_held(&priv->mutex);
@@ -881,7 +882,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
881 882
882 /* MIMO is not used here, but value is required */ 883 /* MIMO is not used here, but value is required */
883 rx_chain |= 884 rx_chain |=
884 priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 885 priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
885 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 886 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
886 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 887 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
887 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 888 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -998,7 +999,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
998 999
999void iwl_init_scan_params(struct iwl_priv *priv) 1000void iwl_init_scan_params(struct iwl_priv *priv)
1000{ 1001{
1001 u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1; 1002 u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
1002 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 1003 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
1003 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 1004 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
1004 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 1005 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index cd9b6de4273e..bdba9543c351 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -634,23 +634,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
634 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 634 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
635 rate_flags |= RATE_MCS_CCK_MSK; 635 rate_flags |= RATE_MCS_CCK_MSK;
636 636
637 rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) << 637 rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) <<
638 RATE_MCS_ANT_POS; 638 RATE_MCS_ANT_POS;
639 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 639 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
640 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 640 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
641 link_cmd->rs_table[i].rate_n_flags = rate_n_flags; 641 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
642 642
643 link_cmd->general_params.single_stream_ant_msk = 643 link_cmd->general_params.single_stream_ant_msk =
644 first_antenna(priv->eeprom_data->valid_tx_ant); 644 first_antenna(priv->nvm_data->valid_tx_ant);
645 645
646 link_cmd->general_params.dual_stream_ant_msk = 646 link_cmd->general_params.dual_stream_ant_msk =
647 priv->eeprom_data->valid_tx_ant & 647 priv->nvm_data->valid_tx_ant &
648 ~first_antenna(priv->eeprom_data->valid_tx_ant); 648 ~first_antenna(priv->nvm_data->valid_tx_ant);
649 if (!link_cmd->general_params.dual_stream_ant_msk) { 649 if (!link_cmd->general_params.dual_stream_ant_msk) {
650 link_cmd->general_params.dual_stream_ant_msk = ANT_AB; 650 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
651 } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) { 651 } else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
652 link_cmd->general_params.dual_stream_ant_msk = 652 link_cmd->general_params.dual_stream_ant_msk =
653 priv->eeprom_data->valid_tx_ant; 653 priv->nvm_data->valid_tx_ant;
654 } 654 }
655 655
656 link_cmd->agg_params.agg_dis_start_th = 656 link_cmd->agg_params.agg_dis_start_th =
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index f5ca73a89870..da21328ca8ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -188,7 +188,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || 188 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) 189 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
190 rate_idx = rate_lowest_index( 190 rate_idx = rate_lowest_index(
191 &priv->eeprom_data->bands[info->band], sta); 191 &priv->nvm_data->bands[info->band], sta);
192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ 192 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
193 if (info->band == IEEE80211_BAND_5GHZ) 193 if (info->band == IEEE80211_BAND_5GHZ)
194 rate_idx += IWL_FIRST_OFDM_RATE; 194 rate_idx += IWL_FIRST_OFDM_RATE;
@@ -207,11 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
207 priv->bt_full_concurrent) { 207 priv->bt_full_concurrent) {
208 /* operated as 1x1 in full concurrency mode */ 208 /* operated as 1x1 in full concurrency mode */
209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
210 first_antenna(priv->eeprom_data->valid_tx_ant)); 210 first_antenna(priv->nvm_data->valid_tx_ant));
211 } else 211 } else
212 priv->mgmt_tx_ant = iwl_toggle_tx_ant( 212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(
213 priv, priv->mgmt_tx_ant, 213 priv, priv->mgmt_tx_ant,
214 priv->eeprom_data->valid_tx_ant); 214 priv->nvm_data->valid_tx_ant);
215 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 215 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
216 216
217 /* Set the rate in the TX cmd */ 217 /* Set the rate in the TX cmd */
@@ -305,7 +305,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
305 u8 hdr_len; 305 u8 hdr_len;
306 u16 len, seq_number = 0; 306 u16 len, seq_number = 0;
307 u8 sta_id, tid = IWL_MAX_TID_COUNT; 307 u8 sta_id, tid = IWL_MAX_TID_COUNT;
308 bool is_agg = false; 308 bool is_agg = false, is_data_qos = false;
309 int txq_id; 309 int txq_id;
310 310
311 if (info->control.vif) 311 if (info->control.vif)
@@ -378,9 +378,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
378 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); 378 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
379 } 379 }
380 380
381 if (info->flags & IEEE80211_TX_CTL_AMPDU)
382 is_agg = true;
383
384 dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans); 381 dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
385 382
386 if (unlikely(!dev_cmd)) 383 if (unlikely(!dev_cmd))
@@ -442,6 +439,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
442 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 439 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
443 hdr->seq_ctrl |= cpu_to_le16(seq_number); 440 hdr->seq_ctrl |= cpu_to_le16(seq_number);
444 seq_number += 0x10; 441 seq_number += 0x10;
442
443 if (info->flags & IEEE80211_TX_CTL_AMPDU)
444 is_agg = true;
445 is_data_qos = true;
445 } 446 }
446 447
447 /* Copy MAC header from skb into command buffer */ 448 /* Copy MAC header from skb into command buffer */
@@ -474,8 +475,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
474 if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id)) 475 if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
475 goto drop_unlock_sta; 476 goto drop_unlock_sta;
476 477
477 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && 478 if (is_data_qos && !ieee80211_has_morefrags(fc))
478 !ieee80211_has_morefrags(fc))
479 priv->tid_data[sta_id][tid].seq_number = seq_number; 479 priv->tid_data[sta_id][tid].seq_number = seq_number;
480 480
481 spin_unlock(&priv->sta_lock); 481 spin_unlock(&priv->sta_lock);
@@ -1075,14 +1075,11 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
1075 1075
1076static void iwlagn_set_tx_status(struct iwl_priv *priv, 1076static void iwlagn_set_tx_status(struct iwl_priv *priv,
1077 struct ieee80211_tx_info *info, 1077 struct ieee80211_tx_info *info,
1078 struct iwlagn_tx_resp *tx_resp, 1078 struct iwlagn_tx_resp *tx_resp)
1079 bool is_agg)
1080{ 1079{
1081 u16 status = le16_to_cpu(tx_resp->status.status); 1080 u16 status = le16_to_cpu(tx_resp->status.status);
1082 1081
1083 info->status.rates[0].count = tx_resp->failure_frame + 1; 1082 info->status.rates[0].count = tx_resp->failure_frame + 1;
1084 if (is_agg)
1085 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1086 info->flags |= iwl_tx_status_to_mac80211(status); 1083 info->flags |= iwl_tx_status_to_mac80211(status);
1087 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), 1084 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
1088 info); 1085 info);
@@ -1100,29 +1097,6 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
1100 } 1097 }
1101} 1098}
1102 1099
1103static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
1104 int txq_id, int ssn, struct sk_buff_head *skbs)
1105{
1106 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1107 tid != IWL_TID_NON_QOS &&
1108 txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
1109 /*
1110 * FIXME: this is a uCode bug which need to be addressed,
1111 * log the information and return for now.
1112 * Since it is can possibly happen very often and in order
1113 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1114 */
1115 IWL_DEBUG_TX_QUEUES(priv,
1116 "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
1117 txq_id, sta_id, tid,
1118 priv->tid_data[sta_id][tid].agg.txq_id);
1119 return 1;
1120 }
1121
1122 iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
1123 return 0;
1124}
1125
1126int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, 1100int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1127 struct iwl_device_cmd *cmd) 1101 struct iwl_device_cmd *cmd)
1128{ 1102{
@@ -1184,9 +1158,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1184 next_reclaimed); 1158 next_reclaimed);
1185 } 1159 }
1186 1160
1187 /*we can free until ssn % q.n_bd not inclusive */ 1161 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
1188 WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid, 1162
1189 txq_id, ssn, &skbs));
1190 iwlagn_check_ratid_empty(priv, sta_id, tid); 1163 iwlagn_check_ratid_empty(priv, sta_id, tid);
1191 freed = 0; 1164 freed = 0;
1192 1165
@@ -1231,7 +1204,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1231 if (is_agg && !iwl_is_tx_success(status)) 1204 if (is_agg && !iwl_is_tx_success(status))
1232 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1205 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1233 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb), 1206 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
1234 tx_resp, is_agg); 1207 tx_resp);
1235 if (!is_agg) 1208 if (!is_agg)
1236 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1); 1209 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
1237 1210
@@ -1311,16 +1284,27 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1311 return 0; 1284 return 0;
1312 } 1285 }
1313 1286
1287 if (unlikely(scd_flow != agg->txq_id)) {
1288 /*
1289 * FIXME: this is a uCode bug which need to be addressed,
1290 * log the information and return for now.
1291 * Since it is can possibly happen very often and in order
1292 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1293 */
1294 IWL_DEBUG_TX_QUEUES(priv,
1295 "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
1296 scd_flow, sta_id, tid, agg->txq_id);
1297 spin_unlock(&priv->sta_lock);
1298 return 0;
1299 }
1300
1314 __skb_queue_head_init(&reclaimed_skbs); 1301 __skb_queue_head_init(&reclaimed_skbs);
1315 1302
1316 /* Release all TFDs before the SSN, i.e. all TFDs in front of 1303 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1317 * block-ack window (we assume that they've been successfully 1304 * block-ack window (we assume that they've been successfully
1318 * transmitted ... if not, it's too late anyway). */ 1305 * transmitted ... if not, it's too late anyway). */
1319 if (iwl_reclaim(priv, sta_id, tid, scd_flow, 1306 iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
1320 ba_resp_scd_ssn, &reclaimed_skbs)) { 1307 &reclaimed_skbs);
1321 spin_unlock(&priv->sta_lock);
1322 return 0;
1323 }
1324 1308
1325 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " 1309 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1326 "sta_id = %d\n", 1310 "sta_id = %d\n",
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 2cb1efbc5ed1..c6467e5554f5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -61,7 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
61static int iwl_set_Xtal_calib(struct iwl_priv *priv) 61static int iwl_set_Xtal_calib(struct iwl_priv *priv)
62{ 62{
63 struct iwl_calib_xtal_freq_cmd cmd; 63 struct iwl_calib_xtal_freq_cmd cmd;
64 __le16 *xtal_calib = priv->eeprom_data->xtal_calib; 64 __le16 *xtal_calib = priv->nvm_data->xtal_calib;
65 65
66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -75,7 +75,7 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
75 75
76 memset(&cmd, 0, sizeof(cmd)); 76 memset(&cmd, 0, sizeof(cmd));
77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 77 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
78 cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature; 78 cmd.radio_sensor_offset = priv->nvm_data->raw_temperature;
79 if (!(cmd.radio_sensor_offset)) 79 if (!(cmd.radio_sensor_offset))
80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET; 80 cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
81 81
@@ -90,14 +90,14 @@ static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
90 90
91 memset(&cmd, 0, sizeof(cmd)); 91 memset(&cmd, 0, sizeof(cmd));
92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 92 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
93 cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature; 93 cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature;
94 cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature; 94 cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature;
95 if (!cmd.radio_sensor_offset_low) { 95 if (!cmd.radio_sensor_offset_low) {
96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n"); 96 IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET; 97 cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET; 98 cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
99 } 99 }
100 cmd.burntVoltageRef = priv->eeprom_data->calib_voltage; 100 cmd.burntVoltageRef = priv->nvm_data->calib_voltage;
101 101
102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n", 102 IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
103 le16_to_cpu(cmd.radio_sensor_offset_high)); 103 le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -254,10 +254,10 @@ static int iwl_alive_notify(struct iwl_priv *priv)
254 int ret; 254 int ret;
255 int i; 255 int i;
256 256
257 iwl_trans_fw_alive(priv->trans); 257 iwl_trans_fw_alive(priv->trans, 0);
258 258
259 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN && 259 if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
260 priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) { 260 priv->nvm_data->sku_cap_ipan_enable) {
261 n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo); 261 n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
262 queue_to_txf = iwlagn_ipan_queue_to_tx_fifo; 262 queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
263 } else { 263 } else {
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 87f465a49df1..864219d2136a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -150,7 +150,7 @@ enum iwl_led_mode {
150struct iwl_base_params { 150struct iwl_base_params {
151 int eeprom_size; 151 int eeprom_size;
152 int num_of_queues; /* def: HW dependent */ 152 int num_of_queues; /* def: HW dependent */
153 /* for iwl_apm_init() */ 153 /* for iwl_pcie_apm_init() */
154 u32 pll_cfg_val; 154 u32 pll_cfg_val;
155 155
156 const u16 max_ll_items; 156 const u16 max_ll_items;
@@ -226,8 +226,8 @@ struct iwl_eeprom_params {
226 * @max_data_size: The maximal length of the fw data section 226 * @max_data_size: The maximal length of the fw data section
227 * @valid_tx_ant: valid transmit antenna 227 * @valid_tx_ant: valid transmit antenna
228 * @valid_rx_ant: valid receive antenna 228 * @valid_rx_ant: valid receive antenna
229 * @eeprom_ver: EEPROM version 229 * @nvm_ver: NVM version
230 * @eeprom_calib_ver: EEPROM calibration version 230 * @nvm_calib_ver: NVM calibration version
231 * @lib: pointer to the lib ops 231 * @lib: pointer to the lib ops
232 * @base_params: pointer to basic parameters 232 * @base_params: pointer to basic parameters
233 * @ht_params: point to ht patameters 233 * @ht_params: point to ht patameters
@@ -257,8 +257,8 @@ struct iwl_cfg {
257 const u32 max_inst_size; 257 const u32 max_inst_size;
258 u8 valid_tx_ant; 258 u8 valid_tx_ant;
259 u8 valid_rx_ant; 259 u8 valid_rx_ant;
260 u16 eeprom_ver; 260 u16 nvm_ver;
261 u16 eeprom_calib_ver; 261 u16 nvm_calib_ver;
262 /* params not likely to change within a device family */ 262 /* params not likely to change within a device family */
263 const struct iwl_base_params *base_params; 263 const struct iwl_base_params *base_params;
264 /* params likely to change within a device family */ 264 /* params likely to change within a device family */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 59a5f78402fc..dc7e26b2f383 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -25,6 +25,39 @@
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) 27#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
28#include <linux/skbuff.h>
29#include <linux/ieee80211.h>
30#include <net/cfg80211.h>
31#include "iwl-trans.h"
32#if !defined(__IWLWIFI_DEVICE_TRACE)
33static inline bool iwl_trace_data(struct sk_buff *skb)
34{
35 struct ieee80211_hdr *hdr = (void *)skb->data;
36
37 if (ieee80211_is_data(hdr->frame_control))
38 return skb->protocol != cpu_to_be16(ETH_P_PAE);
39 return false;
40}
41
42static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
43 void *rxbuf, size_t len)
44{
45 struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
46 struct ieee80211_hdr *hdr;
47
48 if (cmd->cmd != trans->rx_mpdu_cmd)
49 return len;
50
51 hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
52 trans->rx_mpdu_cmd_hdr_size);
53 if (!ieee80211_is_data(hdr->frame_control))
54 return len;
55 /* maybe try to identify EAPOL frames? */
56 return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
57 ieee80211_hdrlen(hdr->frame_control);
58}
59#endif
60
28#define __IWLWIFI_DEVICE_TRACE 61#define __IWLWIFI_DEVICE_TRACE
29 62
30#include <linux/tracepoint.h> 63#include <linux/tracepoint.h>
@@ -100,6 +133,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
100 __get_str(dev), __entry->offs, __entry->val) 133 __get_str(dev), __entry->offs, __entry->val)
101); 134);
102 135
136TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
137 TP_PROTO(const struct device *dev, u32 offs, u32 val),
138 TP_ARGS(dev, offs, val),
139 TP_STRUCT__entry(
140 DEV_ENTRY
141 __field(u32, offs)
142 __field(u32, val)
143 ),
144 TP_fast_assign(
145 DEV_ASSIGN;
146 __entry->offs = offs;
147 __entry->val = val;
148 ),
149 TP_printk("[%s] write PRPH[%#x] = %#x)",
150 __get_str(dev), __entry->offs, __entry->val)
151);
152
153TRACE_EVENT(iwlwifi_dev_ioread_prph32,
154 TP_PROTO(const struct device *dev, u32 offs, u32 val),
155 TP_ARGS(dev, offs, val),
156 TP_STRUCT__entry(
157 DEV_ENTRY
158 __field(u32, offs)
159 __field(u32, val)
160 ),
161 TP_fast_assign(
162 DEV_ASSIGN;
163 __entry->offs = offs;
164 __entry->val = val;
165 ),
166 TP_printk("[%s] read PRPH[%#x] = %#x",
167 __get_str(dev), __entry->offs, __entry->val)
168);
169
103TRACE_EVENT(iwlwifi_dev_irq, 170TRACE_EVENT(iwlwifi_dev_irq,
104 TP_PROTO(const struct device *dev), 171 TP_PROTO(const struct device *dev),
105 TP_ARGS(dev), 172 TP_ARGS(dev),
@@ -235,6 +302,48 @@ TRACE_EVENT(iwlwifi_dbg,
235); 302);
236 303
237#undef TRACE_SYSTEM 304#undef TRACE_SYSTEM
305#define TRACE_SYSTEM iwlwifi_data
306
307TRACE_EVENT(iwlwifi_dev_tx_data,
308 TP_PROTO(const struct device *dev,
309 struct sk_buff *skb,
310 void *data, size_t data_len),
311 TP_ARGS(dev, skb, data, data_len),
312 TP_STRUCT__entry(
313 DEV_ENTRY
314
315 __dynamic_array(u8, data, iwl_trace_data(skb) ? data_len : 0)
316 ),
317 TP_fast_assign(
318 DEV_ASSIGN;
319 if (iwl_trace_data(skb))
320 memcpy(__get_dynamic_array(data), data, data_len);
321 ),
322 TP_printk("[%s] TX frame data", __get_str(dev))
323);
324
325TRACE_EVENT(iwlwifi_dev_rx_data,
326 TP_PROTO(const struct device *dev,
327 const struct iwl_trans *trans,
328 void *rxbuf, size_t len),
329 TP_ARGS(dev, trans, rxbuf, len),
330 TP_STRUCT__entry(
331 DEV_ENTRY
332
333 __dynamic_array(u8, data,
334 len - iwl_rx_trace_len(trans, rxbuf, len))
335 ),
336 TP_fast_assign(
337 size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
338 DEV_ASSIGN;
339 if (offs < len)
340 memcpy(__get_dynamic_array(data),
341 ((u8 *)rxbuf) + offs, len - offs);
342 ),
343 TP_printk("[%s] RX frame data", __get_str(dev))
344);
345
346#undef TRACE_SYSTEM
238#define TRACE_SYSTEM iwlwifi 347#define TRACE_SYSTEM iwlwifi
239 348
240TRACE_EVENT(iwlwifi_dev_hcmd, 349TRACE_EVENT(iwlwifi_dev_hcmd,
@@ -270,25 +379,28 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
270); 379);
271 380
272TRACE_EVENT(iwlwifi_dev_rx, 381TRACE_EVENT(iwlwifi_dev_rx,
273 TP_PROTO(const struct device *dev, void *rxbuf, size_t len), 382 TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
274 TP_ARGS(dev, rxbuf, len), 383 void *rxbuf, size_t len),
384 TP_ARGS(dev, trans, rxbuf, len),
275 TP_STRUCT__entry( 385 TP_STRUCT__entry(
276 DEV_ENTRY 386 DEV_ENTRY
277 __dynamic_array(u8, rxbuf, len) 387 __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, rxbuf, len))
278 ), 388 ),
279 TP_fast_assign( 389 TP_fast_assign(
280 DEV_ASSIGN; 390 DEV_ASSIGN;
281 memcpy(__get_dynamic_array(rxbuf), rxbuf, len); 391 memcpy(__get_dynamic_array(rxbuf), rxbuf,
392 iwl_rx_trace_len(trans, rxbuf, len));
282 ), 393 ),
283 TP_printk("[%s] RX cmd %#.2x", 394 TP_printk("[%s] RX cmd %#.2x",
284 __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4]) 395 __get_str(dev), ((u8 *)__get_dynamic_array(rxbuf))[4])
285); 396);
286 397
287TRACE_EVENT(iwlwifi_dev_tx, 398TRACE_EVENT(iwlwifi_dev_tx,
288 TP_PROTO(const struct device *dev, void *tfd, size_t tfdlen, 399 TP_PROTO(const struct device *dev, struct sk_buff *skb,
400 void *tfd, size_t tfdlen,
289 void *buf0, size_t buf0_len, 401 void *buf0, size_t buf0_len,
290 void *buf1, size_t buf1_len), 402 void *buf1, size_t buf1_len),
291 TP_ARGS(dev, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), 403 TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
292 TP_STRUCT__entry( 404 TP_STRUCT__entry(
293 DEV_ENTRY 405 DEV_ENTRY
294 406
@@ -301,14 +413,15 @@ TRACE_EVENT(iwlwifi_dev_tx,
301 * for the possible padding). 413 * for the possible padding).
302 */ 414 */
303 __dynamic_array(u8, buf0, buf0_len) 415 __dynamic_array(u8, buf0, buf0_len)
304 __dynamic_array(u8, buf1, buf1_len) 416 __dynamic_array(u8, buf1, iwl_trace_data(skb) ? 0 : buf1_len)
305 ), 417 ),
306 TP_fast_assign( 418 TP_fast_assign(
307 DEV_ASSIGN; 419 DEV_ASSIGN;
308 __entry->framelen = buf0_len + buf1_len; 420 __entry->framelen = buf0_len + buf1_len;
309 memcpy(__get_dynamic_array(tfd), tfd, tfdlen); 421 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
310 memcpy(__get_dynamic_array(buf0), buf0, buf0_len); 422 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
311 memcpy(__get_dynamic_array(buf1), buf1, buf1_len); 423 if (!iwl_trace_data(skb))
424 memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
312 ), 425 ),
313 TP_printk("[%s] TX %.2x (%zu bytes)", 426 TP_printk("[%s] TX %.2x (%zu bytes)",
314 __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0], 427 __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 198634b75ed0..d3549f493a17 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1032,6 +1032,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
1032 1032
1033 if (!drv->dbgfs_drv) { 1033 if (!drv->dbgfs_drv) {
1034 IWL_ERR(drv, "failed to create debugfs directory\n"); 1034 IWL_ERR(drv, "failed to create debugfs directory\n");
1035 ret = -ENOMEM;
1035 goto err_free_drv; 1036 goto err_free_drv;
1036 } 1037 }
1037 1038
@@ -1040,12 +1041,12 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
1040 1041
1041 if (!drv->trans->dbgfs_dir) { 1042 if (!drv->trans->dbgfs_dir) {
1042 IWL_ERR(drv, "failed to create transport debugfs directory\n"); 1043 IWL_ERR(drv, "failed to create transport debugfs directory\n");
1044 ret = -ENOMEM;
1043 goto err_free_dbgfs; 1045 goto err_free_dbgfs;
1044 } 1046 }
1045#endif 1047#endif
1046 1048
1047 ret = iwl_request_firmware(drv, true); 1049 ret = iwl_request_firmware(drv, true);
1048
1049 if (ret) { 1050 if (ret) {
1050 IWL_ERR(trans, "Couldn't request the fw\n"); 1051 IWL_ERR(trans, "Couldn't request the fw\n");
1051 goto err_fw; 1052 goto err_fw;
@@ -1060,9 +1061,8 @@ err_free_dbgfs:
1060err_free_drv: 1061err_free_drv:
1061#endif 1062#endif
1062 kfree(drv); 1063 kfree(drv);
1063 drv = NULL;
1064 1064
1065 return drv; 1065 return ERR_PTR(ret);
1066} 1066}
1067 1067
1068void iwl_drv_stop(struct iwl_drv *drv) 1068void iwl_drv_stop(struct iwl_drv *drv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index f10170fe8799..471986690cf0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -116,6 +116,24 @@ struct iwl_eeprom_calib_hdr {
116#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL) 116#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
117#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL) 117#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
118 118
119/* SKU Capabilities (actual values from EEPROM definition) */
120enum eeprom_sku_bits {
121 EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
122 EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
123 EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
124 EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
125 EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
126};
127
128/* radio config bits (actual values from EEPROM definition) */
129#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
130#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
131#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
132#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
133#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
134#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
135
136
119/* 137/*
120 * EEPROM bands 138 * EEPROM bands
121 * These are the channel numbers from each band in the order 139 * These are the channel numbers from each band in the order
@@ -251,7 +269,7 @@ static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
251} 269}
252 270
253static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size, 271static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
254 struct iwl_eeprom_data *data) 272 struct iwl_nvm_data *data)
255{ 273{
256 struct iwl_eeprom_calib_hdr *hdr; 274 struct iwl_eeprom_calib_hdr *hdr;
257 275
@@ -330,7 +348,7 @@ struct iwl_eeprom_enhanced_txpwr {
330 s8 mimo3_max; 348 s8 mimo3_max;
331} __packed; 349} __packed;
332 350
333static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data, 351static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
334 struct iwl_eeprom_enhanced_txpwr *txp) 352 struct iwl_eeprom_enhanced_txpwr *txp)
335{ 353{
336 s8 result = 0; /* (.5 dBm) */ 354 s8 result = 0; /* (.5 dBm) */
@@ -364,7 +382,7 @@ static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
364 ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "") 382 ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
365 383
366static void 384static void
367iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data, 385iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
368 struct iwl_eeprom_enhanced_txpwr *txp, 386 struct iwl_eeprom_enhanced_txpwr *txp,
369 int n_channels, s8 max_txpower_avg) 387 int n_channels, s8 max_txpower_avg)
370{ 388{
@@ -392,7 +410,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
392} 410}
393 411
394static void iwl_eeprom_enhanced_txpower(struct device *dev, 412static void iwl_eeprom_enhanced_txpower(struct device *dev,
395 struct iwl_eeprom_data *data, 413 struct iwl_nvm_data *data,
396 const u8 *eeprom, size_t eeprom_size, 414 const u8 *eeprom, size_t eeprom_size,
397 int n_channels) 415 int n_channels)
398{ 416{
@@ -504,7 +522,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
504 ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "") 522 ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
505 523
506static void iwl_mod_ht40_chan_info(struct device *dev, 524static void iwl_mod_ht40_chan_info(struct device *dev,
507 struct iwl_eeprom_data *data, int n_channels, 525 struct iwl_nvm_data *data, int n_channels,
508 enum ieee80211_band band, u16 channel, 526 enum ieee80211_band band, u16 channel,
509 const struct iwl_eeprom_channel *eeprom_ch, 527 const struct iwl_eeprom_channel *eeprom_ch,
510 u8 clear_ht40_extension_channel) 528 u8 clear_ht40_extension_channel)
@@ -547,7 +565,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
547 ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "") 565 ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
548 566
549static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, 567static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
550 struct iwl_eeprom_data *data, 568 struct iwl_nvm_data *data,
551 const u8 *eeprom, size_t eeprom_size) 569 const u8 *eeprom, size_t eeprom_size)
552{ 570{
553 int band, ch_idx; 571 int band, ch_idx;
@@ -685,7 +703,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
685 return n_channels; 703 return n_channels;
686} 704}
687 705
688static int iwl_init_sband_channels(struct iwl_eeprom_data *data, 706static int iwl_init_sband_channels(struct iwl_nvm_data *data,
689 struct ieee80211_supported_band *sband, 707 struct ieee80211_supported_band *sband,
690 int n_channels, enum ieee80211_band band) 708 int n_channels, enum ieee80211_band band)
691{ 709{
@@ -711,7 +729,7 @@ static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
711#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 729#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
712 730
713static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, 731static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
714 struct iwl_eeprom_data *data, 732 struct iwl_nvm_data *data,
715 struct ieee80211_sta_ht_cap *ht_info, 733 struct ieee80211_sta_ht_cap *ht_info,
716 enum ieee80211_band band) 734 enum ieee80211_band band)
717{ 735{
@@ -725,7 +743,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
725 else 743 else
726 rx_chains = hweight8(data->valid_rx_ant); 744 rx_chains = hweight8(data->valid_rx_ant);
727 745
728 if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) { 746 if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
729 ht_info->ht_supported = false; 747 ht_info->ht_supported = false;
730 return; 748 return;
731 } 749 }
@@ -773,7 +791,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
773} 791}
774 792
775static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 793static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
776 struct iwl_eeprom_data *data, 794 struct iwl_nvm_data *data,
777 const u8 *eeprom, size_t eeprom_size) 795 const u8 *eeprom, size_t eeprom_size)
778{ 796{
779 int n_channels = iwl_init_channel_map(dev, cfg, data, 797 int n_channels = iwl_init_channel_map(dev, cfg, data,
@@ -804,12 +822,13 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
804 822
805/* EEPROM data functions */ 823/* EEPROM data functions */
806 824
807struct iwl_eeprom_data * 825struct iwl_nvm_data *
808iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, 826iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
809 const u8 *eeprom, size_t eeprom_size) 827 const u8 *eeprom, size_t eeprom_size)
810{ 828{
811 struct iwl_eeprom_data *data; 829 struct iwl_nvm_data *data;
812 const void *tmp; 830 const void *tmp;
831 u16 radio_cfg, sku;
813 832
814 if (WARN_ON(!cfg || !cfg->eeprom_params)) 833 if (WARN_ON(!cfg || !cfg->eeprom_params))
815 return NULL; 834 return NULL;
@@ -849,18 +868,27 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
849 data->kelvin_temperature = *(__le16 *)tmp; 868 data->kelvin_temperature = *(__le16 *)tmp;
850 data->kelvin_voltage = *((__le16 *)tmp + 1); 869 data->kelvin_voltage = *((__le16 *)tmp + 1);
851 870
852 data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, 871 radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
853 EEPROM_RADIO_CONFIG); 872 EEPROM_RADIO_CONFIG);
854 data->sku = iwl_eeprom_query16(eeprom, eeprom_size, 873 data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
855 EEPROM_SKU_CAP); 874 data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
875 data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
876 data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
877 data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
878 data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
879
880 sku = iwl_eeprom_query16(eeprom, eeprom_size,
881 EEPROM_SKU_CAP);
882 data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
883 data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
884 data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
885 data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
886 data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
856 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 887 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
857 data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; 888 data->sku_cap_11n_enable = false;
858
859 data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
860 EEPROM_VERSION);
861 889
862 data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg); 890 data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
863 data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg); 891 EEPROM_VERSION);
864 892
865 /* check overrides (some devices have wrong EEPROM) */ 893 /* check overrides (some devices have wrong EEPROM) */
866 if (cfg->valid_tx_ant) 894 if (cfg->valid_tx_ant)
@@ -884,20 +912,20 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
884EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data); 912EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
885 913
886/* helper functions */ 914/* helper functions */
887int iwl_eeprom_check_version(struct iwl_eeprom_data *data, 915int iwl_nvm_check_version(struct iwl_nvm_data *data,
888 struct iwl_trans *trans) 916 struct iwl_trans *trans)
889{ 917{
890 if (data->eeprom_version >= trans->cfg->eeprom_ver || 918 if (data->nvm_version >= trans->cfg->nvm_ver ||
891 data->calib_version >= trans->cfg->eeprom_calib_ver) { 919 data->calib_version >= trans->cfg->nvm_calib_ver) {
892 IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", 920 IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
893 data->eeprom_version, data->calib_version); 921 data->nvm_version, data->calib_version);
894 return 0; 922 return 0;
895 } 923 }
896 924
897 IWL_ERR(trans, 925 IWL_ERR(trans,
898 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", 926 "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
899 data->eeprom_version, trans->cfg->eeprom_ver, 927 data->nvm_version, trans->cfg->nvm_ver,
900 data->calib_version, trans->cfg->eeprom_calib_ver); 928 data->calib_version, trans->cfg->nvm_calib_ver);
901 return -EINVAL; 929 return -EINVAL;
902} 930}
903EXPORT_SYMBOL_GPL(iwl_eeprom_check_version); 931EXPORT_SYMBOL_GPL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index a5e425718f56..555f0eb61d48 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -66,22 +66,7 @@
66#include <linux/if_ether.h> 66#include <linux/if_ether.h>
67#include "iwl-trans.h" 67#include "iwl-trans.h"
68 68
69/* SKU Capabilities (actual values from EEPROM definition) */ 69struct iwl_nvm_data {
70#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
71#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
72#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
73#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
74#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
75
76/* radio config bits (actual values from EEPROM definition) */
77#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
78#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
79#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
80#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
81#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
82#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
83
84struct iwl_eeprom_data {
85 int n_hw_addrs; 70 int n_hw_addrs;
86 u8 hw_addr[ETH_ALEN]; 71 u8 hw_addr[ETH_ALEN];
87 72
@@ -93,13 +78,21 @@ struct iwl_eeprom_data {
93 __le16 kelvin_voltage; 78 __le16 kelvin_voltage;
94 __le16 xtal_calib[2]; 79 __le16 xtal_calib[2];
95 80
96 u16 sku; 81 bool sku_cap_band_24GHz_enable;
97 u16 radio_cfg; 82 bool sku_cap_band_52GHz_enable;
98 u16 eeprom_version; 83 bool sku_cap_11n_enable;
99 s8 max_tx_pwr_half_dbm; 84 bool sku_cap_amt_enable;
85 bool sku_cap_ipan_enable;
100 86
87 u8 radio_cfg_type;
88 u8 radio_cfg_step;
89 u8 radio_cfg_dash;
90 u8 radio_cfg_pnum;
101 u8 valid_tx_ant, valid_rx_ant; 91 u8 valid_tx_ant, valid_rx_ant;
102 92
93 u16 nvm_version;
94 s8 max_tx_pwr_half_dbm;
95
103 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 96 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
104 struct ieee80211_channel channels[]; 97 struct ieee80211_channel channels[];
105}; 98};
@@ -115,22 +108,22 @@ struct iwl_eeprom_data {
115 * This function parses all EEPROM values we need and then 108 * This function parses all EEPROM values we need and then
116 * returns a (newly allocated) struct containing all the 109 * returns a (newly allocated) struct containing all the
117 * relevant values for driver use. The struct must be freed 110 * relevant values for driver use. The struct must be freed
118 * later with iwl_free_eeprom_data(). 111 * later with iwl_free_nvm_data().
119 */ 112 */
120struct iwl_eeprom_data * 113struct iwl_nvm_data *
121iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, 114iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
122 const u8 *eeprom, size_t eeprom_size); 115 const u8 *eeprom, size_t eeprom_size);
123 116
124/** 117/**
125 * iwl_free_eeprom_data - free EEPROM data 118 * iwl_free_nvm_data - free NVM data
126 * @data: the data to free 119 * @data: the data to free
127 */ 120 */
128static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data) 121static inline void iwl_free_nvm_data(struct iwl_nvm_data *data)
129{ 122{
130 kfree(data); 123 kfree(data);
131} 124}
132 125
133int iwl_eeprom_check_version(struct iwl_eeprom_data *data, 126int iwl_nvm_check_version(struct iwl_nvm_data *data,
134 struct iwl_trans *trans); 127 struct iwl_trans *trans);
135 128
136#endif /* __iwl_eeprom_parse_h__ */ 129#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 806046641747..ec48563d3c6a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -267,7 +267,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
267 267
268#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) 268#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
269#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) 269#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
270#define RX_RB_TIMEOUT (0x10) 270#define RX_RB_TIMEOUT (0x11)
271 271
272#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) 272#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
273#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) 273#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3dfebfb8434f..cdaff9572059 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -214,84 +214,84 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
214} 214}
215EXPORT_SYMBOL_GPL(iwl_poll_direct_bit); 215EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
216 216
217static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg) 217static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
218{ 218{
219 iwl_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 219 u32 val = iwl_trans_read_prph(trans, ofs);
220 return iwl_read32(trans, HBUS_TARG_PRPH_RDAT); 220 trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
221 return val;
221} 222}
222 223
223static inline void __iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val) 224static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
224{ 225{
225 iwl_write32(trans, HBUS_TARG_PRPH_WADDR, 226 trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
226 ((addr & 0x0000FFFF) | (3 << 24))); 227 iwl_trans_write_prph(trans, ofs, val);
227 iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
228} 228}
229 229
230u32 iwl_read_prph(struct iwl_trans *trans, u32 reg) 230u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
231{ 231{
232 unsigned long flags; 232 unsigned long flags;
233 u32 val; 233 u32 val;
234 234
235 spin_lock_irqsave(&trans->reg_lock, flags); 235 spin_lock_irqsave(&trans->reg_lock, flags);
236 iwl_grab_nic_access(trans); 236 iwl_grab_nic_access(trans);
237 val = __iwl_read_prph(trans, reg); 237 val = __iwl_read_prph(trans, ofs);
238 iwl_release_nic_access(trans); 238 iwl_release_nic_access(trans);
239 spin_unlock_irqrestore(&trans->reg_lock, flags); 239 spin_unlock_irqrestore(&trans->reg_lock, flags);
240 return val; 240 return val;
241} 241}
242EXPORT_SYMBOL_GPL(iwl_read_prph); 242EXPORT_SYMBOL_GPL(iwl_read_prph);
243 243
244void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val) 244void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
245{ 245{
246 unsigned long flags; 246 unsigned long flags;
247 247
248 spin_lock_irqsave(&trans->reg_lock, flags); 248 spin_lock_irqsave(&trans->reg_lock, flags);
249 if (likely(iwl_grab_nic_access(trans))) { 249 if (likely(iwl_grab_nic_access(trans))) {
250 __iwl_write_prph(trans, addr, val); 250 __iwl_write_prph(trans, ofs, val);
251 iwl_release_nic_access(trans); 251 iwl_release_nic_access(trans);
252 } 252 }
253 spin_unlock_irqrestore(&trans->reg_lock, flags); 253 spin_unlock_irqrestore(&trans->reg_lock, flags);
254} 254}
255EXPORT_SYMBOL_GPL(iwl_write_prph); 255EXPORT_SYMBOL_GPL(iwl_write_prph);
256 256
257void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 257void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
258{ 258{
259 unsigned long flags; 259 unsigned long flags;
260 260
261 spin_lock_irqsave(&trans->reg_lock, flags); 261 spin_lock_irqsave(&trans->reg_lock, flags);
262 if (likely(iwl_grab_nic_access(trans))) { 262 if (likely(iwl_grab_nic_access(trans))) {
263 __iwl_write_prph(trans, reg, 263 __iwl_write_prph(trans, ofs,
264 __iwl_read_prph(trans, reg) | mask); 264 __iwl_read_prph(trans, ofs) | mask);
265 iwl_release_nic_access(trans); 265 iwl_release_nic_access(trans);
266 } 266 }
267 spin_unlock_irqrestore(&trans->reg_lock, flags); 267 spin_unlock_irqrestore(&trans->reg_lock, flags);
268} 268}
269EXPORT_SYMBOL_GPL(iwl_set_bits_prph); 269EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
270 270
271void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, 271void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
272 u32 bits, u32 mask) 272 u32 bits, u32 mask)
273{ 273{
274 unsigned long flags; 274 unsigned long flags;
275 275
276 spin_lock_irqsave(&trans->reg_lock, flags); 276 spin_lock_irqsave(&trans->reg_lock, flags);
277 if (likely(iwl_grab_nic_access(trans))) { 277 if (likely(iwl_grab_nic_access(trans))) {
278 __iwl_write_prph(trans, reg, 278 __iwl_write_prph(trans, ofs,
279 (__iwl_read_prph(trans, reg) & mask) | bits); 279 (__iwl_read_prph(trans, ofs) & mask) | bits);
280 iwl_release_nic_access(trans); 280 iwl_release_nic_access(trans);
281 } 281 }
282 spin_unlock_irqrestore(&trans->reg_lock, flags); 282 spin_unlock_irqrestore(&trans->reg_lock, flags);
283} 283}
284EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph); 284EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
285 285
286void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) 286void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
287{ 287{
288 unsigned long flags; 288 unsigned long flags;
289 u32 val; 289 u32 val;
290 290
291 spin_lock_irqsave(&trans->reg_lock, flags); 291 spin_lock_irqsave(&trans->reg_lock, flags);
292 if (likely(iwl_grab_nic_access(trans))) { 292 if (likely(iwl_grab_nic_access(trans))) {
293 val = __iwl_read_prph(trans, reg); 293 val = __iwl_read_prph(trans, ofs);
294 __iwl_write_prph(trans, reg, (val & ~mask)); 294 __iwl_write_prph(trans, ofs, (val & ~mask));
295 iwl_release_nic_access(trans); 295 iwl_release_nic_access(trans);
296 } 296 }
297 spin_unlock_irqrestore(&trans->reg_lock, flags); 297 spin_unlock_irqrestore(&trans->reg_lock, flags);
@@ -327,11 +327,11 @@ u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
327EXPORT_SYMBOL_GPL(iwl_read_targ_mem); 327EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
328 328
329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 329int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
330 void *buf, int dwords) 330 const void *buf, int dwords)
331{ 331{
332 unsigned long flags; 332 unsigned long flags;
333 int offs, result = 0; 333 int offs, result = 0;
334 u32 *vals = buf; 334 const u32 *vals = buf;
335 335
336 spin_lock_irqsave(&trans->reg_lock, flags); 336 spin_lock_irqsave(&trans->reg_lock, flags);
337 if (likely(iwl_grab_nic_access(trans))) { 337 if (likely(iwl_grab_nic_access(trans))) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 50d3819739d1..48dc753e3742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -69,12 +69,12 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
69void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); 69void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
70 70
71 71
72u32 iwl_read_prph(struct iwl_trans *trans, u32 reg); 72u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
73void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val); 73void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
74void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); 74void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
75void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, 75void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
76 u32 bits, u32 mask); 76 u32 bits, u32 mask);
77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); 77void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
78 78
79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 79void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
80 void *buf, int dwords); 80 void *buf, int dwords);
@@ -87,7 +87,7 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
87 } while (0) 87 } while (0)
88 88
89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, 89int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
90 void *buf, int dwords); 90 const void *buf, int dwords);
91 91
92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); 92u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); 93int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 9253ef1dba72..c3a4bb41e533 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -213,6 +213,9 @@
213#define SCD_CONTEXT_QUEUE_OFFSET(x)\ 213#define SCD_CONTEXT_QUEUE_OFFSET(x)\
214 (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8)) 214 (SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
215 215
216#define SCD_TX_STTS_QUEUE_OFFSET(x)\
217 (SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
218
216#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \ 219#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
217 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) 220 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
218 221
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index ff1154232885..b76532e238c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -221,14 +221,21 @@ struct iwl_device_cmd {
221/** 221/**
222 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command 222 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
223 * 223 *
224 * IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's 224 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
225 * ring. The transport layer doesn't map the command's buffer to DMA, but 225 * ring. The transport layer doesn't map the command's buffer to DMA, but
226 * rather copies it to an previously allocated DMA buffer. This flag tells 226 * rather copies it to an previously allocated DMA buffer. This flag tells
227 * the transport layer not to copy the command, but to map the existing 227 * the transport layer not to copy the command, but to map the existing
228 * buffer. This can save memcpy and is worth with very big comamnds. 228 * buffer (that is passed in) instead. This saves the memcpy and allows
229 * commands that are bigger than the fixed buffer to be submitted.
230 * Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
231 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
232 * chunk internally and free it again after the command completes. This
233 * can (currently) be used only once per command.
234 * Note that a TFD entry after a DUP one cannot be a normal copied one.
229 */ 235 */
230enum iwl_hcmd_dataflag { 236enum iwl_hcmd_dataflag {
231 IWL_HCMD_DFL_NOCOPY = BIT(0), 237 IWL_HCMD_DFL_NOCOPY = BIT(0),
238 IWL_HCMD_DFL_DUP = BIT(1),
232}; 239};
233 240
234/** 241/**
@@ -348,14 +355,17 @@ struct iwl_trans;
348 * @start_fw: allocates and inits all the resources for the transport 355 * @start_fw: allocates and inits all the resources for the transport
349 * layer. Also kick a fw image. 356 * layer. Also kick a fw image.
350 * May sleep 357 * May sleep
351 * @fw_alive: called when the fw sends alive notification 358 * @fw_alive: called when the fw sends alive notification. If the fw provides
359 * the SCD base address in SRAM, then provide it here, or 0 otherwise.
352 * May sleep 360 * May sleep
353 * @stop_device:stops the whole device (embedded CPU put to reset) 361 * @stop_device:stops the whole device (embedded CPU put to reset)
354 * May sleep 362 * May sleep
355 * @wowlan_suspend: put the device into the correct mode for WoWLAN during 363 * @wowlan_suspend: put the device into the correct mode for WoWLAN during
356 * suspend. This is optional, if not implemented WoWLAN will not be 364 * suspend. This is optional, if not implemented WoWLAN will not be
357 * supported. This callback may sleep. 365 * supported. This callback may sleep.
358 * @send_cmd:send a host command 366 * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
367 * If RFkill is asserted in the middle of a SYNC host command, it must
368 * return -ERFKILL straight away.
359 * May sleep only if CMD_SYNC is set 369 * May sleep only if CMD_SYNC is set
360 * @tx: send an skb 370 * @tx: send an skb
361 * Must be atomic 371 * Must be atomic
@@ -375,6 +385,8 @@ struct iwl_trans;
375 * @write8: write a u8 to a register at offset ofs from the BAR 385 * @write8: write a u8 to a register at offset ofs from the BAR
376 * @write32: write a u32 to a register at offset ofs from the BAR 386 * @write32: write a u32 to a register at offset ofs from the BAR
377 * @read32: read a u32 register at offset ofs from the BAR 387 * @read32: read a u32 register at offset ofs from the BAR
388 * @read_prph: read a DWORD from a periphery register
389 * @write_prph: write a DWORD to a periphery register
378 * @configure: configure parameters required by the transport layer from 390 * @configure: configure parameters required by the transport layer from
379 * the op_mode. May be called several times before start_fw, can't be 391 * the op_mode. May be called several times before start_fw, can't be
380 * called after that. 392 * called after that.
@@ -385,7 +397,7 @@ struct iwl_trans_ops {
385 int (*start_hw)(struct iwl_trans *iwl_trans); 397 int (*start_hw)(struct iwl_trans *iwl_trans);
386 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving); 398 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
387 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); 399 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
388 void (*fw_alive)(struct iwl_trans *trans); 400 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
389 void (*stop_device)(struct iwl_trans *trans); 401 void (*stop_device)(struct iwl_trans *trans);
390 402
391 void (*wowlan_suspend)(struct iwl_trans *trans); 403 void (*wowlan_suspend)(struct iwl_trans *trans);
@@ -410,6 +422,8 @@ struct iwl_trans_ops {
410 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); 422 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
411 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); 423 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
412 u32 (*read32)(struct iwl_trans *trans, u32 ofs); 424 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
425 u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
426 void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
413 void (*configure)(struct iwl_trans *trans, 427 void (*configure)(struct iwl_trans *trans,
414 const struct iwl_trans_config *trans_cfg); 428 const struct iwl_trans_config *trans_cfg);
415 void (*set_pmi)(struct iwl_trans *trans, bool state); 429 void (*set_pmi)(struct iwl_trans *trans, bool state);
@@ -438,12 +452,15 @@ enum iwl_trans_state {
438 * Set during transport allocation. 452 * Set during transport allocation.
439 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 453 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
440 * @pm_support: set to true in start_hw if link pm is supported 454 * @pm_support: set to true in start_hw if link pm is supported
441 * @wait_command_queue: the wait_queue for SYNC host commands
442 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. 455 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
443 * The user should use iwl_trans_{alloc,free}_tx_cmd. 456 * The user should use iwl_trans_{alloc,free}_tx_cmd.
444 * @dev_cmd_headroom: room needed for the transport's private use before the 457 * @dev_cmd_headroom: room needed for the transport's private use before the
445 * device_cmd for Tx - for internal use only 458 * device_cmd for Tx - for internal use only
446 * The user should use iwl_trans_{alloc,free}_tx_cmd. 459 * The user should use iwl_trans_{alloc,free}_tx_cmd.
460 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
461 * starting the firmware, used for tracing
462 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
463 * start of the 802.11 header in the @rx_mpdu_cmd
447 */ 464 */
448struct iwl_trans { 465struct iwl_trans {
449 const struct iwl_trans_ops *ops; 466 const struct iwl_trans_ops *ops;
@@ -457,9 +474,9 @@ struct iwl_trans {
457 u32 hw_id; 474 u32 hw_id;
458 char hw_id_str[52]; 475 char hw_id_str[52];
459 476
460 bool pm_support; 477 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
461 478
462 wait_queue_head_t wait_command_queue; 479 bool pm_support;
463 480
464 /* The following fields are internal only */ 481 /* The following fields are internal only */
465 struct kmem_cache *dev_cmd_pool; 482 struct kmem_cache *dev_cmd_pool;
@@ -476,10 +493,6 @@ struct iwl_trans {
476static inline void iwl_trans_configure(struct iwl_trans *trans, 493static inline void iwl_trans_configure(struct iwl_trans *trans,
477 const struct iwl_trans_config *trans_cfg) 494 const struct iwl_trans_config *trans_cfg)
478{ 495{
479 /*
480 * only set the op_mode for the moment. Later on, this function will do
481 * more
482 */
483 trans->op_mode = trans_cfg->op_mode; 496 trans->op_mode = trans_cfg->op_mode;
484 497
485 trans->ops->configure(trans, trans_cfg); 498 trans->ops->configure(trans, trans_cfg);
@@ -499,16 +512,19 @@ static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
499 512
500 trans->ops->stop_hw(trans, op_mode_leaving); 513 trans->ops->stop_hw(trans, op_mode_leaving);
501 514
515 if (op_mode_leaving)
516 trans->op_mode = NULL;
517
502 trans->state = IWL_TRANS_NO_FW; 518 trans->state = IWL_TRANS_NO_FW;
503} 519}
504 520
505static inline void iwl_trans_fw_alive(struct iwl_trans *trans) 521static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
506{ 522{
507 might_sleep(); 523 might_sleep();
508 524
509 trans->state = IWL_TRANS_FW_ALIVE; 525 trans->state = IWL_TRANS_FW_ALIVE;
510 526
511 trans->ops->fw_alive(trans); 527 trans->ops->fw_alive(trans, scd_addr);
512} 528}
513 529
514static inline int iwl_trans_start_fw(struct iwl_trans *trans, 530static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -516,6 +532,8 @@ static inline int iwl_trans_start_fw(struct iwl_trans *trans,
516{ 532{
517 might_sleep(); 533 might_sleep();
518 534
535 WARN_ON_ONCE(!trans->rx_mpdu_cmd);
536
519 return trans->ops->start_fw(trans, fw); 537 return trans->ops->start_fw(trans, fw);
520} 538}
521 539
@@ -650,6 +668,17 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
650 return trans->ops->read32(trans, ofs); 668 return trans->ops->read32(trans, ofs);
651} 669}
652 670
671static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
672{
673 return trans->ops->read_prph(trans, ofs);
674}
675
676static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
677 u32 val)
678{
679 return trans->ops->write_prph(trans, ofs, val);
680}
681
653static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) 682static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
654{ 683{
655 trans->ops->set_pmi(trans, state); 684 trans->ops->set_pmi(trans, state);
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 81b83f484f08..f8620ecae6b4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -94,8 +94,8 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
94 .device_family = IWL_DEVICE_FAMILY_1000, \ 94 .device_family = IWL_DEVICE_FAMILY_1000, \
95 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 95 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
96 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 96 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
97 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 97 .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
98 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 98 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
99 .base_params = &iwl1000_base_params, \ 99 .base_params = &iwl1000_base_params, \
100 .eeprom_params = &iwl1000_eeprom_params, \ 100 .eeprom_params = &iwl1000_eeprom_params, \
101 .led_mode = IWL_LED_BLINK 101 .led_mode = IWL_LED_BLINK
@@ -119,8 +119,8 @@ const struct iwl_cfg iwl1000_bg_cfg = {
119 .device_family = IWL_DEVICE_FAMILY_100, \ 119 .device_family = IWL_DEVICE_FAMILY_100, \
120 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 120 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
121 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 121 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
122 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 122 .nvm_ver = EEPROM_1000_EEPROM_VERSION, \
123 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 123 .nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
124 .base_params = &iwl1000_base_params, \ 124 .base_params = &iwl1000_base_params, \
125 .eeprom_params = &iwl1000_eeprom_params, \ 125 .eeprom_params = &iwl1000_eeprom_params, \
126 .led_mode = IWL_LED_RF_STATE, \ 126 .led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 9fbde32f7559..244019cec3e1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -138,8 +138,8 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
138 .device_family = IWL_DEVICE_FAMILY_2000, \ 138 .device_family = IWL_DEVICE_FAMILY_2000, \
139 .max_inst_size = IWL60_RTC_INST_SIZE, \ 139 .max_inst_size = IWL60_RTC_INST_SIZE, \
140 .max_data_size = IWL60_RTC_DATA_SIZE, \ 140 .max_data_size = IWL60_RTC_DATA_SIZE, \
141 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 141 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
142 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 142 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
143 .base_params = &iwl2000_base_params, \ 143 .base_params = &iwl2000_base_params, \
144 .eeprom_params = &iwl20x0_eeprom_params, \ 144 .eeprom_params = &iwl20x0_eeprom_params, \
145 .need_temp_offset_calib = true, \ 145 .need_temp_offset_calib = true, \
@@ -166,8 +166,8 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
166 .device_family = IWL_DEVICE_FAMILY_2030, \ 166 .device_family = IWL_DEVICE_FAMILY_2030, \
167 .max_inst_size = IWL60_RTC_INST_SIZE, \ 167 .max_inst_size = IWL60_RTC_INST_SIZE, \
168 .max_data_size = IWL60_RTC_DATA_SIZE, \ 168 .max_data_size = IWL60_RTC_DATA_SIZE, \
169 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 169 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
170 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 170 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
171 .base_params = &iwl2030_base_params, \ 171 .base_params = &iwl2030_base_params, \
172 .bt_params = &iwl2030_bt_params, \ 172 .bt_params = &iwl2030_bt_params, \
173 .eeprom_params = &iwl20x0_eeprom_params, \ 173 .eeprom_params = &iwl20x0_eeprom_params, \
@@ -190,8 +190,8 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
190 .device_family = IWL_DEVICE_FAMILY_105, \ 190 .device_family = IWL_DEVICE_FAMILY_105, \
191 .max_inst_size = IWL60_RTC_INST_SIZE, \ 191 .max_inst_size = IWL60_RTC_INST_SIZE, \
192 .max_data_size = IWL60_RTC_DATA_SIZE, \ 192 .max_data_size = IWL60_RTC_DATA_SIZE, \
193 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 193 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
194 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 194 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
195 .base_params = &iwl2000_base_params, \ 195 .base_params = &iwl2000_base_params, \
196 .eeprom_params = &iwl20x0_eeprom_params, \ 196 .eeprom_params = &iwl20x0_eeprom_params, \
197 .need_temp_offset_calib = true, \ 197 .need_temp_offset_calib = true, \
@@ -220,8 +220,8 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
220 .device_family = IWL_DEVICE_FAMILY_135, \ 220 .device_family = IWL_DEVICE_FAMILY_135, \
221 .max_inst_size = IWL60_RTC_INST_SIZE, \ 221 .max_inst_size = IWL60_RTC_INST_SIZE, \
222 .max_data_size = IWL60_RTC_DATA_SIZE, \ 222 .max_data_size = IWL60_RTC_DATA_SIZE, \
223 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 223 .nvm_ver = EEPROM_2000_EEPROM_VERSION, \
224 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 224 .nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
225 .base_params = &iwl2030_base_params, \ 225 .base_params = &iwl2030_base_params, \
226 .bt_params = &iwl2030_bt_params, \ 226 .bt_params = &iwl2030_bt_params, \
227 .eeprom_params = &iwl20x0_eeprom_params, \ 227 .eeprom_params = &iwl20x0_eeprom_params, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index d1665fa6d15a..83ca40321ff1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -92,8 +92,8 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
92 .device_family = IWL_DEVICE_FAMILY_5000, \ 92 .device_family = IWL_DEVICE_FAMILY_5000, \
93 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 93 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
94 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 94 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
95 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 95 .nvm_ver = EEPROM_5000_EEPROM_VERSION, \
96 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 96 .nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
97 .base_params = &iwl5000_base_params, \ 97 .base_params = &iwl5000_base_params, \
98 .eeprom_params = &iwl5000_eeprom_params, \ 98 .eeprom_params = &iwl5000_eeprom_params, \
99 .led_mode = IWL_LED_BLINK 99 .led_mode = IWL_LED_BLINK
@@ -139,8 +139,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
139 .device_family = IWL_DEVICE_FAMILY_5000, 139 .device_family = IWL_DEVICE_FAMILY_5000,
140 .max_inst_size = IWLAGN_RTC_INST_SIZE, 140 .max_inst_size = IWLAGN_RTC_INST_SIZE,
141 .max_data_size = IWLAGN_RTC_DATA_SIZE, 141 .max_data_size = IWLAGN_RTC_DATA_SIZE,
142 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 142 .nvm_ver = EEPROM_5050_EEPROM_VERSION,
143 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 143 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
144 .base_params = &iwl5000_base_params, 144 .base_params = &iwl5000_base_params,
145 .eeprom_params = &iwl5000_eeprom_params, 145 .eeprom_params = &iwl5000_eeprom_params,
146 .ht_params = &iwl5000_ht_params, 146 .ht_params = &iwl5000_ht_params,
@@ -156,8 +156,8 @@ const struct iwl_cfg iwl5350_agn_cfg = {
156 .device_family = IWL_DEVICE_FAMILY_5150, \ 156 .device_family = IWL_DEVICE_FAMILY_5150, \
157 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 157 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
158 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 158 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
159 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 159 .nvm_ver = EEPROM_5050_EEPROM_VERSION, \
160 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 160 .nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
161 .base_params = &iwl5000_base_params, \ 161 .base_params = &iwl5000_base_params, \
162 .eeprom_params = &iwl5000_eeprom_params, \ 162 .eeprom_params = &iwl5000_eeprom_params, \
163 .no_xtal_calib = true, \ 163 .no_xtal_calib = true, \
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index 4a57624afc40..d4df976d4709 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -160,8 +160,8 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
160 .device_family = IWL_DEVICE_FAMILY_6005, \ 160 .device_family = IWL_DEVICE_FAMILY_6005, \
161 .max_inst_size = IWL60_RTC_INST_SIZE, \ 161 .max_inst_size = IWL60_RTC_INST_SIZE, \
162 .max_data_size = IWL60_RTC_DATA_SIZE, \ 162 .max_data_size = IWL60_RTC_DATA_SIZE, \
163 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 163 .nvm_ver = EEPROM_6005_EEPROM_VERSION, \
164 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 164 .nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
165 .base_params = &iwl6000_g2_base_params, \ 165 .base_params = &iwl6000_g2_base_params, \
166 .eeprom_params = &iwl6000_eeprom_params, \ 166 .eeprom_params = &iwl6000_eeprom_params, \
167 .need_temp_offset_calib = true, \ 167 .need_temp_offset_calib = true, \
@@ -215,8 +215,8 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
215 .device_family = IWL_DEVICE_FAMILY_6030, \ 215 .device_family = IWL_DEVICE_FAMILY_6030, \
216 .max_inst_size = IWL60_RTC_INST_SIZE, \ 216 .max_inst_size = IWL60_RTC_INST_SIZE, \
217 .max_data_size = IWL60_RTC_DATA_SIZE, \ 217 .max_data_size = IWL60_RTC_DATA_SIZE, \
218 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 218 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
219 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 219 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
220 .base_params = &iwl6000_g2_base_params, \ 220 .base_params = &iwl6000_g2_base_params, \
221 .bt_params = &iwl6000_bt_params, \ 221 .bt_params = &iwl6000_bt_params, \
222 .eeprom_params = &iwl6000_eeprom_params, \ 222 .eeprom_params = &iwl6000_eeprom_params, \
@@ -254,8 +254,8 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
254 .device_family = IWL_DEVICE_FAMILY_6030, \ 254 .device_family = IWL_DEVICE_FAMILY_6030, \
255 .max_inst_size = IWL60_RTC_INST_SIZE, \ 255 .max_inst_size = IWL60_RTC_INST_SIZE, \
256 .max_data_size = IWL60_RTC_DATA_SIZE, \ 256 .max_data_size = IWL60_RTC_DATA_SIZE, \
257 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 257 .nvm_ver = EEPROM_6030_EEPROM_VERSION, \
258 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 258 .nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
259 .base_params = &iwl6000_g2_base_params, \ 259 .base_params = &iwl6000_g2_base_params, \
260 .bt_params = &iwl6000_bt_params, \ 260 .bt_params = &iwl6000_bt_params, \
261 .eeprom_params = &iwl6000_eeprom_params, \ 261 .eeprom_params = &iwl6000_eeprom_params, \
@@ -306,8 +306,8 @@ const struct iwl_cfg iwl130_bg_cfg = {
306 .max_data_size = IWL60_RTC_DATA_SIZE, \ 306 .max_data_size = IWL60_RTC_DATA_SIZE, \
307 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ 307 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
308 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ 308 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
309 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 309 .nvm_ver = EEPROM_6000_EEPROM_VERSION, \
310 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 310 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
311 .base_params = &iwl6000_base_params, \ 311 .base_params = &iwl6000_base_params, \
312 .eeprom_params = &iwl6000_eeprom_params, \ 312 .eeprom_params = &iwl6000_eeprom_params, \
313 .led_mode = IWL_LED_BLINK 313 .led_mode = IWL_LED_BLINK
@@ -337,8 +337,8 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
337 .max_data_size = IWL60_RTC_DATA_SIZE, \ 337 .max_data_size = IWL60_RTC_DATA_SIZE, \
338 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ 338 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
339 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ 339 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
340 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 340 .nvm_ver = EEPROM_6050_EEPROM_VERSION, \
341 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 341 .nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
342 .base_params = &iwl6050_base_params, \ 342 .base_params = &iwl6050_base_params, \
343 .eeprom_params = &iwl6000_eeprom_params, \ 343 .eeprom_params = &iwl6000_eeprom_params, \
344 .led_mode = IWL_LED_BLINK, \ 344 .led_mode = IWL_LED_BLINK, \
@@ -362,8 +362,8 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
362 .device_family = IWL_DEVICE_FAMILY_6150, \ 362 .device_family = IWL_DEVICE_FAMILY_6150, \
363 .max_inst_size = IWL60_RTC_INST_SIZE, \ 363 .max_inst_size = IWL60_RTC_INST_SIZE, \
364 .max_data_size = IWL60_RTC_DATA_SIZE, \ 364 .max_data_size = IWL60_RTC_DATA_SIZE, \
365 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 365 .nvm_ver = EEPROM_6150_EEPROM_VERSION, \
366 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 366 .nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
367 .base_params = &iwl6050_base_params, \ 367 .base_params = &iwl6050_base_params, \
368 .eeprom_params = &iwl6000_eeprom_params, \ 368 .eeprom_params = &iwl6000_eeprom_params, \
369 .led_mode = IWL_LED_BLINK, \ 369 .led_mode = IWL_LED_BLINK, \
@@ -389,8 +389,8 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
389 .device_family = IWL_DEVICE_FAMILY_6000, 389 .device_family = IWL_DEVICE_FAMILY_6000,
390 .max_inst_size = IWL60_RTC_INST_SIZE, 390 .max_inst_size = IWL60_RTC_INST_SIZE,
391 .max_data_size = IWL60_RTC_DATA_SIZE, 391 .max_data_size = IWL60_RTC_DATA_SIZE,
392 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 392 .nvm_ver = EEPROM_6000_EEPROM_VERSION,
393 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 393 .nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
394 .base_params = &iwl6000_base_params, 394 .base_params = &iwl6000_base_params,
395 .eeprom_params = &iwl6000_eeprom_params, 395 .eeprom_params = &iwl6000_eeprom_params,
396 .ht_params = &iwl6000_ht_params, 396 .ht_params = &iwl6000_ht_params,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2a4675396707..c2e141af353c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -69,7 +69,6 @@
69 69
70#include "iwl-trans.h" 70#include "iwl-trans.h"
71#include "iwl-drv.h" 71#include "iwl-drv.h"
72#include "iwl-trans.h"
73 72
74#include "cfg.h" 73#include "cfg.h"
75#include "internal.h" 74#include "internal.h"
@@ -268,6 +267,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
268 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 267 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
269 struct iwl_trans *iwl_trans; 268 struct iwl_trans *iwl_trans;
270 struct iwl_trans_pcie *trans_pcie; 269 struct iwl_trans_pcie *trans_pcie;
270 int ret;
271 271
272 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); 272 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
273 if (iwl_trans == NULL) 273 if (iwl_trans == NULL)
@@ -277,11 +277,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
277 277
278 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); 278 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
279 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); 279 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
280 if (!trans_pcie->drv) 280
281 if (IS_ERR_OR_NULL(trans_pcie->drv)) {
282 ret = PTR_ERR(trans_pcie->drv);
281 goto out_free_trans; 283 goto out_free_trans;
284 }
282 285
283 /* register transport layer debugfs here */ 286 /* register transport layer debugfs here */
284 if (iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir)) 287 ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
288 if (ret)
285 goto out_free_drv; 289 goto out_free_drv;
286 290
287 return 0; 291 return 0;
@@ -291,10 +295,10 @@ out_free_drv:
291out_free_trans: 295out_free_trans:
292 iwl_trans_pcie_free(iwl_trans); 296 iwl_trans_pcie_free(iwl_trans);
293 pci_set_drvdata(pdev, NULL); 297 pci_set_drvdata(pdev, NULL);
294 return -EFAULT; 298 return ret;
295} 299}
296 300
297static void __devexit iwl_pci_remove(struct pci_dev *pdev) 301static void iwl_pci_remove(struct pci_dev *pdev)
298{ 302{
299 struct iwl_trans *trans = pci_get_drvdata(pdev); 303 struct iwl_trans *trans = pci_get_drvdata(pdev);
300 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 304 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -353,7 +357,7 @@ static struct pci_driver iwl_pci_driver = {
353 .name = DRV_NAME, 357 .name = DRV_NAME,
354 .id_table = iwl_hw_card_ids, 358 .id_table = iwl_hw_card_ids,
355 .probe = iwl_pci_probe, 359 .probe = iwl_pci_probe,
356 .remove = __devexit_p(iwl_pci_remove), 360 .remove = iwl_pci_remove,
357 .driver.pm = IWL_PM_OPS, 361 .driver.pm = IWL_PM_OPS,
358}; 362};
359 363
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 401178f44a3b..d91d2e8c62f5 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -73,7 +73,7 @@ struct isr_statistics {
73}; 73};
74 74
75/** 75/**
76 * struct iwl_rx_queue - Rx queue 76 * struct iwl_rxq - Rx queue
77 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 77 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
78 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 78 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
79 * @pool: 79 * @pool:
@@ -91,7 +91,7 @@ struct isr_statistics {
91 * 91 *
92 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 92 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
93 */ 93 */
94struct iwl_rx_queue { 94struct iwl_rxq {
95 __le32 *bd; 95 __le32 *bd;
96 dma_addr_t bd_dma; 96 dma_addr_t bd_dma;
97 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; 97 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
@@ -157,8 +157,8 @@ struct iwl_cmd_meta {
157 * 32 since we don't need so many commands pending. Since the HW 157 * 32 since we don't need so many commands pending. Since the HW
158 * still uses 256 BDs for DMA though, n_bd stays 256. As a result, 158 * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
159 * the software buffers (in the variables @meta, @txb in struct 159 * the software buffers (in the variables @meta, @txb in struct
160 * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds 160 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
161 * in the same struct) have 256. 161 * the same struct) have 256.
162 * This means that we end up with the following: 162 * This means that we end up with the following:
163 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | 163 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
164 * SW entries: | 0 | ... | 31 | 164 * SW entries: | 0 | ... | 31 |
@@ -182,15 +182,17 @@ struct iwl_queue {
182#define TFD_TX_CMD_SLOTS 256 182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32 183#define TFD_CMD_SLOTS 32
184 184
185struct iwl_pcie_tx_queue_entry { 185struct iwl_pcie_txq_entry {
186 struct iwl_device_cmd *cmd; 186 struct iwl_device_cmd *cmd;
187 struct iwl_device_cmd *copy_cmd; 187 struct iwl_device_cmd *copy_cmd;
188 struct sk_buff *skb; 188 struct sk_buff *skb;
189 /* buffer to free after command completes */
190 const void *free_buf;
189 struct iwl_cmd_meta meta; 191 struct iwl_cmd_meta meta;
190}; 192};
191 193
192/** 194/**
193 * struct iwl_tx_queue - Tx Queue for DMA 195 * struct iwl_txq - Tx Queue for DMA
194 * @q: generic Rx/Tx queue descriptor 196 * @q: generic Rx/Tx queue descriptor
195 * @tfds: transmit frame descriptors (DMA memory) 197 * @tfds: transmit frame descriptors (DMA memory)
196 * @entries: transmit entries (driver state) 198 * @entries: transmit entries (driver state)
@@ -203,10 +205,10 @@ struct iwl_pcie_tx_queue_entry {
203 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 205 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
204 * descriptors) and required locking structures. 206 * descriptors) and required locking structures.
205 */ 207 */
206struct iwl_tx_queue { 208struct iwl_txq {
207 struct iwl_queue q; 209 struct iwl_queue q;
208 struct iwl_tfd *tfds; 210 struct iwl_tfd *tfds;
209 struct iwl_pcie_tx_queue_entry *entries; 211 struct iwl_pcie_txq_entry *entries;
210 spinlock_t lock; 212 spinlock_t lock;
211 struct timer_list stuck_timer; 213 struct timer_list stuck_timer;
212 struct iwl_trans_pcie *trans_pcie; 214 struct iwl_trans_pcie *trans_pcie;
@@ -236,7 +238,7 @@ struct iwl_tx_queue {
236 * @wd_timeout: queue watchdog timeout (jiffies) 238 * @wd_timeout: queue watchdog timeout (jiffies)
237 */ 239 */
238struct iwl_trans_pcie { 240struct iwl_trans_pcie {
239 struct iwl_rx_queue rxq; 241 struct iwl_rxq rxq;
240 struct work_struct rx_replenish; 242 struct work_struct rx_replenish;
241 struct iwl_trans *trans; 243 struct iwl_trans *trans;
242 struct iwl_drv *drv; 244 struct iwl_drv *drv;
@@ -258,7 +260,7 @@ struct iwl_trans_pcie {
258 struct iwl_dma_ptr scd_bc_tbls; 260 struct iwl_dma_ptr scd_bc_tbls;
259 struct iwl_dma_ptr kw; 261 struct iwl_dma_ptr kw;
260 262
261 struct iwl_tx_queue *txq; 263 struct iwl_txq *txq;
262 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 264 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
263 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 265 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
264 266
@@ -268,6 +270,8 @@ struct iwl_trans_pcie {
268 270
269 bool ucode_write_complete; 271 bool ucode_write_complete;
270 wait_queue_head_t ucode_write_waitq; 272 wait_queue_head_t ucode_write_waitq;
273 wait_queue_head_t wait_command_queue;
274
271 unsigned long status; 275 unsigned long status;
272 u8 cmd_queue; 276 u8 cmd_queue;
273 u8 cmd_fifo; 277 u8 cmd_fifo;
@@ -283,13 +287,23 @@ struct iwl_trans_pcie {
283 unsigned long wd_timeout; 287 unsigned long wd_timeout;
284}; 288};
285 289
286/***************************************************** 290/**
287* DRIVER STATUS FUNCTIONS 291 * enum iwl_pcie_status: status of the PCIe transport
288******************************************************/ 292 * @STATUS_HCMD_ACTIVE: a SYNC command is being processed
289#define STATUS_HCMD_ACTIVE 0 293 * @STATUS_DEVICE_ENABLED: APM is enabled
290#define STATUS_DEVICE_ENABLED 1 294 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
291#define STATUS_TPOWER_PMI 2 295 * @STATUS_INT_ENABLED: interrupts are enabled
292#define STATUS_INT_ENABLED 3 296 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
297 * @STATUS_FW_ERROR: the fw is in error state
298 */
299enum iwl_pcie_status {
300 STATUS_HCMD_ACTIVE,
301 STATUS_DEVICE_ENABLED,
302 STATUS_TPOWER_PMI,
303 STATUS_INT_ENABLED,
304 STATUS_RFKILL,
305 STATUS_FW_ERROR,
306};
293 307
294#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ 308#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
295 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) 309 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
@@ -301,6 +315,10 @@ iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
301 trans_specific); 315 trans_specific);
302} 316}
303 317
318/*
319 * Convention: trans API functions: iwl_trans_pcie_XXX
320 * Other functions: iwl_pcie_XXX
321 */
304struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 322struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
305 const struct pci_device_id *ent, 323 const struct pci_device_id *ent,
306 const struct iwl_cfg *cfg); 324 const struct iwl_cfg *cfg);
@@ -309,50 +327,43 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
309/***************************************************** 327/*****************************************************
310* RX 328* RX
311******************************************************/ 329******************************************************/
312void iwl_bg_rx_replenish(struct work_struct *data); 330int iwl_pcie_rx_init(struct iwl_trans *trans);
313void iwl_irq_tasklet(struct iwl_trans *trans); 331void iwl_pcie_tasklet(struct iwl_trans *trans);
314void iwl_rx_replenish(struct iwl_trans *trans); 332int iwl_pcie_rx_stop(struct iwl_trans *trans);
315void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 333void iwl_pcie_rx_free(struct iwl_trans *trans);
316 struct iwl_rx_queue *q);
317 334
318/***************************************************** 335/*****************************************************
319* ICT 336* ICT - interrupt handling
320******************************************************/ 337******************************************************/
321void iwl_reset_ict(struct iwl_trans *trans); 338irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
322void iwl_disable_ict(struct iwl_trans *trans); 339int iwl_pcie_alloc_ict(struct iwl_trans *trans);
323int iwl_alloc_isr_ict(struct iwl_trans *trans); 340void iwl_pcie_free_ict(struct iwl_trans *trans);
324void iwl_free_isr_ict(struct iwl_trans *trans); 341void iwl_pcie_reset_ict(struct iwl_trans *trans);
325irqreturn_t iwl_isr_ict(int irq, void *data); 342void iwl_pcie_disable_ict(struct iwl_trans *trans);
326 343
327/***************************************************** 344/*****************************************************
328* TX / HCMD 345* TX / HCMD
329******************************************************/ 346******************************************************/
330void iwl_txq_update_write_ptr(struct iwl_trans *trans, 347int iwl_pcie_tx_init(struct iwl_trans *trans);
331 struct iwl_tx_queue *txq); 348void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
332int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, 349int iwl_pcie_tx_stop(struct iwl_trans *trans);
333 struct iwl_tx_queue *txq, 350void iwl_pcie_tx_free(struct iwl_trans *trans);
334 dma_addr_t addr, u16 len, u8 reset);
335int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
336int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
337void iwl_tx_cmd_complete(struct iwl_trans *trans,
338 struct iwl_rx_cmd_buffer *rxb, int handler_status);
339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
340 struct iwl_tx_queue *txq,
341 u16 byte_cnt);
342void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, 351void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
343 int sta_id, int tid, int frame_limit, u16 ssn); 352 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); 353void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
345void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 354int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
346 enum dma_data_direction dma_dir); 355 struct iwl_device_cmd *dev_cmd, int txq_id);
347int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 356void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
348 struct sk_buff_head *skbs); 357int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
349int iwl_queue_space(const struct iwl_queue *q); 358void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
350 359 struct iwl_rx_cmd_buffer *rxb, int handler_status);
360void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
361 struct sk_buff_head *skbs);
351/***************************************************** 362/*****************************************************
352* Error handling 363* Error handling
353******************************************************/ 364******************************************************/
354int iwl_dump_fh(struct iwl_trans *trans, char **buf); 365int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
355void iwl_dump_csr(struct iwl_trans *trans); 366void iwl_pcie_dump_csr(struct iwl_trans *trans);
356 367
357/***************************************************** 368/*****************************************************
358* Helpers 369* Helpers
@@ -388,7 +399,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
388} 399}
389 400
390static inline void iwl_wake_queue(struct iwl_trans *trans, 401static inline void iwl_wake_queue(struct iwl_trans *trans,
391 struct iwl_tx_queue *txq) 402 struct iwl_txq *txq)
392{ 403{
393 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 404 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
394 405
@@ -399,7 +410,7 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
399} 410}
400 411
401static inline void iwl_stop_queue(struct iwl_trans *trans, 412static inline void iwl_stop_queue(struct iwl_trans *trans,
402 struct iwl_tx_queue *txq) 413 struct iwl_txq *txq)
403{ 414{
404 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 415 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
405 416
@@ -411,7 +422,7 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
411 txq->q.id); 422 txq->q.id);
412} 423}
413 424
414static inline int iwl_queue_used(const struct iwl_queue *q, int i) 425static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
415{ 426{
416 return q->write_ptr >= q->read_ptr ? 427 return q->write_ptr >= q->read_ptr ?
417 (i >= q->read_ptr && i < q->write_ptr) : 428 (i >= q->read_ptr && i < q->write_ptr) :
@@ -423,8 +434,8 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
423 return index & (q->n_window - 1); 434 return index & (q->n_window - 1);
424} 435}
425 436
426static inline const char * 437static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
427trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd) 438 u8 cmd)
428{ 439{
429 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd]) 440 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
430 return "UNKNOWN"; 441 return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index bb69f8f90b3b..dad4c4aad91f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -76,7 +76,7 @@
76 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When 76 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
77 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled 77 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
78 * to replenish the iwl->rxq->rx_free. 78 * to replenish the iwl->rxq->rx_free.
79 * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the 79 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
80 * iwl->rxq is replenished and the READ INDEX is updated (updating the 80 * iwl->rxq is replenished and the READ INDEX is updated (updating the
81 * 'processed' and 'read' driver indexes as well) 81 * 'processed' and 'read' driver indexes as well)
82 * + A received packet is processed and handed to the kernel network stack, 82 * + A received packet is processed and handed to the kernel network stack,
@@ -89,28 +89,28 @@
89 * 89 *
90 * Driver sequence: 90 * Driver sequence:
91 * 91 *
92 * iwl_rx_queue_alloc() Allocates rx_free 92 * iwl_rxq_alloc() Allocates rx_free
93 * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls 93 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
94 * iwl_rx_queue_restock 94 * iwl_pcie_rxq_restock
95 * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx 95 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
96 * queue, updates firmware pointers, and updates 96 * queue, updates firmware pointers, and updates
97 * the WRITE index. If insufficient rx_free buffers 97 * the WRITE index. If insufficient rx_free buffers
98 * are available, schedules iwl_rx_replenish 98 * are available, schedules iwl_pcie_rx_replenish
99 * 99 *
100 * -- enable interrupts -- 100 * -- enable interrupts --
101 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 101 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
102 * READ INDEX, detaching the SKB from the pool. 102 * READ INDEX, detaching the SKB from the pool.
103 * Moves the packet buffer from queue to rx_used. 103 * Moves the packet buffer from queue to rx_used.
104 * Calls iwl_rx_queue_restock to refill any empty 104 * Calls iwl_pcie_rxq_restock to refill any empty
105 * slots. 105 * slots.
106 * ... 106 * ...
107 * 107 *
108 */ 108 */
109 109
110/** 110/*
111 * iwl_rx_queue_space - Return number of free slots available in queue. 111 * iwl_rxq_space - Return number of free slots available in queue.
112 */ 112 */
113static int iwl_rx_queue_space(const struct iwl_rx_queue *q) 113static int iwl_rxq_space(const struct iwl_rxq *q)
114{ 114{
115 int s = q->read - q->write; 115 int s = q->read - q->write;
116 if (s <= 0) 116 if (s <= 0)
@@ -122,11 +122,28 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
122 return s; 122 return s;
123} 123}
124 124
125/** 125/*
126 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 126 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
127 */
128static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
129{
130 return cpu_to_le32((u32)(dma_addr >> 8));
131}
132
133/*
134 * iwl_pcie_rx_stop - stops the Rx DMA
135 */
136int iwl_pcie_rx_stop(struct iwl_trans *trans)
137{
138 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
139 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
140 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
141}
142
143/*
144 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
127 */ 145 */
128void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans, 146static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *q)
129 struct iwl_rx_queue *q)
130{ 147{
131 unsigned long flags; 148 unsigned long flags;
132 u32 reg; 149 u32 reg;
@@ -176,16 +193,8 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
176 spin_unlock_irqrestore(&q->lock, flags); 193 spin_unlock_irqrestore(&q->lock, flags);
177} 194}
178 195
179/** 196/*
180 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 197 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
181 */
182static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
183{
184 return cpu_to_le32((u32)(dma_addr >> 8));
185}
186
187/**
188 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
189 * 198 *
190 * If there are slots in the RX queue that need to be restocked, 199 * If there are slots in the RX queue that need to be restocked,
191 * and we have free pre-allocated buffers, fill the ranks as much 200 * and we have free pre-allocated buffers, fill the ranks as much
@@ -195,11 +204,10 @@ static inline __le32 iwl_dma_addr2rbd_ptr(dma_addr_t dma_addr)
195 * also updates the memory address in the firmware to reference the new 204 * also updates the memory address in the firmware to reference the new
196 * target buffer. 205 * target buffer.
197 */ 206 */
198static void iwl_rx_queue_restock(struct iwl_trans *trans) 207static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
199{ 208{
200 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 209 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
201 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 210 struct iwl_rxq *rxq = &trans_pcie->rxq;
202 struct list_head *element;
203 struct iwl_rx_mem_buffer *rxb; 211 struct iwl_rx_mem_buffer *rxb;
204 unsigned long flags; 212 unsigned long flags;
205 213
@@ -215,18 +223,18 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
215 return; 223 return;
216 224
217 spin_lock_irqsave(&rxq->lock, flags); 225 spin_lock_irqsave(&rxq->lock, flags);
218 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { 226 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
219 /* The overwritten rxb must be a used one */ 227 /* The overwritten rxb must be a used one */
220 rxb = rxq->queue[rxq->write]; 228 rxb = rxq->queue[rxq->write];
221 BUG_ON(rxb && rxb->page); 229 BUG_ON(rxb && rxb->page);
222 230
223 /* Get next free Rx buffer, remove from free list */ 231 /* Get next free Rx buffer, remove from free list */
224 element = rxq->rx_free.next; 232 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
225 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 233 list);
226 list_del(element); 234 list_del(&rxb->list);
227 235
228 /* Point to Rx buffer via next RBD in circular buffer */ 236 /* Point to Rx buffer via next RBD in circular buffer */
229 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(rxb->page_dma); 237 rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
230 rxq->queue[rxq->write] = rxb; 238 rxq->queue[rxq->write] = rxb;
231 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 239 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
232 rxq->free_count--; 240 rxq->free_count--;
@@ -243,24 +251,23 @@ static void iwl_rx_queue_restock(struct iwl_trans *trans)
243 spin_lock_irqsave(&rxq->lock, flags); 251 spin_lock_irqsave(&rxq->lock, flags);
244 rxq->need_update = 1; 252 rxq->need_update = 1;
245 spin_unlock_irqrestore(&rxq->lock, flags); 253 spin_unlock_irqrestore(&rxq->lock, flags);
246 iwl_rx_queue_update_write_ptr(trans, rxq); 254 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
247 } 255 }
248} 256}
249 257
250/* 258/*
251 * iwl_rx_allocate - allocate a page for each used RBD 259 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
252 * 260 *
253 * A used RBD is an Rx buffer that has been given to the stack. To use it again 261 * A used RBD is an Rx buffer that has been given to the stack. To use it again
254 * a page must be allocated and the RBD must point to the page. This function 262 * a page must be allocated and the RBD must point to the page. This function
255 * doesn't change the HW pointer but handles the list of pages that is used by 263 * doesn't change the HW pointer but handles the list of pages that is used by
256 * iwl_rx_queue_restock. The latter function will update the HW to use the newly 264 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
257 * allocated buffers. 265 * allocated buffers.
258 */ 266 */
259static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority) 267static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
260{ 268{
261 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 269 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
262 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 270 struct iwl_rxq *rxq = &trans_pcie->rxq;
263 struct list_head *element;
264 struct iwl_rx_mem_buffer *rxb; 271 struct iwl_rx_mem_buffer *rxb;
265 struct page *page; 272 struct page *page;
266 unsigned long flags; 273 unsigned long flags;
@@ -308,10 +315,9 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
308 __free_pages(page, trans_pcie->rx_page_order); 315 __free_pages(page, trans_pcie->rx_page_order);
309 return; 316 return;
310 } 317 }
311 element = rxq->rx_used.next; 318 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
312 rxb = list_entry(element, struct iwl_rx_mem_buffer, list); 319 list);
313 list_del(element); 320 list_del(&rxb->list);
314
315 spin_unlock_irqrestore(&rxq->lock, flags); 321 spin_unlock_irqrestore(&rxq->lock, flags);
316 322
317 BUG_ON(rxb->page); 323 BUG_ON(rxb->page);
@@ -343,47 +349,227 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
343 } 349 }
344} 350}
345 351
352static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
353{
354 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
355 struct iwl_rxq *rxq = &trans_pcie->rxq;
356 int i;
357
358 /* Fill the rx_used queue with _all_ of the Rx buffers */
359 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
360 /* In the reset function, these buffers may have been allocated
361 * to an SKB, so we need to unmap and free potential storage */
362 if (rxq->pool[i].page != NULL) {
363 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
364 PAGE_SIZE << trans_pcie->rx_page_order,
365 DMA_FROM_DEVICE);
366 __free_pages(rxq->pool[i].page,
367 trans_pcie->rx_page_order);
368 rxq->pool[i].page = NULL;
369 }
370 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
371 }
372}
373
346/* 374/*
347 * iwl_rx_replenish - Move all used buffers from rx_used to rx_free 375 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
348 * 376 *
349 * When moving to rx_free an page is allocated for the slot. 377 * When moving to rx_free an page is allocated for the slot.
350 * 378 *
351 * Also restock the Rx queue via iwl_rx_queue_restock. 379 * Also restock the Rx queue via iwl_pcie_rxq_restock.
352 * This is called as a scheduled work item (except for during initialization) 380 * This is called as a scheduled work item (except for during initialization)
353 */ 381 */
354void iwl_rx_replenish(struct iwl_trans *trans) 382static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
355{ 383{
356 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 384 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
357 unsigned long flags; 385 unsigned long flags;
358 386
359 iwl_rx_allocate(trans, GFP_KERNEL); 387 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
360 388
361 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 389 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
362 iwl_rx_queue_restock(trans); 390 iwl_pcie_rxq_restock(trans);
363 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 391 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
364} 392}
365 393
366static void iwl_rx_replenish_now(struct iwl_trans *trans) 394static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
367{ 395{
368 iwl_rx_allocate(trans, GFP_ATOMIC); 396 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
369 397
370 iwl_rx_queue_restock(trans); 398 iwl_pcie_rxq_restock(trans);
371} 399}
372 400
373void iwl_bg_rx_replenish(struct work_struct *data) 401static void iwl_pcie_rx_replenish_work(struct work_struct *data)
374{ 402{
375 struct iwl_trans_pcie *trans_pcie = 403 struct iwl_trans_pcie *trans_pcie =
376 container_of(data, struct iwl_trans_pcie, rx_replenish); 404 container_of(data, struct iwl_trans_pcie, rx_replenish);
377 405
378 iwl_rx_replenish(trans_pcie->trans); 406 iwl_pcie_rx_replenish(trans_pcie->trans);
407}
408
409static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
410{
411 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
412 struct iwl_rxq *rxq = &trans_pcie->rxq;
413 struct device *dev = trans->dev;
414
415 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
416
417 spin_lock_init(&rxq->lock);
418
419 if (WARN_ON(rxq->bd || rxq->rb_stts))
420 return -EINVAL;
421
422 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
423 rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
424 &rxq->bd_dma, GFP_KERNEL);
425 if (!rxq->bd)
426 goto err_bd;
427
428 /*Allocate the driver's pointer to receive buffer status */
429 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
430 &rxq->rb_stts_dma, GFP_KERNEL);
431 if (!rxq->rb_stts)
432 goto err_rb_stts;
433
434 return 0;
435
436err_rb_stts:
437 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
438 rxq->bd, rxq->bd_dma);
439 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
440 rxq->bd = NULL;
441err_bd:
442 return -ENOMEM;
443}
444
445static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
446{
447 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
448 u32 rb_size;
449 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
450
451 if (trans_pcie->rx_buf_size_8k)
452 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
453 else
454 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
455
456 /* Stop Rx DMA */
457 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
458
459 /* Reset driver's Rx queue write index */
460 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
461
462 /* Tell device where to find RBD circular buffer in DRAM */
463 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
464 (u32)(rxq->bd_dma >> 8));
465
466 /* Tell device where in DRAM to update its Rx status */
467 iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
468 rxq->rb_stts_dma >> 4);
469
470 /* Enable Rx DMA
471 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
472 * the credit mechanism in 5000 HW RX FIFO
473 * Direct rx interrupts to hosts
474 * Rx buffer size 4 or 8k
475 * RB timeout 0x10
476 * 256 RBDs
477 */
478 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
479 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
480 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
481 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
482 rb_size|
483 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
484 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
485
486 /* Set interrupt coalescing timer to default (2048 usecs) */
487 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
488}
489
490int iwl_pcie_rx_init(struct iwl_trans *trans)
491{
492 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
493 struct iwl_rxq *rxq = &trans_pcie->rxq;
494
495 int i, err;
496 unsigned long flags;
497
498 if (!rxq->bd) {
499 err = iwl_pcie_rx_alloc(trans);
500 if (err)
501 return err;
502 }
503
504 spin_lock_irqsave(&rxq->lock, flags);
505 INIT_LIST_HEAD(&rxq->rx_free);
506 INIT_LIST_HEAD(&rxq->rx_used);
507
508 INIT_WORK(&trans_pcie->rx_replenish,
509 iwl_pcie_rx_replenish_work);
510
511 iwl_pcie_rxq_free_rbs(trans);
512
513 for (i = 0; i < RX_QUEUE_SIZE; i++)
514 rxq->queue[i] = NULL;
515
516 /* Set us so that we have processed and used all buffers, but have
517 * not restocked the Rx queue with fresh buffers */
518 rxq->read = rxq->write = 0;
519 rxq->write_actual = 0;
520 rxq->free_count = 0;
521 spin_unlock_irqrestore(&rxq->lock, flags);
522
523 iwl_pcie_rx_replenish(trans);
524
525 iwl_pcie_rx_hw_init(trans, rxq);
526
527 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
528 rxq->need_update = 1;
529 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
530 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
531
532 return 0;
379} 533}
380 534
381static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, 535void iwl_pcie_rx_free(struct iwl_trans *trans)
536{
537 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
538 struct iwl_rxq *rxq = &trans_pcie->rxq;
539 unsigned long flags;
540
541 /*if rxq->bd is NULL, it means that nothing has been allocated,
542 * exit now */
543 if (!rxq->bd) {
544 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
545 return;
546 }
547
548 spin_lock_irqsave(&rxq->lock, flags);
549 iwl_pcie_rxq_free_rbs(trans);
550 spin_unlock_irqrestore(&rxq->lock, flags);
551
552 dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
553 rxq->bd, rxq->bd_dma);
554 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
555 rxq->bd = NULL;
556
557 if (rxq->rb_stts)
558 dma_free_coherent(trans->dev,
559 sizeof(struct iwl_rb_status),
560 rxq->rb_stts, rxq->rb_stts_dma);
561 else
562 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
563 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
564 rxq->rb_stts = NULL;
565}
566
567static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
382 struct iwl_rx_mem_buffer *rxb) 568 struct iwl_rx_mem_buffer *rxb)
383{ 569{
384 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
385 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 571 struct iwl_rxq *rxq = &trans_pcie->rxq;
386 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 572 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
387 unsigned long flags; 573 unsigned long flags;
388 bool page_stolen = false; 574 bool page_stolen = false;
389 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 575 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -413,13 +599,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
413 break; 599 break;
414 600
415 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n", 601 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
416 rxcb._offset, 602 rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
417 trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
418 pkt->hdr.cmd); 603 pkt->hdr.cmd);
419 604
420 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 605 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
421 len += sizeof(u32); /* account for status word */ 606 len += sizeof(u32); /* account for status word */
422 trace_iwlwifi_dev_rx(trans->dev, pkt, len); 607 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
608 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
423 609
424 /* Reclaim a command buffer only if this packet is a response 610 /* Reclaim a command buffer only if this packet is a response
425 * to a (driver-originated) command. 611 * to a (driver-originated) command.
@@ -445,7 +631,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
445 cmd_index = get_cmd_index(&txq->q, index); 631 cmd_index = get_cmd_index(&txq->q, index);
446 632
447 if (reclaim) { 633 if (reclaim) {
448 struct iwl_pcie_tx_queue_entry *ent; 634 struct iwl_pcie_txq_entry *ent;
449 ent = &txq->entries[cmd_index]; 635 ent = &txq->entries[cmd_index];
450 cmd = ent->copy_cmd; 636 cmd = ent->copy_cmd;
451 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD); 637 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
@@ -459,6 +645,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
459 /* The original command isn't needed any more */ 645 /* The original command isn't needed any more */
460 kfree(txq->entries[cmd_index].copy_cmd); 646 kfree(txq->entries[cmd_index].copy_cmd);
461 txq->entries[cmd_index].copy_cmd = NULL; 647 txq->entries[cmd_index].copy_cmd = NULL;
648 /* nor is the duplicated part of the command */
649 kfree(txq->entries[cmd_index].free_buf);
650 txq->entries[cmd_index].free_buf = NULL;
462 } 651 }
463 652
464 /* 653 /*
@@ -472,7 +661,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
472 * iwl_trans_send_cmd() 661 * iwl_trans_send_cmd()
473 * as we reclaim the driver command queue */ 662 * as we reclaim the driver command queue */
474 if (!rxcb._page_stolen) 663 if (!rxcb._page_stolen)
475 iwl_tx_cmd_complete(trans, &rxcb, err); 664 iwl_pcie_hcmd_complete(trans, &rxcb, err);
476 else 665 else
477 IWL_WARN(trans, "Claim null rxb?\n"); 666 IWL_WARN(trans, "Claim null rxb?\n");
478 } 667 }
@@ -514,17 +703,13 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
514 spin_unlock_irqrestore(&rxq->lock, flags); 703 spin_unlock_irqrestore(&rxq->lock, flags);
515} 704}
516 705
517/** 706/*
518 * iwl_rx_handle - Main entry function for receiving responses from uCode 707 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
519 *
520 * Uses the priv->rx_handlers callback function array to invoke
521 * the appropriate handlers, including command responses,
522 * frame-received notifications, and other notifications.
523 */ 708 */
524static void iwl_rx_handle(struct iwl_trans *trans) 709static void iwl_pcie_rx_handle(struct iwl_trans *trans)
525{ 710{
526 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 711 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
527 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 712 struct iwl_rxq *rxq = &trans_pcie->rxq;
528 u32 r, i; 713 u32 r, i;
529 u8 fill_rx = 0; 714 u8 fill_rx = 0;
530 u32 count = 8; 715 u32 count = 8;
@@ -532,7 +717,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
532 717
533 /* uCode's read index (stored in shared DRAM) indicates the last Rx 718 /* uCode's read index (stored in shared DRAM) indicates the last Rx
534 * buffer that the driver may process (last buffer filled by ucode). */ 719 * buffer that the driver may process (last buffer filled by ucode). */
535 r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; 720 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
536 i = rxq->read; 721 i = rxq->read;
537 722
538 /* Rx interrupt, but nothing sent from uCode */ 723 /* Rx interrupt, but nothing sent from uCode */
@@ -555,7 +740,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
555 740
556 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n", 741 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
557 r, i, rxb); 742 r, i, rxb);
558 iwl_rx_handle_rxbuf(trans, rxb); 743 iwl_pcie_rx_handle_rb(trans, rxb);
559 744
560 i = (i + 1) & RX_QUEUE_MASK; 745 i = (i + 1) & RX_QUEUE_MASK;
561 /* If there are a lot of unused frames, 746 /* If there are a lot of unused frames,
@@ -564,7 +749,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
564 count++; 749 count++;
565 if (count >= 8) { 750 if (count >= 8) {
566 rxq->read = i; 751 rxq->read = i;
567 iwl_rx_replenish_now(trans); 752 iwl_pcie_rx_replenish_now(trans);
568 count = 0; 753 count = 0;
569 } 754 }
570 } 755 }
@@ -573,39 +758,41 @@ static void iwl_rx_handle(struct iwl_trans *trans)
573 /* Backtrack one entry */ 758 /* Backtrack one entry */
574 rxq->read = i; 759 rxq->read = i;
575 if (fill_rx) 760 if (fill_rx)
576 iwl_rx_replenish_now(trans); 761 iwl_pcie_rx_replenish_now(trans);
577 else 762 else
578 iwl_rx_queue_restock(trans); 763 iwl_pcie_rxq_restock(trans);
579} 764}
580 765
581/** 766/*
582 * iwl_irq_handle_error - called for HW or SW error interrupt from card 767 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
583 */ 768 */
584static void iwl_irq_handle_error(struct iwl_trans *trans) 769static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
585{ 770{
771 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
772
586 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 773 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
587 if (trans->cfg->internal_wimax_coex && 774 if (trans->cfg->internal_wimax_coex &&
588 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 775 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
589 APMS_CLK_VAL_MRB_FUNC_MODE) || 776 APMS_CLK_VAL_MRB_FUNC_MODE) ||
590 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 777 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
591 APMG_PS_CTRL_VAL_RESET_REQ))) { 778 APMG_PS_CTRL_VAL_RESET_REQ))) {
592 struct iwl_trans_pcie *trans_pcie =
593 IWL_TRANS_GET_PCIE_TRANS(trans);
594
595 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 779 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
596 iwl_op_mode_wimax_active(trans->op_mode); 780 iwl_op_mode_wimax_active(trans->op_mode);
597 wake_up(&trans->wait_command_queue); 781 wake_up(&trans_pcie->wait_command_queue);
598 return; 782 return;
599 } 783 }
600 784
601 iwl_dump_csr(trans); 785 iwl_pcie_dump_csr(trans);
602 iwl_dump_fh(trans, NULL); 786 iwl_pcie_dump_fh(trans, NULL);
787
788 set_bit(STATUS_FW_ERROR, &trans_pcie->status);
789 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
790 wake_up(&trans_pcie->wait_command_queue);
603 791
604 iwl_op_mode_nic_error(trans->op_mode); 792 iwl_op_mode_nic_error(trans->op_mode);
605} 793}
606 794
607/* tasklet for iwlagn interrupt */ 795void iwl_pcie_tasklet(struct iwl_trans *trans)
608void iwl_irq_tasklet(struct iwl_trans *trans)
609{ 796{
610 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 797 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
611 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 798 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
@@ -657,7 +844,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
657 iwl_disable_interrupts(trans); 844 iwl_disable_interrupts(trans);
658 845
659 isr_stats->hw++; 846 isr_stats->hw++;
660 iwl_irq_handle_error(trans); 847 iwl_pcie_irq_handle_error(trans);
661 848
662 handled |= CSR_INT_BIT_HW_ERR; 849 handled |= CSR_INT_BIT_HW_ERR;
663 850
@@ -694,6 +881,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
694 isr_stats->rfkill++; 881 isr_stats->rfkill++;
695 882
696 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 883 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
884 if (hw_rfkill) {
885 set_bit(STATUS_RFKILL, &trans_pcie->status);
886 if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
887 &trans_pcie->status))
888 IWL_DEBUG_RF_KILL(trans,
889 "Rfkill while SYNC HCMD in flight\n");
890 wake_up(&trans_pcie->wait_command_queue);
891 } else {
892 clear_bit(STATUS_RFKILL, &trans_pcie->status);
893 }
697 894
698 handled |= CSR_INT_BIT_RF_KILL; 895 handled |= CSR_INT_BIT_RF_KILL;
699 } 896 }
@@ -710,17 +907,16 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
710 IWL_ERR(trans, "Microcode SW error detected. " 907 IWL_ERR(trans, "Microcode SW error detected. "
711 " Restarting 0x%X.\n", inta); 908 " Restarting 0x%X.\n", inta);
712 isr_stats->sw++; 909 isr_stats->sw++;
713 iwl_irq_handle_error(trans); 910 iwl_pcie_irq_handle_error(trans);
714 handled |= CSR_INT_BIT_SW_ERR; 911 handled |= CSR_INT_BIT_SW_ERR;
715 } 912 }
716 913
717 /* uCode wakes up after power-down sleep */ 914 /* uCode wakes up after power-down sleep */
718 if (inta & CSR_INT_BIT_WAKEUP) { 915 if (inta & CSR_INT_BIT_WAKEUP) {
719 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 916 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
720 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); 917 iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
721 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 918 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
722 iwl_txq_update_write_ptr(trans, 919 iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);
723 &trans_pcie->txq[i]);
724 920
725 isr_stats->wakeup++; 921 isr_stats->wakeup++;
726 922
@@ -758,7 +954,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
758 iwl_write8(trans, CSR_INT_PERIODIC_REG, 954 iwl_write8(trans, CSR_INT_PERIODIC_REG,
759 CSR_INT_PERIODIC_DIS); 955 CSR_INT_PERIODIC_DIS);
760 956
761 iwl_rx_handle(trans); 957 iwl_pcie_rx_handle(trans);
762 958
763 /* 959 /*
764 * Enable periodic interrupt in 8 msec only if we received 960 * Enable periodic interrupt in 8 msec only if we received
@@ -816,7 +1012,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
816#define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1012#define ICT_COUNT (ICT_SIZE / sizeof(u32))
817 1013
818/* Free dram table */ 1014/* Free dram table */
819void iwl_free_isr_ict(struct iwl_trans *trans) 1015void iwl_pcie_free_ict(struct iwl_trans *trans)
820{ 1016{
821 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1017 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
822 1018
@@ -829,13 +1025,12 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
829 } 1025 }
830} 1026}
831 1027
832
833/* 1028/*
834 * allocate dram shared table, it is an aligned memory 1029 * allocate dram shared table, it is an aligned memory
835 * block of ICT_SIZE. 1030 * block of ICT_SIZE.
836 * also reset all data related to ICT table interrupt. 1031 * also reset all data related to ICT table interrupt.
837 */ 1032 */
838int iwl_alloc_isr_ict(struct iwl_trans *trans) 1033int iwl_pcie_alloc_ict(struct iwl_trans *trans)
839{ 1034{
840 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1035 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
841 1036
@@ -848,7 +1043,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
848 1043
849 /* just an API sanity check ... it is guaranteed to be aligned */ 1044 /* just an API sanity check ... it is guaranteed to be aligned */
850 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 1045 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
851 iwl_free_isr_ict(trans); 1046 iwl_pcie_free_ict(trans);
852 return -EINVAL; 1047 return -EINVAL;
853 } 1048 }
854 1049
@@ -869,7 +1064,7 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
869/* Device is going up inform it about using ICT interrupt table, 1064/* Device is going up inform it about using ICT interrupt table,
870 * also we need to tell the driver to start using ICT interrupt. 1065 * also we need to tell the driver to start using ICT interrupt.
871 */ 1066 */
872void iwl_reset_ict(struct iwl_trans *trans) 1067void iwl_pcie_reset_ict(struct iwl_trans *trans)
873{ 1068{
874 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1069 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
875 u32 val; 1070 u32 val;
@@ -899,7 +1094,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
899} 1094}
900 1095
901/* Device is going down disable ict interrupt usage */ 1096/* Device is going down disable ict interrupt usage */
902void iwl_disable_ict(struct iwl_trans *trans) 1097void iwl_pcie_disable_ict(struct iwl_trans *trans)
903{ 1098{
904 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1099 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
905 unsigned long flags; 1100 unsigned long flags;
@@ -910,7 +1105,7 @@ void iwl_disable_ict(struct iwl_trans *trans)
910} 1105}
911 1106
912/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */ 1107/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
913static irqreturn_t iwl_isr(int irq, void *data) 1108static irqreturn_t iwl_pcie_isr(int irq, void *data)
914{ 1109{
915 struct iwl_trans *trans = data; 1110 struct iwl_trans *trans = data;
916 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1111 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -927,12 +1122,20 @@ static irqreturn_t iwl_isr(int irq, void *data)
927 * back-to-back ISRs and sporadic interrupts from our NIC. 1122 * back-to-back ISRs and sporadic interrupts from our NIC.
928 * If we have something to service, the tasklet will re-enable ints. 1123 * If we have something to service, the tasklet will re-enable ints.
929 * If we *don't* have something, we'll re-enable before leaving here. */ 1124 * If we *don't* have something, we'll re-enable before leaving here. */
930 inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ 1125 inta_mask = iwl_read32(trans, CSR_INT_MASK);
931 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 1126 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
932 1127
933 /* Discover which interrupts are active/pending */ 1128 /* Discover which interrupts are active/pending */
934 inta = iwl_read32(trans, CSR_INT); 1129 inta = iwl_read32(trans, CSR_INT);
935 1130
1131 if (inta & (~inta_mask)) {
1132 IWL_DEBUG_ISR(trans,
1133 "We got a masked interrupt (0x%08x)...Ack and ignore\n",
1134 inta & (~inta_mask));
1135 iwl_write32(trans, CSR_INT, inta & (~inta_mask));
1136 inta &= inta_mask;
1137 }
1138
936 /* Ignore interrupt if there's nothing in NIC to service. 1139 /* Ignore interrupt if there's nothing in NIC to service.
937 * This may be due to IRQ shared with another device, 1140 * This may be due to IRQ shared with another device,
938 * or due to sporadic interrupts thrown from our NIC. */ 1141 * or due to sporadic interrupts thrown from our NIC. */
@@ -957,7 +1160,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
957#endif 1160#endif
958 1161
959 trans_pcie->inta |= inta; 1162 trans_pcie->inta |= inta;
960 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 1163 /* iwl_pcie_tasklet() will service interrupts and re-enable them */
961 if (likely(inta)) 1164 if (likely(inta))
962 tasklet_schedule(&trans_pcie->irq_tasklet); 1165 tasklet_schedule(&trans_pcie->irq_tasklet);
963 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 1166 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
@@ -982,7 +1185,7 @@ none:
982 * the interrupt we need to service, driver will set the entries back to 0 and 1185 * the interrupt we need to service, driver will set the entries back to 0 and
983 * set index. 1186 * set index.
984 */ 1187 */
985irqreturn_t iwl_isr_ict(int irq, void *data) 1188irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
986{ 1189{
987 struct iwl_trans *trans = data; 1190 struct iwl_trans *trans = data;
988 struct iwl_trans_pcie *trans_pcie; 1191 struct iwl_trans_pcie *trans_pcie;
@@ -1002,23 +1205,21 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
1002 * use legacy interrupt. 1205 * use legacy interrupt.
1003 */ 1206 */
1004 if (unlikely(!trans_pcie->use_ict)) { 1207 if (unlikely(!trans_pcie->use_ict)) {
1005 irqreturn_t ret = iwl_isr(irq, data); 1208 irqreturn_t ret = iwl_pcie_isr(irq, data);
1006 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1209 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1007 return ret; 1210 return ret;
1008 } 1211 }
1009 1212
1010 trace_iwlwifi_dev_irq(trans->dev); 1213 trace_iwlwifi_dev_irq(trans->dev);
1011 1214
1012
1013 /* Disable (but don't clear!) interrupts here to avoid 1215 /* Disable (but don't clear!) interrupts here to avoid
1014 * back-to-back ISRs and sporadic interrupts from our NIC. 1216 * back-to-back ISRs and sporadic interrupts from our NIC.
1015 * If we have something to service, the tasklet will re-enable ints. 1217 * If we have something to service, the tasklet will re-enable ints.
1016 * If we *don't* have something, we'll re-enable before leaving here. 1218 * If we *don't* have something, we'll re-enable before leaving here.
1017 */ 1219 */
1018 inta_mask = iwl_read32(trans, CSR_INT_MASK); /* just for debug */ 1220 inta_mask = iwl_read32(trans, CSR_INT_MASK);
1019 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 1221 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
1020 1222
1021
1022 /* Ignore interrupt if there's nothing in NIC to service. 1223 /* Ignore interrupt if there's nothing in NIC to service.
1023 * This may be due to IRQ shared with another device, 1224 * This may be due to IRQ shared with another device,
1024 * or due to sporadic interrupts thrown from our NIC. */ 1225 * or due to sporadic interrupts thrown from our NIC. */
@@ -1067,7 +1268,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
1067 inta &= trans_pcie->inta_mask; 1268 inta &= trans_pcie->inta_mask;
1068 trans_pcie->inta |= inta; 1269 trans_pcie->inta |= inta;
1069 1270
1070 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 1271 /* iwl_pcie_tasklet() will service interrupts and re-enable them */
1071 if (likely(inta)) 1272 if (likely(inta))
1072 tasklet_schedule(&trans_pcie->irq_tasklet); 1273 tasklet_schedule(&trans_pcie->irq_tasklet);
1073 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) && 1274 else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index fe0fffd04304..d66cad4a7d6a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -74,584 +74,8 @@
74#include "iwl-prph.h" 74#include "iwl-prph.h"
75#include "iwl-agn-hw.h" 75#include "iwl-agn-hw.h"
76#include "internal.h" 76#include "internal.h"
77/* FIXME: need to abstract out TX command (once we know what it looks like) */
78#include "dvm/commands.h"
79 77
80#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ 78static void iwl_pcie_set_pwr_vmain(struct iwl_trans *trans)
81 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
82 (~(1<<(trans_pcie)->cmd_queue)))
83
84static int iwl_trans_rx_alloc(struct iwl_trans *trans)
85{
86 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
87 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
88 struct device *dev = trans->dev;
89
90 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
91
92 spin_lock_init(&rxq->lock);
93
94 if (WARN_ON(rxq->bd || rxq->rb_stts))
95 return -EINVAL;
96
97 /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
98 rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
99 &rxq->bd_dma, GFP_KERNEL);
100 if (!rxq->bd)
101 goto err_bd;
102
103 /*Allocate the driver's pointer to receive buffer status */
104 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
105 &rxq->rb_stts_dma, GFP_KERNEL);
106 if (!rxq->rb_stts)
107 goto err_rb_stts;
108
109 return 0;
110
111err_rb_stts:
112 dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
113 rxq->bd, rxq->bd_dma);
114 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
115 rxq->bd = NULL;
116err_bd:
117 return -ENOMEM;
118}
119
120static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
121{
122 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
123 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
124 int i;
125
126 /* Fill the rx_used queue with _all_ of the Rx buffers */
127 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
128 /* In the reset function, these buffers may have been allocated
129 * to an SKB, so we need to unmap and free potential storage */
130 if (rxq->pool[i].page != NULL) {
131 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
132 PAGE_SIZE << trans_pcie->rx_page_order,
133 DMA_FROM_DEVICE);
134 __free_pages(rxq->pool[i].page,
135 trans_pcie->rx_page_order);
136 rxq->pool[i].page = NULL;
137 }
138 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
139 }
140}
141
142static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
143 struct iwl_rx_queue *rxq)
144{
145 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
146 u32 rb_size;
147 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
148 u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
149
150 if (trans_pcie->rx_buf_size_8k)
151 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
152 else
153 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
154
155 /* Stop Rx DMA */
156 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157
158 /* Reset driver's Rx queue write index */
159 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
160
161 /* Tell device where to find RBD circular buffer in DRAM */
162 iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
163 (u32)(rxq->bd_dma >> 8));
164
165 /* Tell device where in DRAM to update its Rx status */
166 iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
167 rxq->rb_stts_dma >> 4);
168
169 /* Enable Rx DMA
170 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
171 * the credit mechanism in 5000 HW RX FIFO
172 * Direct rx interrupts to hosts
173 * Rx buffer size 4 or 8k
174 * RB timeout 0x10
175 * 256 RBDs
176 */
177 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
178 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
179 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
180 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
181 rb_size|
182 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
183 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
184
185 /* Set interrupt coalescing timer to default (2048 usecs) */
186 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
187}
188
189static int iwl_rx_init(struct iwl_trans *trans)
190{
191 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
192 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
193
194 int i, err;
195 unsigned long flags;
196
197 if (!rxq->bd) {
198 err = iwl_trans_rx_alloc(trans);
199 if (err)
200 return err;
201 }
202
203 spin_lock_irqsave(&rxq->lock, flags);
204 INIT_LIST_HEAD(&rxq->rx_free);
205 INIT_LIST_HEAD(&rxq->rx_used);
206
207 iwl_trans_rxq_free_rx_bufs(trans);
208
209 for (i = 0; i < RX_QUEUE_SIZE; i++)
210 rxq->queue[i] = NULL;
211
212 /* Set us so that we have processed and used all buffers, but have
213 * not restocked the Rx queue with fresh buffers */
214 rxq->read = rxq->write = 0;
215 rxq->write_actual = 0;
216 rxq->free_count = 0;
217 spin_unlock_irqrestore(&rxq->lock, flags);
218
219 iwl_rx_replenish(trans);
220
221 iwl_trans_rx_hw_init(trans, rxq);
222
223 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
224 rxq->need_update = 1;
225 iwl_rx_queue_update_write_ptr(trans, rxq);
226 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
227
228 return 0;
229}
230
231static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
232{
233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
234 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
235 unsigned long flags;
236
237 /*if rxq->bd is NULL, it means that nothing has been allocated,
238 * exit now */
239 if (!rxq->bd) {
240 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
241 return;
242 }
243
244 spin_lock_irqsave(&rxq->lock, flags);
245 iwl_trans_rxq_free_rx_bufs(trans);
246 spin_unlock_irqrestore(&rxq->lock, flags);
247
248 dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
249 rxq->bd, rxq->bd_dma);
250 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
251 rxq->bd = NULL;
252
253 if (rxq->rb_stts)
254 dma_free_coherent(trans->dev,
255 sizeof(struct iwl_rb_status),
256 rxq->rb_stts, rxq->rb_stts_dma);
257 else
258 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
259 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
260 rxq->rb_stts = NULL;
261}
262
263static int iwl_trans_rx_stop(struct iwl_trans *trans)
264{
265
266 /* stop Rx DMA */
267 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
268 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
269 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
270}
271
272static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
273 struct iwl_dma_ptr *ptr, size_t size)
274{
275 if (WARN_ON(ptr->addr))
276 return -EINVAL;
277
278 ptr->addr = dma_alloc_coherent(trans->dev, size,
279 &ptr->dma, GFP_KERNEL);
280 if (!ptr->addr)
281 return -ENOMEM;
282 ptr->size = size;
283 return 0;
284}
285
286static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
287 struct iwl_dma_ptr *ptr)
288{
289 if (unlikely(!ptr->addr))
290 return;
291
292 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
293 memset(ptr, 0, sizeof(*ptr));
294}
295
296static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
297{
298 struct iwl_tx_queue *txq = (void *)data;
299 struct iwl_queue *q = &txq->q;
300 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
301 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
302 u32 scd_sram_addr = trans_pcie->scd_base_addr +
303 SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
304 u8 buf[16];
305 int i;
306
307 spin_lock(&txq->lock);
308 /* check if triggered erroneously */
309 if (txq->q.read_ptr == txq->q.write_ptr) {
310 spin_unlock(&txq->lock);
311 return;
312 }
313 spin_unlock(&txq->lock);
314
315 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
316 jiffies_to_msecs(trans_pcie->wd_timeout));
317 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
318 txq->q.read_ptr, txq->q.write_ptr);
319
320 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
321
322 iwl_print_hex_error(trans, buf, sizeof(buf));
323
324 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
325 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
326 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
327
328 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
329 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
330 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
331 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
332 u32 tbl_dw =
333 iwl_read_targ_mem(trans,
334 trans_pcie->scd_base_addr +
335 SCD_TRANS_TBL_OFFSET_QUEUE(i));
336
337 if (i & 0x1)
338 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
339 else
340 tbl_dw = tbl_dw & 0x0000FFFF;
341
342 IWL_ERR(trans,
343 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
344 i, active ? "" : "in", fifo, tbl_dw,
345 iwl_read_prph(trans,
346 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
347 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
348 }
349
350 for (i = q->read_ptr; i != q->write_ptr;
351 i = iwl_queue_inc_wrap(i, q->n_bd)) {
352 struct iwl_tx_cmd *tx_cmd =
353 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
354 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
355 get_unaligned_le32(&tx_cmd->scratch));
356 }
357
358 iwl_op_mode_nic_error(trans->op_mode);
359}
360
361static int iwl_trans_txq_alloc(struct iwl_trans *trans,
362 struct iwl_tx_queue *txq, int slots_num,
363 u32 txq_id)
364{
365 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
366 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
367 int i;
368
369 if (WARN_ON(txq->entries || txq->tfds))
370 return -EINVAL;
371
372 setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
373 (unsigned long)txq);
374 txq->trans_pcie = trans_pcie;
375
376 txq->q.n_window = slots_num;
377
378 txq->entries = kcalloc(slots_num,
379 sizeof(struct iwl_pcie_tx_queue_entry),
380 GFP_KERNEL);
381
382 if (!txq->entries)
383 goto error;
384
385 if (txq_id == trans_pcie->cmd_queue)
386 for (i = 0; i < slots_num; i++) {
387 txq->entries[i].cmd =
388 kmalloc(sizeof(struct iwl_device_cmd),
389 GFP_KERNEL);
390 if (!txq->entries[i].cmd)
391 goto error;
392 }
393
394 /* Circular buffer of transmit frame descriptors (TFDs),
395 * shared with device */
396 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
397 &txq->q.dma_addr, GFP_KERNEL);
398 if (!txq->tfds) {
399 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
400 goto error;
401 }
402 txq->q.id = txq_id;
403
404 return 0;
405error:
406 if (txq->entries && txq_id == trans_pcie->cmd_queue)
407 for (i = 0; i < slots_num; i++)
408 kfree(txq->entries[i].cmd);
409 kfree(txq->entries);
410 txq->entries = NULL;
411
412 return -ENOMEM;
413
414}
415
416static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
417 int slots_num, u32 txq_id)
418{
419 int ret;
420
421 txq->need_update = 0;
422
423 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
424 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
425 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
426
427 /* Initialize queue's high/low-water marks, and head/tail indexes */
428 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
429 txq_id);
430 if (ret)
431 return ret;
432
433 spin_lock_init(&txq->lock);
434
435 /*
436 * Tell nic where to find circular buffer of Tx Frame Descriptors for
437 * given Tx queue, and enable the DMA channel used for that queue.
438 * Circular buffer (TFD queue in DRAM) physical base address */
439 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
440 txq->q.dma_addr >> 8);
441
442 return 0;
443}
444
445/**
446 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
447 */
448static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
449{
450 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
451 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
452 struct iwl_queue *q = &txq->q;
453 enum dma_data_direction dma_dir;
454
455 if (!q->n_bd)
456 return;
457
458 /* In the command queue, all the TBs are mapped as BIDI
459 * so unmap them as such.
460 */
461 if (txq_id == trans_pcie->cmd_queue)
462 dma_dir = DMA_BIDIRECTIONAL;
463 else
464 dma_dir = DMA_TO_DEVICE;
465
466 spin_lock_bh(&txq->lock);
467 while (q->write_ptr != q->read_ptr) {
468 iwl_txq_free_tfd(trans, txq, dma_dir);
469 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
470 }
471 spin_unlock_bh(&txq->lock);
472}
473
474/**
475 * iwl_tx_queue_free - Deallocate DMA queue.
476 * @txq: Transmit queue to deallocate.
477 *
478 * Empty queue by removing and destroying all BD's.
479 * Free all buffers.
480 * 0-fill, but do not free "txq" descriptor structure.
481 */
482static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
483{
484 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
485 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
486 struct device *dev = trans->dev;
487 int i;
488
489 if (WARN_ON(!txq))
490 return;
491
492 iwl_tx_queue_unmap(trans, txq_id);
493
494 /* De-alloc array of command/tx buffers */
495 if (txq_id == trans_pcie->cmd_queue)
496 for (i = 0; i < txq->q.n_window; i++) {
497 kfree(txq->entries[i].cmd);
498 kfree(txq->entries[i].copy_cmd);
499 }
500
501 /* De-alloc circular buffer of TFDs */
502 if (txq->q.n_bd) {
503 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
504 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
505 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
506 }
507
508 kfree(txq->entries);
509 txq->entries = NULL;
510
511 del_timer_sync(&txq->stuck_timer);
512
513 /* 0-fill queue descriptor structure */
514 memset(txq, 0, sizeof(*txq));
515}
516
517/**
518 * iwl_trans_tx_free - Free TXQ Context
519 *
520 * Destroy all TX DMA queues and structures
521 */
522static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
523{
524 int txq_id;
525 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
526
527 /* Tx queues */
528 if (trans_pcie->txq) {
529 for (txq_id = 0;
530 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
531 iwl_tx_queue_free(trans, txq_id);
532 }
533
534 kfree(trans_pcie->txq);
535 trans_pcie->txq = NULL;
536
537 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
538
539 iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
540}
541
542/**
543 * iwl_trans_tx_alloc - allocate TX context
544 * Allocate all Tx DMA structures and initialize them
545 *
546 * @param priv
547 * @return error code
548 */
549static int iwl_trans_tx_alloc(struct iwl_trans *trans)
550{
551 int ret;
552 int txq_id, slots_num;
553 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
554
555 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
556 sizeof(struct iwlagn_scd_bc_tbl);
557
558 /*It is not allowed to alloc twice, so warn when this happens.
559 * We cannot rely on the previous allocation, so free and fail */
560 if (WARN_ON(trans_pcie->txq)) {
561 ret = -EINVAL;
562 goto error;
563 }
564
565 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
566 scd_bc_tbls_size);
567 if (ret) {
568 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
569 goto error;
570 }
571
572 /* Alloc keep-warm buffer */
573 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
574 if (ret) {
575 IWL_ERR(trans, "Keep Warm allocation failed\n");
576 goto error;
577 }
578
579 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
580 sizeof(struct iwl_tx_queue), GFP_KERNEL);
581 if (!trans_pcie->txq) {
582 IWL_ERR(trans, "Not enough memory for txq\n");
583 ret = ENOMEM;
584 goto error;
585 }
586
587 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
588 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
589 txq_id++) {
590 slots_num = (txq_id == trans_pcie->cmd_queue) ?
591 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
592 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
593 slots_num, txq_id);
594 if (ret) {
595 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
596 goto error;
597 }
598 }
599
600 return 0;
601
602error:
603 iwl_trans_pcie_tx_free(trans);
604
605 return ret;
606}
607static int iwl_tx_init(struct iwl_trans *trans)
608{
609 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
610 int ret;
611 int txq_id, slots_num;
612 unsigned long flags;
613 bool alloc = false;
614
615 if (!trans_pcie->txq) {
616 ret = iwl_trans_tx_alloc(trans);
617 if (ret)
618 goto error;
619 alloc = true;
620 }
621
622 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
623
624 /* Turn off all Tx DMA fifos */
625 iwl_write_prph(trans, SCD_TXFACT, 0);
626
627 /* Tell NIC where to find the "keep warm" buffer */
628 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
629 trans_pcie->kw.dma >> 4);
630
631 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
632
633 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
634 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
635 txq_id++) {
636 slots_num = (txq_id == trans_pcie->cmd_queue) ?
637 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
638 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
639 slots_num, txq_id);
640 if (ret) {
641 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
642 goto error;
643 }
644 }
645
646 return 0;
647error:
648 /*Upon error, free only if we allocated something */
649 if (alloc)
650 iwl_trans_pcie_tx_free(trans);
651 return ret;
652}
653
654static void iwl_set_pwr_vmain(struct iwl_trans *trans)
655{ 79{
656/* 80/*
657 * (for documentation purposes) 81 * (for documentation purposes)
@@ -673,18 +97,11 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
673#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 97#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
674#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 98#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
675 99
676static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans) 100static void iwl_pcie_apm_config(struct iwl_trans *trans)
677{ 101{
678 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
679 u16 pci_lnk_ctl; 103 u16 lctl;
680 104
681 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
682 &pci_lnk_ctl);
683 return pci_lnk_ctl;
684}
685
686static void iwl_apm_config(struct iwl_trans *trans)
687{
688 /* 105 /*
689 * HW bug W/A for instability in PCIe bus L0S->L1 transition. 106 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
690 * Check if BIOS (or OS) enabled L1-ASPM on this device. 107 * Check if BIOS (or OS) enabled L1-ASPM on this device.
@@ -693,29 +110,27 @@ static void iwl_apm_config(struct iwl_trans *trans)
693 * If not (unlikely), enable L0S, so there is at least some 110 * If not (unlikely), enable L0S, so there is at least some
694 * power savings, even without L1. 111 * power savings, even without L1.
695 */ 112 */
696 u16 lctl = iwl_pciexp_link_ctrl(trans); 113 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
697 114
698 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == 115 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
699 PCI_CFG_LINK_CTRL_VAL_L1_EN) { 116 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
700 /* L1-ASPM enabled; disable(!) L0S */ 117 /* L1-ASPM enabled; disable(!) L0S */
701 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 118 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
702 dev_printk(KERN_INFO, trans->dev, 119 dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
703 "L1 Enabled; Disabling L0S\n");
704 } else { 120 } else {
705 /* L1-ASPM disabled; enable(!) L0S */ 121 /* L1-ASPM disabled; enable(!) L0S */
706 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 122 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
707 dev_printk(KERN_INFO, trans->dev, 123 dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
708 "L1 Disabled; Enabling L0S\n");
709 } 124 }
710 trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); 125 trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
711} 126}
712 127
713/* 128/*
714 * Start up NIC's basic functionality after it has been reset 129 * Start up NIC's basic functionality after it has been reset
715 * (e.g. after platform boot, or shutdown via iwl_apm_stop()) 130 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
716 * NOTE: This does not load uCode nor start the embedded processor 131 * NOTE: This does not load uCode nor start the embedded processor
717 */ 132 */
718static int iwl_apm_init(struct iwl_trans *trans) 133static int iwl_pcie_apm_init(struct iwl_trans *trans)
719{ 134{
720 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 135 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
721 int ret = 0; 136 int ret = 0;
@@ -747,7 +162,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
747 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 162 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
748 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 163 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
749 164
750 iwl_apm_config(trans); 165 iwl_pcie_apm_config(trans);
751 166
752 /* Configure analog phase-lock-loop before activating to D0A */ 167 /* Configure analog phase-lock-loop before activating to D0A */
753 if (trans->cfg->base_params->pll_cfg_val) 168 if (trans->cfg->base_params->pll_cfg_val)
@@ -793,7 +208,7 @@ out:
793 return ret; 208 return ret;
794} 209}
795 210
796static int iwl_apm_stop_master(struct iwl_trans *trans) 211static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
797{ 212{
798 int ret = 0; 213 int ret = 0;
799 214
@@ -811,7 +226,7 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
811 return ret; 226 return ret;
812} 227}
813 228
814static void iwl_apm_stop(struct iwl_trans *trans) 229static void iwl_pcie_apm_stop(struct iwl_trans *trans)
815{ 230{
816 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 231 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
817 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); 232 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
@@ -819,7 +234,7 @@ static void iwl_apm_stop(struct iwl_trans *trans)
819 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); 234 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
820 235
821 /* Stop device's DMA activity */ 236 /* Stop device's DMA activity */
822 iwl_apm_stop_master(trans); 237 iwl_pcie_apm_stop_master(trans);
823 238
824 /* Reset the entire device */ 239 /* Reset the entire device */
825 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 240 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
@@ -834,29 +249,29 @@ static void iwl_apm_stop(struct iwl_trans *trans)
834 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 249 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
835} 250}
836 251
837static int iwl_nic_init(struct iwl_trans *trans) 252static int iwl_pcie_nic_init(struct iwl_trans *trans)
838{ 253{
839 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 254 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
840 unsigned long flags; 255 unsigned long flags;
841 256
842 /* nic_init */ 257 /* nic_init */
843 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 258 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
844 iwl_apm_init(trans); 259 iwl_pcie_apm_init(trans);
845 260
846 /* Set interrupt coalescing calibration timer to default (512 usecs) */ 261 /* Set interrupt coalescing calibration timer to default (512 usecs) */
847 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); 262 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
848 263
849 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 264 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
850 265
851 iwl_set_pwr_vmain(trans); 266 iwl_pcie_set_pwr_vmain(trans);
852 267
853 iwl_op_mode_nic_config(trans->op_mode); 268 iwl_op_mode_nic_config(trans->op_mode);
854 269
855 /* Allocate the RX queue, or reset if it is already allocated */ 270 /* Allocate the RX queue, or reset if it is already allocated */
856 iwl_rx_init(trans); 271 iwl_pcie_rx_init(trans);
857 272
858 /* Allocate or reset and init all Tx and Command queues */ 273 /* Allocate or reset and init all Tx and Command queues */
859 if (iwl_tx_init(trans)) 274 if (iwl_pcie_tx_init(trans))
860 return -ENOMEM; 275 return -ENOMEM;
861 276
862 if (trans->cfg->base_params->shadow_reg_enable) { 277 if (trans->cfg->base_params->shadow_reg_enable) {
@@ -871,7 +286,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
871#define HW_READY_TIMEOUT (50) 286#define HW_READY_TIMEOUT (50)
872 287
873/* Note: returns poll_bit return value, which is >= 0 if success */ 288/* Note: returns poll_bit return value, which is >= 0 if success */
874static int iwl_set_hw_ready(struct iwl_trans *trans) 289static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
875{ 290{
876 int ret; 291 int ret;
877 292
@@ -889,14 +304,14 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
889} 304}
890 305
891/* Note: returns standard 0/-ERROR code */ 306/* Note: returns standard 0/-ERROR code */
892static int iwl_prepare_card_hw(struct iwl_trans *trans) 307static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
893{ 308{
894 int ret; 309 int ret;
895 int t = 0; 310 int t = 0;
896 311
897 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 312 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
898 313
899 ret = iwl_set_hw_ready(trans); 314 ret = iwl_pcie_set_hw_ready(trans);
900 /* If the card is ready, exit 0 */ 315 /* If the card is ready, exit 0 */
901 if (ret >= 0) 316 if (ret >= 0)
902 return 0; 317 return 0;
@@ -906,7 +321,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
906 CSR_HW_IF_CONFIG_REG_PREPARE); 321 CSR_HW_IF_CONFIG_REG_PREPARE);
907 322
908 do { 323 do {
909 ret = iwl_set_hw_ready(trans); 324 ret = iwl_pcie_set_hw_ready(trans);
910 if (ret >= 0) 325 if (ret >= 0)
911 return 0; 326 return 0;
912 327
@@ -920,7 +335,7 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
920/* 335/*
921 * ucode 336 * ucode
922 */ 337 */
923static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, 338static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
924 dma_addr_t phy_addr, u32 byte_cnt) 339 dma_addr_t phy_addr, u32 byte_cnt)
925{ 340{
926 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 341 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -967,7 +382,7 @@ static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
967 return 0; 382 return 0;
968} 383}
969 384
970static int iwl_load_section(struct iwl_trans *trans, u8 section_num, 385static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
971 const struct fw_desc *section) 386 const struct fw_desc *section)
972{ 387{
973 u8 *v_addr; 388 u8 *v_addr;
@@ -988,8 +403,9 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
988 copy_size = min_t(u32, PAGE_SIZE, section->len - offset); 403 copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
989 404
990 memcpy(v_addr, (u8 *)section->data + offset, copy_size); 405 memcpy(v_addr, (u8 *)section->data + offset, copy_size);
991 ret = iwl_load_firmware_chunk(trans, section->offset + offset, 406 ret = iwl_pcie_load_firmware_chunk(trans,
992 p_addr, copy_size); 407 section->offset + offset,
408 p_addr, copy_size);
993 if (ret) { 409 if (ret) {
994 IWL_ERR(trans, 410 IWL_ERR(trans,
995 "Could not load the [%d] uCode section\n", 411 "Could not load the [%d] uCode section\n",
@@ -1002,7 +418,7 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
1002 return ret; 418 return ret;
1003} 419}
1004 420
1005static int iwl_load_given_ucode(struct iwl_trans *trans, 421static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
1006 const struct fw_img *image) 422 const struct fw_img *image)
1007{ 423{
1008 int i, ret = 0; 424 int i, ret = 0;
@@ -1011,7 +427,7 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
1011 if (!image->sec[i].data) 427 if (!image->sec[i].data)
1012 break; 428 break;
1013 429
1014 ret = iwl_load_section(trans, i, &image->sec[i]); 430 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
1015 if (ret) 431 if (ret)
1016 return ret; 432 return ret;
1017 } 433 }
@@ -1025,15 +441,18 @@ static int iwl_load_given_ucode(struct iwl_trans *trans,
1025static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, 441static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1026 const struct fw_img *fw) 442 const struct fw_img *fw)
1027{ 443{
444 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 int ret; 445 int ret;
1029 bool hw_rfkill; 446 bool hw_rfkill;
1030 447
1031 /* This may fail if AMT took ownership of the device */ 448 /* This may fail if AMT took ownership of the device */
1032 if (iwl_prepare_card_hw(trans)) { 449 if (iwl_pcie_prepare_card_hw(trans)) {
1033 IWL_WARN(trans, "Exit HW not ready\n"); 450 IWL_WARN(trans, "Exit HW not ready\n");
1034 return -EIO; 451 return -EIO;
1035 } 452 }
1036 453
454 clear_bit(STATUS_FW_ERROR, &trans_pcie->status);
455
1037 iwl_enable_rfkill_int(trans); 456 iwl_enable_rfkill_int(trans);
1038 457
1039 /* If platform's RF_KILL switch is NOT set to KILL */ 458 /* If platform's RF_KILL switch is NOT set to KILL */
@@ -1044,7 +463,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1044 463
1045 iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 464 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1046 465
1047 ret = iwl_nic_init(trans); 466 ret = iwl_pcie_nic_init(trans);
1048 if (ret) { 467 if (ret) {
1049 IWL_ERR(trans, "Unable to init nic\n"); 468 IWL_ERR(trans, "Unable to init nic\n");
1050 return ret; 469 return ret;
@@ -1064,125 +483,13 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1064 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 483 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1065 484
1066 /* Load the given image to the HW */ 485 /* Load the given image to the HW */
1067 return iwl_load_given_ucode(trans, fw); 486 return iwl_pcie_load_given_ucode(trans, fw);
1068}
1069
1070/*
1071 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1072 */
1073static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1074{
1075 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1076 IWL_TRANS_GET_PCIE_TRANS(trans);
1077
1078 iwl_write_prph(trans, SCD_TXFACT, mask);
1079}
1080
1081static void iwl_tx_start(struct iwl_trans *trans)
1082{
1083 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1084 u32 a;
1085 int chan;
1086 u32 reg_val;
1087
1088 /* make sure all queue are not stopped/used */
1089 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1090 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1091
1092 trans_pcie->scd_base_addr =
1093 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
1094 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
1095 /* reset conext data memory */
1096 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
1097 a += 4)
1098 iwl_write_targ_mem(trans, a, 0);
1099 /* reset tx status memory */
1100 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
1101 a += 4)
1102 iwl_write_targ_mem(trans, a, 0);
1103 for (; a < trans_pcie->scd_base_addr +
1104 SCD_TRANS_TBL_OFFSET_QUEUE(
1105 trans->cfg->base_params->num_of_queues);
1106 a += 4)
1107 iwl_write_targ_mem(trans, a, 0);
1108
1109 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1110 trans_pcie->scd_bc_tbls.dma >> 10);
1111
1112 /* The chain extension of the SCD doesn't work well. This feature is
1113 * enabled by default by the HW, so we need to disable it manually.
1114 */
1115 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1116
1117 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
1118 trans_pcie->cmd_fifo);
1119
1120 /* Activate all Tx DMA/FIFO channels */
1121 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1122
1123 /* Enable DMA channel */
1124 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1125 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1126 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1127 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1128
1129 /* Update FH chicken bits */
1130 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
1131 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
1132 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1133
1134 /* Enable L1-Active */
1135 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
1136 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1137} 487}
1138 488
1139static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans) 489static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1140{
1141 iwl_reset_ict(trans);
1142 iwl_tx_start(trans);
1143}
1144
1145/**
1146 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
1147 */
1148static int iwl_trans_tx_stop(struct iwl_trans *trans)
1149{ 490{
1150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 491 iwl_pcie_reset_ict(trans);
1151 int ch, txq_id, ret; 492 iwl_pcie_tx_start(trans, scd_addr);
1152 unsigned long flags;
1153
1154 /* Turn off all Tx DMA fifos */
1155 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1156
1157 iwl_trans_txq_set_sched(trans, 0);
1158
1159 /* Stop each Tx DMA channel, and wait for it to be idle */
1160 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
1161 iwl_write_direct32(trans,
1162 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
1163 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
1164 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
1165 if (ret < 0)
1166 IWL_ERR(trans,
1167 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
1168 ch,
1169 iwl_read_direct32(trans,
1170 FH_TSSR_TX_STATUS_REG));
1171 }
1172 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1173
1174 if (!trans_pcie->txq) {
1175 IWL_WARN(trans,
1176 "Stopping tx queues that aren't allocated...\n");
1177 return 0;
1178 }
1179
1180 /* Unmap DMA from host system and free skb's */
1181 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
1182 txq_id++)
1183 iwl_tx_queue_unmap(trans, txq_id);
1184
1185 return 0;
1186} 493}
1187 494
1188static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 495static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
@@ -1196,7 +503,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1196 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 503 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1197 504
1198 /* device going down, Stop using ICT table */ 505 /* device going down, Stop using ICT table */
1199 iwl_disable_ict(trans); 506 iwl_pcie_disable_ict(trans);
1200 507
1201 /* 508 /*
1202 * If a HW restart happens during firmware loading, 509 * If a HW restart happens during firmware loading,
@@ -1206,8 +513,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1206 * already dead. 513 * already dead.
1207 */ 514 */
1208 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) { 515 if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
1209 iwl_trans_tx_stop(trans); 516 iwl_pcie_tx_stop(trans);
1210 iwl_trans_rx_stop(trans); 517 iwl_pcie_rx_stop(trans);
1211 518
1212 /* Power-down device's busmaster DMA clocks */ 519 /* Power-down device's busmaster DMA clocks */
1213 iwl_write_prph(trans, APMG_CLK_DIS_REG, 520 iwl_write_prph(trans, APMG_CLK_DIS_REG,
@@ -1220,7 +527,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1220 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 527 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1221 528
1222 /* Stop the device, and put it in low power state */ 529 /* Stop the device, and put it in low power state */
1223 iwl_apm_stop(trans); 530 iwl_pcie_apm_stop(trans);
1224 531
1225 /* Upon stop, the APM issues an interrupt if HW RF kill is set. 532 /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1226 * Clean again the interrupt here 533 * Clean again the interrupt here
@@ -1245,6 +552,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1245 clear_bit(STATUS_INT_ENABLED, &trans_pcie->status); 552 clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
1246 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status); 553 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
1247 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status); 554 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
555 clear_bit(STATUS_RFKILL, &trans_pcie->status);
1248} 556}
1249 557
1250static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) 558static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1258,169 +566,6 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1258 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 566 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1259} 567}
1260 568
1261static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1262 struct iwl_device_cmd *dev_cmd, int txq_id)
1263{
1264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1265 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1266 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1267 struct iwl_cmd_meta *out_meta;
1268 struct iwl_tx_queue *txq;
1269 struct iwl_queue *q;
1270 dma_addr_t phys_addr = 0;
1271 dma_addr_t txcmd_phys;
1272 dma_addr_t scratch_phys;
1273 u16 len, firstlen, secondlen;
1274 u8 wait_write_ptr = 0;
1275 __le16 fc = hdr->frame_control;
1276 u8 hdr_len = ieee80211_hdrlen(fc);
1277 u16 __maybe_unused wifi_seq;
1278
1279 txq = &trans_pcie->txq[txq_id];
1280 q = &txq->q;
1281
1282 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1283 WARN_ON_ONCE(1);
1284 return -EINVAL;
1285 }
1286
1287 spin_lock(&txq->lock);
1288
1289 /* In AGG mode, the index in the ring must correspond to the WiFi
1290 * sequence number. This is a HW requirements to help the SCD to parse
1291 * the BA.
1292 * Check here that the packets are in the right place on the ring.
1293 */
1294#ifdef CONFIG_IWLWIFI_DEBUG
1295 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1296 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1297 ((wifi_seq & 0xff) != q->write_ptr),
1298 "Q: %d WiFi Seq %d tfdNum %d",
1299 txq_id, wifi_seq, q->write_ptr);
1300#endif
1301
1302 /* Set up driver data for this TFD */
1303 txq->entries[q->write_ptr].skb = skb;
1304 txq->entries[q->write_ptr].cmd = dev_cmd;
1305
1306 dev_cmd->hdr.cmd = REPLY_TX;
1307 dev_cmd->hdr.sequence =
1308 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1309 INDEX_TO_SEQ(q->write_ptr)));
1310
1311 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1312 out_meta = &txq->entries[q->write_ptr].meta;
1313
1314 /*
1315 * Use the first empty entry in this queue's command buffer array
1316 * to contain the Tx command and MAC header concatenated together
1317 * (payload data will be in another buffer).
1318 * Size of this varies, due to varying MAC header length.
1319 * If end is not dword aligned, we'll have 2 extra bytes at the end
1320 * of the MAC header (device reads on dword boundaries).
1321 * We'll tell device about this padding later.
1322 */
1323 len = sizeof(struct iwl_tx_cmd) +
1324 sizeof(struct iwl_cmd_header) + hdr_len;
1325 firstlen = (len + 3) & ~3;
1326
1327 /* Tell NIC about any 2-byte padding after MAC header */
1328 if (firstlen != len)
1329 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1330
1331 /* Physical address of this Tx command's header (not MAC header!),
1332 * within command buffer array. */
1333 txcmd_phys = dma_map_single(trans->dev,
1334 &dev_cmd->hdr, firstlen,
1335 DMA_BIDIRECTIONAL);
1336 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1337 goto out_err;
1338 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1339 dma_unmap_len_set(out_meta, len, firstlen);
1340
1341 if (!ieee80211_has_morefrags(fc)) {
1342 txq->need_update = 1;
1343 } else {
1344 wait_write_ptr = 1;
1345 txq->need_update = 0;
1346 }
1347
1348 /* Set up TFD's 2nd entry to point directly to remainder of skb,
1349 * if any (802.11 null frames have no payload). */
1350 secondlen = skb->len - hdr_len;
1351 if (secondlen > 0) {
1352 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1353 secondlen, DMA_TO_DEVICE);
1354 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1355 dma_unmap_single(trans->dev,
1356 dma_unmap_addr(out_meta, mapping),
1357 dma_unmap_len(out_meta, len),
1358 DMA_BIDIRECTIONAL);
1359 goto out_err;
1360 }
1361 }
1362
1363 /* Attach buffers to TFD */
1364 iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
1365 if (secondlen > 0)
1366 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
1367 secondlen, 0);
1368
1369 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1370 offsetof(struct iwl_tx_cmd, scratch);
1371
1372 /* take back ownership of DMA buffer to enable update */
1373 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1374 DMA_BIDIRECTIONAL);
1375 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1376 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1377
1378 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1379 le16_to_cpu(dev_cmd->hdr.sequence));
1380 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1381
1382 /* Set up entry for this TFD in Tx byte-count array */
1383 iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1384
1385 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1386 DMA_BIDIRECTIONAL);
1387
1388 trace_iwlwifi_dev_tx(trans->dev,
1389 &txq->tfds[txq->q.write_ptr],
1390 sizeof(struct iwl_tfd),
1391 &dev_cmd->hdr, firstlen,
1392 skb->data + hdr_len, secondlen);
1393
1394 /* start timer if queue currently empty */
1395 if (txq->need_update && q->read_ptr == q->write_ptr &&
1396 trans_pcie->wd_timeout)
1397 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1398
1399 /* Tell device the write index *just past* this latest filled TFD */
1400 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1401 iwl_txq_update_write_ptr(trans, txq);
1402
1403 /*
1404 * At this point the frame is "transmitted" successfully
1405 * and we will get a TX status notification eventually,
1406 * regardless of the value of ret. "ret" only indicates
1407 * whether or not we should update the write pointer.
1408 */
1409 if (iwl_queue_space(q) < q->high_mark) {
1410 if (wait_write_ptr) {
1411 txq->need_update = 1;
1412 iwl_txq_update_write_ptr(trans, txq);
1413 } else {
1414 iwl_stop_queue(trans, txq);
1415 }
1416 }
1417 spin_unlock(&txq->lock);
1418 return 0;
1419 out_err:
1420 spin_unlock(&txq->lock);
1421 return -1;
1422}
1423
1424static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 569static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1425{ 570{
1426 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 571 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1431,29 +576,28 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1431 576
1432 if (!trans_pcie->irq_requested) { 577 if (!trans_pcie->irq_requested) {
1433 tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long)) 578 tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1434 iwl_irq_tasklet, (unsigned long)trans); 579 iwl_pcie_tasklet, (unsigned long)trans);
1435 580
1436 iwl_alloc_isr_ict(trans); 581 iwl_pcie_alloc_ict(trans);
1437 582
1438 err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED, 583 err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
1439 DRV_NAME, trans); 584 IRQF_SHARED, DRV_NAME, trans);
1440 if (err) { 585 if (err) {
1441 IWL_ERR(trans, "Error allocating IRQ %d\n", 586 IWL_ERR(trans, "Error allocating IRQ %d\n",
1442 trans_pcie->irq); 587 trans_pcie->irq);
1443 goto error; 588 goto error;
1444 } 589 }
1445 590
1446 INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1447 trans_pcie->irq_requested = true; 591 trans_pcie->irq_requested = true;
1448 } 592 }
1449 593
1450 err = iwl_prepare_card_hw(trans); 594 err = iwl_pcie_prepare_card_hw(trans);
1451 if (err) { 595 if (err) {
1452 IWL_ERR(trans, "Error while preparing HW: %d\n", err); 596 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1453 goto err_free_irq; 597 goto err_free_irq;
1454 } 598 }
1455 599
1456 iwl_apm_init(trans); 600 iwl_pcie_apm_init(trans);
1457 601
1458 /* From now on, the op_mode will be kept updated about RF kill state */ 602 /* From now on, the op_mode will be kept updated about RF kill state */
1459 iwl_enable_rfkill_int(trans); 603 iwl_enable_rfkill_int(trans);
@@ -1467,7 +611,7 @@ err_free_irq:
1467 trans_pcie->irq_requested = false; 611 trans_pcie->irq_requested = false;
1468 free_irq(trans_pcie->irq, trans); 612 free_irq(trans_pcie->irq, trans);
1469error: 613error:
1470 iwl_free_isr_ict(trans); 614 iwl_pcie_free_ict(trans);
1471 tasklet_kill(&trans_pcie->irq_tasklet); 615 tasklet_kill(&trans_pcie->irq_tasklet);
1472 return err; 616 return err;
1473} 617}
@@ -1483,12 +627,14 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1483 iwl_disable_interrupts(trans); 627 iwl_disable_interrupts(trans);
1484 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 628 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1485 629
1486 iwl_apm_stop(trans); 630 iwl_pcie_apm_stop(trans);
1487 631
1488 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 632 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1489 iwl_disable_interrupts(trans); 633 iwl_disable_interrupts(trans);
1490 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 634 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1491 635
636 iwl_pcie_disable_ict(trans);
637
1492 if (!op_mode_leaving) { 638 if (!op_mode_leaving) {
1493 /* 639 /*
1494 * Even if we stop the HW, we still want the RF kill 640 * Even if we stop the HW, we still want the RF kill
@@ -1507,28 +653,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1507 } 653 }
1508} 654}
1509 655
1510static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1511 struct sk_buff_head *skbs)
1512{
1513 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1514 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1515 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1516 int tfd_num = ssn & (txq->q.n_bd - 1);
1517 int freed = 0;
1518
1519 spin_lock(&txq->lock);
1520
1521 if (txq->q.read_ptr != tfd_num) {
1522 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1523 txq_id, txq->q.read_ptr, tfd_num, ssn);
1524 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1525 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1526 iwl_wake_queue(trans, txq);
1527 }
1528
1529 spin_unlock(&txq->lock);
1530}
1531
1532static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 656static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1533{ 657{
1534 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 658 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
@@ -1544,6 +668,20 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1544 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 668 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1545} 669}
1546 670
671static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
672{
673 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
674 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
675}
676
677static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
678 u32 val)
679{
680 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
681 ((addr & 0x0000FFFF) | (3 << 24)));
682 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
683}
684
1547static void iwl_trans_pcie_configure(struct iwl_trans *trans, 685static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1548 const struct iwl_trans_config *trans_cfg) 686 const struct iwl_trans_config *trans_cfg)
1549{ 687{
@@ -1575,12 +713,12 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1575{ 713{
1576 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 714 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1577 715
1578 iwl_trans_pcie_tx_free(trans); 716 iwl_pcie_tx_free(trans);
1579 iwl_trans_pcie_rx_free(trans); 717 iwl_pcie_rx_free(trans);
1580 718
1581 if (trans_pcie->irq_requested == true) { 719 if (trans_pcie->irq_requested == true) {
1582 free_irq(trans_pcie->irq, trans); 720 free_irq(trans_pcie->irq, trans);
1583 iwl_free_isr_ict(trans); 721 iwl_pcie_free_ict(trans);
1584 } 722 }
1585 723
1586 pci_disable_msi(trans_pcie->pci_dev); 724 pci_disable_msi(trans_pcie->pci_dev);
@@ -1626,10 +764,10 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1626 764
1627#define IWL_FLUSH_WAIT_MS 2000 765#define IWL_FLUSH_WAIT_MS 2000
1628 766
1629static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans) 767static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
1630{ 768{
1631 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 769 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1632 struct iwl_tx_queue *txq; 770 struct iwl_txq *txq;
1633 struct iwl_queue *q; 771 struct iwl_queue *q;
1634 int cnt; 772 int cnt;
1635 unsigned long now = jiffies; 773 unsigned long now = jiffies;
@@ -1673,7 +811,7 @@ static const char *get_fh_string(int cmd)
1673#undef IWL_CMD 811#undef IWL_CMD
1674} 812}
1675 813
1676int iwl_dump_fh(struct iwl_trans *trans, char **buf) 814int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
1677{ 815{
1678 int i; 816 int i;
1679 static const u32 fh_tbl[] = { 817 static const u32 fh_tbl[] = {
@@ -1752,7 +890,7 @@ static const char *get_csr_string(int cmd)
1752#undef IWL_CMD 890#undef IWL_CMD
1753} 891}
1754 892
1755void iwl_dump_csr(struct iwl_trans *trans) 893void iwl_pcie_dump_csr(struct iwl_trans *trans)
1756{ 894{
1757 int i; 895 int i;
1758 static const u32 csr_tbl[] = { 896 static const u32 csr_tbl[] = {
@@ -1809,7 +947,6 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
1809 const char __user *user_buf, \ 947 const char __user *user_buf, \
1810 size_t count, loff_t *ppos); 948 size_t count, loff_t *ppos);
1811 949
1812
1813#define DEBUGFS_READ_FILE_OPS(name) \ 950#define DEBUGFS_READ_FILE_OPS(name) \
1814 DEBUGFS_READ_FUNC(name); \ 951 DEBUGFS_READ_FUNC(name); \
1815static const struct file_operations iwl_dbgfs_##name##_ops = { \ 952static const struct file_operations iwl_dbgfs_##name##_ops = { \
@@ -1842,7 +979,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1842{ 979{
1843 struct iwl_trans *trans = file->private_data; 980 struct iwl_trans *trans = file->private_data;
1844 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 981 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1845 struct iwl_tx_queue *txq; 982 struct iwl_txq *txq;
1846 struct iwl_queue *q; 983 struct iwl_queue *q;
1847 char *buf; 984 char *buf;
1848 int pos = 0; 985 int pos = 0;
@@ -1879,7 +1016,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1879{ 1016{
1880 struct iwl_trans *trans = file->private_data; 1017 struct iwl_trans *trans = file->private_data;
1881 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1018 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1882 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 1019 struct iwl_rxq *rxq = &trans_pcie->rxq;
1883 char buf[256]; 1020 char buf[256];
1884 int pos = 0; 1021 int pos = 0;
1885 const size_t bufsz = sizeof(buf); 1022 const size_t bufsz = sizeof(buf);
@@ -1998,7 +1135,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
1998 if (sscanf(buf, "%d", &csr) != 1) 1135 if (sscanf(buf, "%d", &csr) != 1)
1999 return -EFAULT; 1136 return -EFAULT;
2000 1137
2001 iwl_dump_csr(trans); 1138 iwl_pcie_dump_csr(trans);
2002 1139
2003 return count; 1140 return count;
2004} 1141}
@@ -2012,7 +1149,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2012 int pos = 0; 1149 int pos = 0;
2013 ssize_t ret = -EFAULT; 1150 ssize_t ret = -EFAULT;
2014 1151
2015 ret = pos = iwl_dump_fh(trans, &buf); 1152 ret = pos = iwl_pcie_dump_fh(trans, &buf);
2016 if (buf) { 1153 if (buf) {
2017 ret = simple_read_from_buffer(user_buf, 1154 ret = simple_read_from_buffer(user_buf,
2018 count, ppos, buf, pos); 1155 count, ppos, buf, pos);
@@ -2081,7 +1218,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2081 1218
2082 .wowlan_suspend = iwl_trans_pcie_wowlan_suspend, 1219 .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
2083 1220
2084 .send_cmd = iwl_trans_pcie_send_cmd, 1221 .send_cmd = iwl_trans_pcie_send_hcmd,
2085 1222
2086 .tx = iwl_trans_pcie_tx, 1223 .tx = iwl_trans_pcie_tx,
2087 .reclaim = iwl_trans_pcie_reclaim, 1224 .reclaim = iwl_trans_pcie_reclaim,
@@ -2091,7 +1228,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2091 1228
2092 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 1229 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2093 1230
2094 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, 1231 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
2095 1232
2096#ifdef CONFIG_PM_SLEEP 1233#ifdef CONFIG_PM_SLEEP
2097 .suspend = iwl_trans_pcie_suspend, 1234 .suspend = iwl_trans_pcie_suspend,
@@ -2100,6 +1237,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
2100 .write8 = iwl_trans_pcie_write8, 1237 .write8 = iwl_trans_pcie_write8,
2101 .write32 = iwl_trans_pcie_write32, 1238 .write32 = iwl_trans_pcie_write32,
2102 .read32 = iwl_trans_pcie_read32, 1239 .read32 = iwl_trans_pcie_read32,
1240 .read_prph = iwl_trans_pcie_read_prph,
1241 .write_prph = iwl_trans_pcie_write_prph,
2103 .configure = iwl_trans_pcie_configure, 1242 .configure = iwl_trans_pcie_configure,
2104 .set_pmi = iwl_trans_pcie_set_pmi, 1243 .set_pmi = iwl_trans_pcie_set_pmi,
2105}; 1244};
@@ -2116,7 +1255,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2116 trans = kzalloc(sizeof(struct iwl_trans) + 1255 trans = kzalloc(sizeof(struct iwl_trans) +
2117 sizeof(struct iwl_trans_pcie), GFP_KERNEL); 1256 sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2118 1257
2119 if (WARN_ON(!trans)) 1258 if (!trans)
2120 return NULL; 1259 return NULL;
2121 1260
2122 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1261 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2149,43 +1288,38 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2149 DMA_BIT_MASK(32)); 1288 DMA_BIT_MASK(32));
2150 /* both attempts failed: */ 1289 /* both attempts failed: */
2151 if (err) { 1290 if (err) {
2152 dev_printk(KERN_ERR, &pdev->dev, 1291 dev_err(&pdev->dev, "No suitable DMA available\n");
2153 "No suitable DMA available.\n");
2154 goto out_pci_disable_device; 1292 goto out_pci_disable_device;
2155 } 1293 }
2156 } 1294 }
2157 1295
2158 err = pci_request_regions(pdev, DRV_NAME); 1296 err = pci_request_regions(pdev, DRV_NAME);
2159 if (err) { 1297 if (err) {
2160 dev_printk(KERN_ERR, &pdev->dev, 1298 dev_err(&pdev->dev, "pci_request_regions failed\n");
2161 "pci_request_regions failed\n");
2162 goto out_pci_disable_device; 1299 goto out_pci_disable_device;
2163 } 1300 }
2164 1301
2165 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 1302 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2166 if (!trans_pcie->hw_base) { 1303 if (!trans_pcie->hw_base) {
2167 dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n"); 1304 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
2168 err = -ENODEV; 1305 err = -ENODEV;
2169 goto out_pci_release_regions; 1306 goto out_pci_release_regions;
2170 } 1307 }
2171 1308
2172 dev_printk(KERN_INFO, &pdev->dev,
2173 "pci_resource_len = 0x%08llx\n",
2174 (unsigned long long) pci_resource_len(pdev, 0));
2175 dev_printk(KERN_INFO, &pdev->dev,
2176 "pci_resource_base = %p\n", trans_pcie->hw_base);
2177
2178 dev_printk(KERN_INFO, &pdev->dev,
2179 "HW Revision ID = 0x%X\n", pdev->revision);
2180
2181 /* We disable the RETRY_TIMEOUT register (0x41) to keep 1309 /* We disable the RETRY_TIMEOUT register (0x41) to keep
2182 * PCI Tx retries from interfering with C3 CPU state */ 1310 * PCI Tx retries from interfering with C3 CPU state */
2183 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 1311 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2184 1312
2185 err = pci_enable_msi(pdev); 1313 err = pci_enable_msi(pdev);
2186 if (err) 1314 if (err) {
2187 dev_printk(KERN_ERR, &pdev->dev, 1315 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
2188 "pci_enable_msi failed(0X%x)\n", err); 1316 /* enable rfkill interrupt: hw bug w/a */
1317 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1318 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1319 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1320 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1321 }
1322 }
2189 1323
2190 trans->dev = &pdev->dev; 1324 trans->dev = &pdev->dev;
2191 trans_pcie->irq = pdev->irq; 1325 trans_pcie->irq = pdev->irq;
@@ -2195,16 +1329,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2195 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 1329 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2196 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 1330 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
2197 1331
2198 /* TODO: Move this away, not needed if not MSI */
2199 /* enable rfkill interrupt: hw bug w/a */
2200 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2201 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2202 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2203 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
2204 }
2205
2206 /* Initialize the wait queue for commands */ 1332 /* Initialize the wait queue for commands */
2207 init_waitqueue_head(&trans->wait_command_queue); 1333 init_waitqueue_head(&trans_pcie->wait_command_queue);
2208 spin_lock_init(&trans->reg_lock); 1334 spin_lock_init(&trans->reg_lock);
2209 1335
2210 snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), 1336 snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 79a4ddc002d3..6c5b867c353a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -42,12 +42,170 @@
42#define IWL_TX_CRC_SIZE 4 42#define IWL_TX_CRC_SIZE 4
43#define IWL_TX_DELIMITER_SIZE 4 43#define IWL_TX_DELIMITER_SIZE 4
44 44
45/** 45/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
46 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 46 * DMA services
47 *
48 * Theory of operation
49 *
50 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
51 * of buffer descriptors, each of which points to one or more data buffers for
52 * the device to read from or fill. Driver and device exchange status of each
53 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
54 * entries in each circular buffer, to protect against confusing empty and full
55 * queue states.
56 *
57 * The device reads or writes the data in the queues via the device's several
58 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
59 *
60 * For Tx queue, there are low mark and high mark limits. If, after queuing
61 * the packet for Tx, free space become < low mark, Tx queue stopped. When
62 * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
63 * Tx queue resumed.
64 *
65 ***************************************************/
66static int iwl_queue_space(const struct iwl_queue *q)
67{
68 int s = q->read_ptr - q->write_ptr;
69
70 if (q->read_ptr > q->write_ptr)
71 s -= q->n_bd;
72
73 if (s <= 0)
74 s += q->n_window;
75 /* keep some reserve to not confuse empty and full situations */
76 s -= 2;
77 if (s < 0)
78 s = 0;
79 return s;
80}
81
82/*
83 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
84 */
85static int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
86{
87 q->n_bd = count;
88 q->n_window = slots_num;
89 q->id = id;
90
91 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
92 * and iwl_queue_dec_wrap are broken. */
93 if (WARN_ON(!is_power_of_2(count)))
94 return -EINVAL;
95
96 /* slots_num must be power-of-two size, otherwise
97 * get_cmd_index is broken. */
98 if (WARN_ON(!is_power_of_2(slots_num)))
99 return -EINVAL;
100
101 q->low_mark = q->n_window / 4;
102 if (q->low_mark < 4)
103 q->low_mark = 4;
104
105 q->high_mark = q->n_window / 8;
106 if (q->high_mark < 2)
107 q->high_mark = 2;
108
109 q->write_ptr = 0;
110 q->read_ptr = 0;
111
112 return 0;
113}
114
115static int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
116 struct iwl_dma_ptr *ptr, size_t size)
117{
118 if (WARN_ON(ptr->addr))
119 return -EINVAL;
120
121 ptr->addr = dma_alloc_coherent(trans->dev, size,
122 &ptr->dma, GFP_KERNEL);
123 if (!ptr->addr)
124 return -ENOMEM;
125 ptr->size = size;
126 return 0;
127}
128
129static void iwl_pcie_free_dma_ptr(struct iwl_trans *trans,
130 struct iwl_dma_ptr *ptr)
131{
132 if (unlikely(!ptr->addr))
133 return;
134
135 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
136 memset(ptr, 0, sizeof(*ptr));
137}
138
139static void iwl_pcie_txq_stuck_timer(unsigned long data)
140{
141 struct iwl_txq *txq = (void *)data;
142 struct iwl_queue *q = &txq->q;
143 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
144 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
145 u32 scd_sram_addr = trans_pcie->scd_base_addr +
146 SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
147 u8 buf[16];
148 int i;
149
150 spin_lock(&txq->lock);
151 /* check if triggered erroneously */
152 if (txq->q.read_ptr == txq->q.write_ptr) {
153 spin_unlock(&txq->lock);
154 return;
155 }
156 spin_unlock(&txq->lock);
157
158 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
159 jiffies_to_msecs(trans_pcie->wd_timeout));
160 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
161 txq->q.read_ptr, txq->q.write_ptr);
162
163 iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
164
165 iwl_print_hex_error(trans, buf, sizeof(buf));
166
167 for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
168 IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
169 iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
170
171 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
172 u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
173 u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
174 bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
175 u32 tbl_dw =
176 iwl_read_targ_mem(trans,
177 trans_pcie->scd_base_addr +
178 SCD_TRANS_TBL_OFFSET_QUEUE(i));
179
180 if (i & 0x1)
181 tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
182 else
183 tbl_dw = tbl_dw & 0x0000FFFF;
184
185 IWL_ERR(trans,
186 "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
187 i, active ? "" : "in", fifo, tbl_dw,
188 iwl_read_prph(trans,
189 SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
190 iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
191 }
192
193 for (i = q->read_ptr; i != q->write_ptr;
194 i = iwl_queue_inc_wrap(i, q->n_bd)) {
195 struct iwl_tx_cmd *tx_cmd =
196 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
197 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
198 get_unaligned_le32(&tx_cmd->scratch));
199 }
200
201 iwl_op_mode_nic_error(trans->op_mode);
202}
203
204/*
205 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
47 */ 206 */
48void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 207static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
49 struct iwl_tx_queue *txq, 208 struct iwl_txq *txq, u16 byte_cnt)
50 u16 byte_cnt)
51{ 209{
52 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 210 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
53 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 211 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -88,10 +246,36 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
88 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; 246 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
89} 247}
90 248
91/** 249static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
92 * iwl_txq_update_write_ptr - Send new write index to hardware 250 struct iwl_txq *txq)
251{
252 struct iwl_trans_pcie *trans_pcie =
253 IWL_TRANS_GET_PCIE_TRANS(trans);
254 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
255 int txq_id = txq->q.id;
256 int read_ptr = txq->q.read_ptr;
257 u8 sta_id = 0;
258 __le16 bc_ent;
259 struct iwl_tx_cmd *tx_cmd =
260 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
261
262 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
263
264 if (txq_id != trans_pcie->cmd_queue)
265 sta_id = tx_cmd->sta_id;
266
267 bc_ent = cpu_to_le16(1 | (sta_id << 12));
268 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
269
270 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
271 scd_bc_tbl[txq_id].
272 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
273}
274
275/*
276 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
93 */ 277 */
94void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq) 278void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
95{ 279{
96 u32 reg = 0; 280 u32 reg = 0;
97 int txq_id = txq->q.id; 281 int txq_id = txq->q.id;
@@ -137,7 +321,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
137 txq->need_update = 0; 321 txq->need_update = 0;
138} 322}
139 323
140static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 324static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
141{ 325{
142 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 326 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
143 327
@@ -149,15 +333,15 @@ static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
149 return addr; 333 return addr;
150} 334}
151 335
152static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) 336static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
153{ 337{
154 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 338 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
155 339
156 return le16_to_cpu(tb->hi_n_len) >> 4; 340 return le16_to_cpu(tb->hi_n_len) >> 4;
157} 341}
158 342
159static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, 343static inline void iwl_pcie_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
160 dma_addr_t addr, u16 len) 344 dma_addr_t addr, u16 len)
161{ 345{
162 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 346 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
163 u16 hi_n_len = len << 4; 347 u16 hi_n_len = len << 4;
@@ -171,19 +355,20 @@ static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
171 tfd->num_tbs = idx + 1; 355 tfd->num_tbs = idx + 1;
172} 356}
173 357
174static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) 358static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
175{ 359{
176 return tfd->num_tbs & 0x1f; 360 return tfd->num_tbs & 0x1f;
177} 361}
178 362
179static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta, 363static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
180 struct iwl_tfd *tfd, enum dma_data_direction dma_dir) 364 struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
365 enum dma_data_direction dma_dir)
181{ 366{
182 int i; 367 int i;
183 int num_tbs; 368 int num_tbs;
184 369
185 /* Sanity check on number of chunks */ 370 /* Sanity check on number of chunks */
186 num_tbs = iwl_tfd_get_num_tbs(tfd); 371 num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
187 372
188 if (num_tbs >= IWL_NUM_OF_TBS) { 373 if (num_tbs >= IWL_NUM_OF_TBS) {
189 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 374 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
@@ -200,14 +385,14 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
200 385
201 /* Unmap chunks, if any. */ 386 /* Unmap chunks, if any. */
202 for (i = 1; i < num_tbs; i++) 387 for (i = 1; i < num_tbs; i++)
203 dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), 388 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
204 iwl_tfd_tb_get_len(tfd, i), dma_dir); 389 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
205 390
206 tfd->num_tbs = 0; 391 tfd->num_tbs = 0;
207} 392}
208 393
209/** 394/*
210 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 395 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
211 * @trans - transport private data 396 * @trans - transport private data
212 * @txq - tx queue 397 * @txq - tx queue
213 * @dma_dir - the direction of the DMA mapping 398 * @dma_dir - the direction of the DMA mapping
@@ -215,8 +400,8 @@ static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
215 * Does NOT advance any TFD circular buffer read/write indexes 400 * Does NOT advance any TFD circular buffer read/write indexes
216 * Does NOT free the TFD itself (which is within circular buffer) 401 * Does NOT free the TFD itself (which is within circular buffer)
217 */ 402 */
218void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 403static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
219 enum dma_data_direction dma_dir) 404 enum dma_data_direction dma_dir)
220{ 405{
221 struct iwl_tfd *tfd_tmp = txq->tfds; 406 struct iwl_tfd *tfd_tmp = txq->tfds;
222 407
@@ -227,8 +412,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
227 lockdep_assert_held(&txq->lock); 412 lockdep_assert_held(&txq->lock);
228 413
229 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 414 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
230 iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], 415 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
231 dma_dir); 416 dma_dir);
232 417
233 /* free SKB */ 418 /* free SKB */
234 if (txq->entries) { 419 if (txq->entries) {
@@ -247,10 +432,8 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
247 } 432 }
248} 433}
249 434
250int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans, 435static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
251 struct iwl_tx_queue *txq, 436 dma_addr_t addr, u16 len, u8 reset)
252 dma_addr_t addr, u16 len,
253 u8 reset)
254{ 437{
255 struct iwl_queue *q; 438 struct iwl_queue *q;
256 struct iwl_tfd *tfd, *tfd_tmp; 439 struct iwl_tfd *tfd, *tfd_tmp;
@@ -263,7 +446,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
263 if (reset) 446 if (reset)
264 memset(tfd, 0, sizeof(*tfd)); 447 memset(tfd, 0, sizeof(*tfd));
265 448
266 num_tbs = iwl_tfd_get_num_tbs(tfd); 449 num_tbs = iwl_pcie_tfd_get_num_tbs(tfd);
267 450
268 /* Each TFD can point to a maximum 20 Tx buffers */ 451 /* Each TFD can point to a maximum 20 Tx buffers */
269 if (num_tbs >= IWL_NUM_OF_TBS) { 452 if (num_tbs >= IWL_NUM_OF_TBS) {
@@ -279,108 +462,534 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
279 IWL_ERR(trans, "Unaligned address = %llx\n", 462 IWL_ERR(trans, "Unaligned address = %llx\n",
280 (unsigned long long)addr); 463 (unsigned long long)addr);
281 464
282 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 465 iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
283 466
284 return 0; 467 return 0;
285} 468}
286 469
287/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 470static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
288 * DMA services 471 struct iwl_txq *txq, int slots_num,
289 * 472 u32 txq_id)
290 * Theory of operation 473{
291 * 474 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
292 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer 475 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
293 * of buffer descriptors, each of which points to one or more data buffers for 476 int i;
294 * the device to read from or fill. Driver and device exchange status of each 477
295 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty 478 if (WARN_ON(txq->entries || txq->tfds))
296 * entries in each circular buffer, to protect against confusing empty and full 479 return -EINVAL;
297 * queue states. 480
298 * 481 setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
299 * The device reads or writes the data in the queues via the device's several 482 (unsigned long)txq);
300 * DMA/FIFO channels. Each queue is mapped to a single DMA channel. 483 txq->trans_pcie = trans_pcie;
301 * 484
302 * For Tx queue, there are low mark and high mark limits. If, after queuing 485 txq->q.n_window = slots_num;
303 * the packet for Tx, free space become < low mark, Tx queue stopped. When 486
304 * reclaiming packets (on 'tx done IRQ), if free space become > high mark, 487 txq->entries = kcalloc(slots_num,
305 * Tx queue resumed. 488 sizeof(struct iwl_pcie_txq_entry),
489 GFP_KERNEL);
490
491 if (!txq->entries)
492 goto error;
493
494 if (txq_id == trans_pcie->cmd_queue)
495 for (i = 0; i < slots_num; i++) {
496 txq->entries[i].cmd =
497 kmalloc(sizeof(struct iwl_device_cmd),
498 GFP_KERNEL);
499 if (!txq->entries[i].cmd)
500 goto error;
501 }
502
503 /* Circular buffer of transmit frame descriptors (TFDs),
504 * shared with device */
505 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
506 &txq->q.dma_addr, GFP_KERNEL);
507 if (!txq->tfds) {
508 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
509 goto error;
510 }
511 txq->q.id = txq_id;
512
513 return 0;
514error:
515 if (txq->entries && txq_id == trans_pcie->cmd_queue)
516 for (i = 0; i < slots_num; i++)
517 kfree(txq->entries[i].cmd);
518 kfree(txq->entries);
519 txq->entries = NULL;
520
521 return -ENOMEM;
522
523}
524
525static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
526 int slots_num, u32 txq_id)
527{
528 int ret;
529
530 txq->need_update = 0;
531
532 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
533 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
534 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
535
536 /* Initialize queue's high/low-water marks, and head/tail indexes */
537 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
538 txq_id);
539 if (ret)
540 return ret;
541
542 spin_lock_init(&txq->lock);
543
544 /*
545 * Tell nic where to find circular buffer of Tx Frame Descriptors for
546 * given Tx queue, and enable the DMA channel used for that queue.
547 * Circular buffer (TFD queue in DRAM) physical base address */
548 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
549 txq->q.dma_addr >> 8);
550
551 return 0;
552}
553
554/*
555 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
556 */
557static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
558{
559 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
560 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
561 struct iwl_queue *q = &txq->q;
562 enum dma_data_direction dma_dir;
563
564 if (!q->n_bd)
565 return;
566
567 /* In the command queue, all the TBs are mapped as BIDI
568 * so unmap them as such.
569 */
570 if (txq_id == trans_pcie->cmd_queue)
571 dma_dir = DMA_BIDIRECTIONAL;
572 else
573 dma_dir = DMA_TO_DEVICE;
574
575 spin_lock_bh(&txq->lock);
576 while (q->write_ptr != q->read_ptr) {
577 iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
578 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
579 }
580 spin_unlock_bh(&txq->lock);
581}
582
583/*
584 * iwl_pcie_txq_free - Deallocate DMA queue.
585 * @txq: Transmit queue to deallocate.
306 * 586 *
307 ***************************************************/ 587 * Empty queue by removing and destroying all BD's.
588 * Free all buffers.
589 * 0-fill, but do not free "txq" descriptor structure.
590 */
591static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
592{
593 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
594 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
595 struct device *dev = trans->dev;
596 int i;
597
598 if (WARN_ON(!txq))
599 return;
600
601 iwl_pcie_txq_unmap(trans, txq_id);
308 602
309int iwl_queue_space(const struct iwl_queue *q) 603 /* De-alloc array of command/tx buffers */
604 if (txq_id == trans_pcie->cmd_queue)
605 for (i = 0; i < txq->q.n_window; i++) {
606 kfree(txq->entries[i].cmd);
607 kfree(txq->entries[i].copy_cmd);
608 kfree(txq->entries[i].free_buf);
609 }
610
611 /* De-alloc circular buffer of TFDs */
612 if (txq->q.n_bd) {
613 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
614 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
615 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
616 }
617
618 kfree(txq->entries);
619 txq->entries = NULL;
620
621 del_timer_sync(&txq->stuck_timer);
622
623 /* 0-fill queue descriptor structure */
624 memset(txq, 0, sizeof(*txq));
625}
626
627/*
628 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
629 */
630static void iwl_pcie_txq_set_sched(struct iwl_trans *trans, u32 mask)
310{ 631{
311 int s = q->read_ptr - q->write_ptr; 632 struct iwl_trans_pcie __maybe_unused *trans_pcie =
633 IWL_TRANS_GET_PCIE_TRANS(trans);
312 634
313 if (q->read_ptr > q->write_ptr) 635 iwl_write_prph(trans, SCD_TXFACT, mask);
314 s -= q->n_bd; 636}
315 637
316 if (s <= 0) 638void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
317 s += q->n_window; 639{
318 /* keep some reserve to not confuse empty and full situations */ 640 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
319 s -= 2; 641 u32 a;
320 if (s < 0) 642 int chan;
321 s = 0; 643 u32 reg_val;
322 return s; 644
645 /* make sure all queue are not stopped/used */
646 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
647 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
648
649 trans_pcie->scd_base_addr =
650 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
651
652 WARN_ON(scd_base_addr != 0 &&
653 scd_base_addr != trans_pcie->scd_base_addr);
654
655 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
 656	/* reset context data memory */
657 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
658 a += 4)
659 iwl_write_targ_mem(trans, a, 0);
660 /* reset tx status memory */
661 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
662 a += 4)
663 iwl_write_targ_mem(trans, a, 0);
664 for (; a < trans_pcie->scd_base_addr +
665 SCD_TRANS_TBL_OFFSET_QUEUE(
666 trans->cfg->base_params->num_of_queues);
667 a += 4)
668 iwl_write_targ_mem(trans, a, 0);
669
670 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
671 trans_pcie->scd_bc_tbls.dma >> 10);
672
673 /* The chain extension of the SCD doesn't work well. This feature is
674 * enabled by default by the HW, so we need to disable it manually.
675 */
676 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
677
678 iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
679 trans_pcie->cmd_fifo);
680
681 /* Activate all Tx DMA/FIFO channels */
682 iwl_pcie_txq_set_sched(trans, IWL_MASK(0, 7));
683
684 /* Enable DMA channel */
685 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
686 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
687 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
688 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
689
690 /* Update FH chicken bits */
691 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
692 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
693 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
694
695 /* Enable L1-Active */
696 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
697 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
323} 698}
324 699
325/** 700/*
326 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 701 * iwl_pcie_tx_stop - Stop all Tx DMA channels
327 */ 702 */
328int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id) 703int iwl_pcie_tx_stop(struct iwl_trans *trans)
329{ 704{
330 q->n_bd = count; 705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
331 q->n_window = slots_num; 706 int ch, txq_id, ret;
332 q->id = id; 707 unsigned long flags;
333 708
334 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap 709 /* Turn off all Tx DMA fifos */
335 * and iwl_queue_dec_wrap are broken. */ 710 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
336 if (WARN_ON(!is_power_of_2(count)))
337 return -EINVAL;
338 711
339 /* slots_num must be power-of-two size, otherwise 712 iwl_pcie_txq_set_sched(trans, 0);
340 * get_cmd_index is broken. */
341 if (WARN_ON(!is_power_of_2(slots_num)))
342 return -EINVAL;
343 713
344 q->low_mark = q->n_window / 4; 714 /* Stop each Tx DMA channel, and wait for it to be idle */
345 if (q->low_mark < 4) 715 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
346 q->low_mark = 4; 716 iwl_write_direct32(trans,
717 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
718 ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
719 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
720 if (ret < 0)
721 IWL_ERR(trans,
722 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
723 ch,
724 iwl_read_direct32(trans,
725 FH_TSSR_TX_STATUS_REG));
726 }
727 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
347 728
348 q->high_mark = q->n_window / 8; 729 if (!trans_pcie->txq) {
349 if (q->high_mark < 2) 730 IWL_WARN(trans,
350 q->high_mark = 2; 731 "Stopping tx queues that aren't allocated...\n");
732 return 0;
733 }
351 734
352 q->write_ptr = q->read_ptr = 0; 735 /* Unmap DMA from host system and free skb's */
736 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
737 txq_id++)
738 iwl_pcie_txq_unmap(trans, txq_id);
353 739
354 return 0; 740 return 0;
355} 741}
356 742
357static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, 743/*
358 struct iwl_tx_queue *txq) 744 * iwl_trans_tx_free - Free TXQ Context
745 *
746 * Destroy all TX DMA queues and structures
747 */
748void iwl_pcie_tx_free(struct iwl_trans *trans)
359{ 749{
360 struct iwl_trans_pcie *trans_pcie = 750 int txq_id;
361 IWL_TRANS_GET_PCIE_TRANS(trans); 751 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
362 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
363 int txq_id = txq->q.id;
364 int read_ptr = txq->q.read_ptr;
365 u8 sta_id = 0;
366 __le16 bc_ent;
367 struct iwl_tx_cmd *tx_cmd =
368 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
369 752
370 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 753 /* Tx queues */
754 if (trans_pcie->txq) {
755 for (txq_id = 0;
756 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
757 iwl_pcie_txq_free(trans, txq_id);
758 }
371 759
372 if (txq_id != trans_pcie->cmd_queue) 760 kfree(trans_pcie->txq);
373 sta_id = tx_cmd->sta_id; 761 trans_pcie->txq = NULL;
374 762
375 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 763 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
376 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
377 764
378 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 765 iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
379 scd_bc_tbl[txq_id]. 766}
380 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 767
768/*
769 * iwl_pcie_tx_alloc - allocate TX context
770 * Allocate all Tx DMA structures and initialize them
771 */
772static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
773{
774 int ret;
775 int txq_id, slots_num;
776 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
777
778 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
779 sizeof(struct iwlagn_scd_bc_tbl);
780
781 /*It is not allowed to alloc twice, so warn when this happens.
782 * We cannot rely on the previous allocation, so free and fail */
783 if (WARN_ON(trans_pcie->txq)) {
784 ret = -EINVAL;
785 goto error;
786 }
787
788 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
789 scd_bc_tbls_size);
790 if (ret) {
791 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
792 goto error;
793 }
794
795 /* Alloc keep-warm buffer */
796 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
797 if (ret) {
798 IWL_ERR(trans, "Keep Warm allocation failed\n");
799 goto error;
800 }
801
802 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
803 sizeof(struct iwl_txq), GFP_KERNEL);
804 if (!trans_pcie->txq) {
805 IWL_ERR(trans, "Not enough memory for txq\n");
806 ret = ENOMEM;
807 goto error;
808 }
809
810 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
811 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
812 txq_id++) {
813 slots_num = (txq_id == trans_pcie->cmd_queue) ?
814 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
815 ret = iwl_pcie_txq_alloc(trans, &trans_pcie->txq[txq_id],
816 slots_num, txq_id);
817 if (ret) {
818 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
819 goto error;
820 }
821 }
822
823 return 0;
824
825error:
826 iwl_pcie_tx_free(trans);
827
828 return ret;
829}
830int iwl_pcie_tx_init(struct iwl_trans *trans)
831{
832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
833 int ret;
834 int txq_id, slots_num;
835 unsigned long flags;
836 bool alloc = false;
837
838 if (!trans_pcie->txq) {
839 ret = iwl_pcie_tx_alloc(trans);
840 if (ret)
841 goto error;
842 alloc = true;
843 }
844
845 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
846
847 /* Turn off all Tx DMA fifos */
848 iwl_write_prph(trans, SCD_TXFACT, 0);
849
850 /* Tell NIC where to find the "keep warm" buffer */
851 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
852 trans_pcie->kw.dma >> 4);
853
854 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
855
856 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
857 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
858 txq_id++) {
859 slots_num = (txq_id == trans_pcie->cmd_queue) ?
860 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
861 ret = iwl_pcie_txq_init(trans, &trans_pcie->txq[txq_id],
862 slots_num, txq_id);
863 if (ret) {
864 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
865 goto error;
866 }
867 }
868
869 return 0;
870error:
871 /*Upon error, free only if we allocated something */
872 if (alloc)
873 iwl_pcie_tx_free(trans);
874 return ret;
875}
876
877static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
878 struct iwl_txq *txq)
879{
880 if (!trans_pcie->wd_timeout)
881 return;
882
883 /*
884 * if empty delete timer, otherwise move timer forward
885 * since we're making progress on this queue
886 */
887 if (txq->q.read_ptr == txq->q.write_ptr)
888 del_timer(&txq->stuck_timer);
889 else
890 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
891}
892
893/* Frees buffers until index _not_ inclusive */
894void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
895 struct sk_buff_head *skbs)
896{
897 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
898 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
899 /* n_bd is usually 256 => n_bd - 1 = 0xff */
900 int tfd_num = ssn & (txq->q.n_bd - 1);
901 struct iwl_queue *q = &txq->q;
902 int last_to_free;
903
904 /* This function is not meant to release cmd queue*/
905 if (WARN_ON(txq_id == trans_pcie->cmd_queue))
906 return;
907
908 spin_lock(&txq->lock);
909
910 if (txq->q.read_ptr == tfd_num)
911 goto out;
912
913 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
914 txq_id, txq->q.read_ptr, tfd_num, ssn);
915
916 /*Since we free until index _not_ inclusive, the one before index is
917 * the last we will free. This one must be used */
918 last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
919
920 if (!iwl_queue_used(q, last_to_free)) {
921 IWL_ERR(trans,
922 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
923 __func__, txq_id, last_to_free, q->n_bd,
924 q->write_ptr, q->read_ptr);
925 goto out;
926 }
927
928 if (WARN_ON(!skb_queue_empty(skbs)))
929 goto out;
930
931 for (;
932 q->read_ptr != tfd_num;
933 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
934
935 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
936 continue;
937
938 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
939
940 txq->entries[txq->q.read_ptr].skb = NULL;
941
942 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
943
944 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
945 }
946
947 iwl_pcie_txq_progress(trans_pcie, txq);
948
949 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
950 iwl_wake_queue(trans, txq);
951out:
952 spin_unlock(&txq->lock);
381} 953}
382 954
383static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 955/*
956 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
957 *
958 * When FW advances 'R' index, all entries between old and new 'R' index
959 * need to be reclaimed. As result, some free space forms. If there is
960 * enough free space (> low mark), wake the stack that feeds us.
961 */
962static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
963{
964 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
965 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
966 struct iwl_queue *q = &txq->q;
967 int nfreed = 0;
968
969 lockdep_assert_held(&txq->lock);
970
971 if ((idx >= q->n_bd) || (!iwl_queue_used(q, idx))) {
972 IWL_ERR(trans,
973 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
974 __func__, txq_id, idx, q->n_bd,
975 q->write_ptr, q->read_ptr);
976 return;
977 }
978
979 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
980 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
981
982 if (nfreed++ > 0) {
983 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
984 idx, q->write_ptr, q->read_ptr);
985 iwl_op_mode_nic_error(trans->op_mode);
986 }
987 }
988
989 iwl_pcie_txq_progress(trans_pcie, txq);
990}
991
992static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
384 u16 txq_id) 993 u16 txq_id)
385{ 994{
386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 995 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -405,7 +1014,8 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
405 return 0; 1014 return 0;
406} 1015}
407 1016
408static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) 1017static inline void iwl_pcie_txq_set_inactive(struct iwl_trans *trans,
1018 u16 txq_id)
409{ 1019{
410 /* Simply stop the queue, but don't change any configuration; 1020 /* Simply stop the queue, but don't change any configuration;
411 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 1021 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -424,7 +1034,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
424 WARN_ONCE(1, "queue %d already used - expect issues", txq_id); 1034 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
425 1035
426 /* Stop this Tx queue before configuring it */ 1036 /* Stop this Tx queue before configuring it */
427 iwl_txq_set_inactive(trans, txq_id); 1037 iwl_pcie_txq_set_inactive(trans, txq_id);
428 1038
429 /* Set this queue as a chain-building queue unless it is CMD queue */ 1039 /* Set this queue as a chain-building queue unless it is CMD queue */
430 if (txq_id != trans_pcie->cmd_queue) 1040 if (txq_id != trans_pcie->cmd_queue)
@@ -435,7 +1045,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
435 u16 ra_tid = BUILD_RAxTID(sta_id, tid); 1045 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
436 1046
437 /* Map receiver-address / traffic-ID to this queue */ 1047 /* Map receiver-address / traffic-ID to this queue */
438 iwl_txq_set_ratid_map(trans, ra_tid, txq_id); 1048 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
439 1049
440 /* enable aggregations for the queue */ 1050 /* enable aggregations for the queue */
441 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); 1051 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
@@ -480,20 +1090,29 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
480void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) 1090void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
481{ 1091{
482 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1092 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1093 u32 stts_addr = trans_pcie->scd_base_addr +
1094 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1095 static const u32 zero_val[4] = {};
483 1096
484 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { 1097 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
485 WARN_ONCE(1, "queue %d not used", txq_id); 1098 WARN_ONCE(1, "queue %d not used", txq_id);
486 return; 1099 return;
487 } 1100 }
488 1101
489 iwl_txq_set_inactive(trans, txq_id); 1102 iwl_pcie_txq_set_inactive(trans, txq_id);
1103
1104 _iwl_write_targ_mem_dwords(trans, stts_addr,
1105 zero_val, ARRAY_SIZE(zero_val));
1106
1107 iwl_pcie_txq_unmap(trans, txq_id);
1108
490 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1109 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
491} 1110}
492 1111
493/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1112/*************** HOST COMMAND QUEUE FUNCTIONS *****/
494 1113
495/** 1114/*
496 * iwl_enqueue_hcmd - enqueue a uCode command 1115 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
497 * @priv: device private data point 1116 * @priv: device private data point
498 * @cmd: a point to the ucode command structure 1117 * @cmd: a point to the ucode command structure
499 * 1118 *
@@ -501,15 +1120,17 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
501 * failed. On success, it turns the index (> 0) of command in the 1120 * failed. On success, it turns the index (> 0) of command in the
502 * command queue. 1121 * command queue.
503 */ 1122 */
504static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1123static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1124 struct iwl_host_cmd *cmd)
505{ 1125{
506 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1126 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
507 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1127 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
508 struct iwl_queue *q = &txq->q; 1128 struct iwl_queue *q = &txq->q;
509 struct iwl_device_cmd *out_cmd; 1129 struct iwl_device_cmd *out_cmd;
510 struct iwl_cmd_meta *out_meta; 1130 struct iwl_cmd_meta *out_meta;
1131 void *dup_buf = NULL;
511 dma_addr_t phys_addr; 1132 dma_addr_t phys_addr;
512 u32 idx; 1133 int idx;
513 u16 copy_size, cmd_size; 1134 u16 copy_size, cmd_size;
514 bool had_nocopy = false; 1135 bool had_nocopy = false;
515 int i; 1136 int i;
@@ -526,10 +1147,33 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
526 continue; 1147 continue;
527 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1148 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
528 had_nocopy = true; 1149 had_nocopy = true;
1150 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1151 idx = -EINVAL;
1152 goto free_dup_buf;
1153 }
1154 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1155 /*
1156 * This is also a chunk that isn't copied
1157 * to the static buffer so set had_nocopy.
1158 */
1159 had_nocopy = true;
1160
1161 /* only allowed once */
1162 if (WARN_ON(dup_buf)) {
1163 idx = -EINVAL;
1164 goto free_dup_buf;
1165 }
1166
1167 dup_buf = kmemdup(cmd->data[i], cmd->len[i],
1168 GFP_ATOMIC);
1169 if (!dup_buf)
1170 return -ENOMEM;
529 } else { 1171 } else {
530 /* NOCOPY must not be followed by normal! */ 1172 /* NOCOPY must not be followed by normal! */
531 if (WARN_ON(had_nocopy)) 1173 if (WARN_ON(had_nocopy)) {
532 return -EINVAL; 1174 idx = -EINVAL;
1175 goto free_dup_buf;
1176 }
533 copy_size += cmd->len[i]; 1177 copy_size += cmd->len[i];
534 } 1178 }
535 cmd_size += cmd->len[i]; 1179 cmd_size += cmd->len[i];
@@ -541,8 +1185,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
541 * allocated into separate TFDs, then we will need to 1185 * allocated into separate TFDs, then we will need to
542 * increase the size of the buffers. 1186 * increase the size of the buffers.
543 */ 1187 */
544 if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE)) 1188 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
545 return -EINVAL; 1189 "Command %s (%#x) is too large (%d bytes)\n",
1190 get_cmd_string(trans_pcie, cmd->id), cmd->id, copy_size)) {
1191 idx = -EINVAL;
1192 goto free_dup_buf;
1193 }
546 1194
547 spin_lock_bh(&txq->lock); 1195 spin_lock_bh(&txq->lock);
548 1196
@@ -551,7 +1199,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
551 1199
552 IWL_ERR(trans, "No space in command queue\n"); 1200 IWL_ERR(trans, "No space in command queue\n");
553 iwl_op_mode_cmd_queue_full(trans->op_mode); 1201 iwl_op_mode_cmd_queue_full(trans->op_mode);
554 return -ENOSPC; 1202 idx = -ENOSPC;
1203 goto free_dup_buf;
555 } 1204 }
556 1205
557 idx = get_cmd_index(q, q->write_ptr); 1206 idx = get_cmd_index(q, q->write_ptr);
@@ -575,7 +1224,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
575 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1224 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
576 if (!cmd->len[i]) 1225 if (!cmd->len[i])
577 continue; 1226 continue;
578 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) 1227 if (cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1228 IWL_HCMD_DFL_DUP))
579 break; 1229 break;
580 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]); 1230 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], cmd->len[i]);
581 cmd_pos += cmd->len[i]; 1231 cmd_pos += cmd->len[i];
@@ -600,7 +1250,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
600 1250
601 IWL_DEBUG_HC(trans, 1251 IWL_DEBUG_HC(trans,
602 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1252 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
603 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 1253 get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
604 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1254 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
605 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1255 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
606 1256
@@ -614,28 +1264,35 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
614 dma_unmap_addr_set(out_meta, mapping, phys_addr); 1264 dma_unmap_addr_set(out_meta, mapping, phys_addr);
615 dma_unmap_len_set(out_meta, len, copy_size); 1265 dma_unmap_len_set(out_meta, len, copy_size);
616 1266
617 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1); 1267 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
618 1268
619 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1269 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
1270 const void *data = cmd->data[i];
1271
620 if (!cmd->len[i]) 1272 if (!cmd->len[i])
621 continue; 1273 continue;
622 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) 1274 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1275 IWL_HCMD_DFL_DUP)))
623 continue; 1276 continue;
624 phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i], 1277 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1278 data = dup_buf;
1279 phys_addr = dma_map_single(trans->dev, (void *)data,
625 cmd->len[i], DMA_BIDIRECTIONAL); 1280 cmd->len[i], DMA_BIDIRECTIONAL);
626 if (dma_mapping_error(trans->dev, phys_addr)) { 1281 if (dma_mapping_error(trans->dev, phys_addr)) {
627 iwl_unmap_tfd(trans, out_meta, 1282 iwl_pcie_tfd_unmap(trans, out_meta,
628 &txq->tfds[q->write_ptr], 1283 &txq->tfds[q->write_ptr],
629 DMA_BIDIRECTIONAL); 1284 DMA_BIDIRECTIONAL);
630 idx = -ENOMEM; 1285 idx = -ENOMEM;
631 goto out; 1286 goto out;
632 } 1287 }
633 1288
634 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, 1289 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmd->len[i], 0);
635 cmd->len[i], 0);
636 } 1290 }
637 1291
638 out_meta->flags = cmd->flags; 1292 out_meta->flags = cmd->flags;
1293 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1294 kfree(txq->entries[idx].free_buf);
1295 txq->entries[idx].free_buf = dup_buf;
639 1296
640 txq->need_update = 1; 1297 txq->need_update = 1;
641 1298
@@ -648,70 +1305,18 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
648 1305
649 /* Increment and update queue's write index */ 1306 /* Increment and update queue's write index */
650 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1307 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
651 iwl_txq_update_write_ptr(trans, txq); 1308 iwl_pcie_txq_inc_wr_ptr(trans, txq);
652 1309
653 out: 1310 out:
654 spin_unlock_bh(&txq->lock); 1311 spin_unlock_bh(&txq->lock);
1312 free_dup_buf:
1313 if (idx < 0)
1314 kfree(dup_buf);
655 return idx; 1315 return idx;
656} 1316}
657 1317
658static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie, 1318/*
659 struct iwl_tx_queue *txq) 1319 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
660{
661 if (!trans_pcie->wd_timeout)
662 return;
663
664 /*
665 * if empty delete timer, otherwise move timer forward
666 * since we're making progress on this queue
667 */
668 if (txq->q.read_ptr == txq->q.write_ptr)
669 del_timer(&txq->stuck_timer);
670 else
671 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
672}
673
674/**
675 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
676 *
677 * When FW advances 'R' index, all entries between old and new 'R' index
678 * need to be reclaimed. As result, some free space forms. If there is
679 * enough free space (> low mark), wake the stack that feeds us.
680 */
681static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
682 int idx)
683{
684 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
685 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
686 struct iwl_queue *q = &txq->q;
687 int nfreed = 0;
688
689 lockdep_assert_held(&txq->lock);
690
691 if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
692 IWL_ERR(trans,
693 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
694 __func__, txq_id, idx, q->n_bd,
695 q->write_ptr, q->read_ptr);
696 return;
697 }
698
699 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
700 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
701
702 if (nfreed++ > 0) {
703 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
704 idx, q->write_ptr, q->read_ptr);
705 iwl_op_mode_nic_error(trans->op_mode);
706 }
707
708 }
709
710 iwl_queue_progress(trans_pcie, txq);
711}
712
713/**
714 * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
715 * @rxb: Rx buffer to reclaim 1320 * @rxb: Rx buffer to reclaim
716 * @handler_status: return value of the handler of the command 1321 * @handler_status: return value of the handler of the command
717 * (put in setup_rx_handlers) 1322 * (put in setup_rx_handlers)
@@ -720,8 +1325,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
720 * will be executed. The attached skb (if present) will only be freed 1325 * will be executed. The attached skb (if present) will only be freed
721 * if the callback returns 1 1326 * if the callback returns 1
722 */ 1327 */
723void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb, 1328void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
724 int handler_status) 1329 struct iwl_rx_cmd_buffer *rxb, int handler_status)
725{ 1330{
726 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1331 struct iwl_rx_packet *pkt = rxb_addr(rxb);
727 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1332 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -731,7 +1336,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
731 struct iwl_device_cmd *cmd; 1336 struct iwl_device_cmd *cmd;
732 struct iwl_cmd_meta *meta; 1337 struct iwl_cmd_meta *meta;
733 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1338 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
734 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1339 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
735 1340
736 /* If a Tx command is being handled and it isn't in the actual 1341 /* If a Tx command is being handled and it isn't in the actual
737 * command queue then there a command routing bug has been introduced 1342 * command queue then there a command routing bug has been introduced
@@ -751,7 +1356,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
751 cmd = txq->entries[cmd_index].cmd; 1356 cmd = txq->entries[cmd_index].cmd;
752 meta = &txq->entries[cmd_index].meta; 1357 meta = &txq->entries[cmd_index].meta;
753 1358
754 iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 1359 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
755 1360
756 /* Input error checking is done when commands are added to queue. */ 1361 /* Input error checking is done when commands are added to queue. */
757 if (meta->flags & CMD_WANT_SKB) { 1362 if (meta->flags & CMD_WANT_SKB) {
@@ -763,20 +1368,18 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
763 meta->source->handler_status = handler_status; 1368 meta->source->handler_status = handler_status;
764 } 1369 }
765 1370
766 iwl_hcmd_queue_reclaim(trans, txq_id, index); 1371 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
767 1372
768 if (!(meta->flags & CMD_ASYNC)) { 1373 if (!(meta->flags & CMD_ASYNC)) {
769 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 1374 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
770 IWL_WARN(trans, 1375 IWL_WARN(trans,
771 "HCMD_ACTIVE already clear for command %s\n", 1376 "HCMD_ACTIVE already clear for command %s\n",
772 trans_pcie_get_cmd_string(trans_pcie, 1377 get_cmd_string(trans_pcie, cmd->hdr.cmd));
773 cmd->hdr.cmd));
774 } 1378 }
775 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 1379 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
776 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 1380 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
777 trans_pcie_get_cmd_string(trans_pcie, 1381 get_cmd_string(trans_pcie, cmd->hdr.cmd));
778 cmd->hdr.cmd)); 1382 wake_up(&trans_pcie->wait_command_queue);
779 wake_up(&trans->wait_command_queue);
780 } 1383 }
781 1384
782 meta->flags = 0; 1385 meta->flags = 0;
@@ -786,7 +1389,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
786 1389
787#define HOST_COMPLETE_TIMEOUT (2 * HZ) 1390#define HOST_COMPLETE_TIMEOUT (2 * HZ)
788 1391
789static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1392static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1393 struct iwl_host_cmd *cmd)
790{ 1394{
791 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1395 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
792 int ret; 1396 int ret;
@@ -795,59 +1399,59 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
795 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 1399 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
796 return -EINVAL; 1400 return -EINVAL;
797 1401
798 1402 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
799 ret = iwl_enqueue_hcmd(trans, cmd);
800 if (ret < 0) { 1403 if (ret < 0) {
801 IWL_ERR(trans, 1404 IWL_ERR(trans,
802 "Error sending %s: enqueue_hcmd failed: %d\n", 1405 "Error sending %s: enqueue_hcmd failed: %d\n",
803 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); 1406 get_cmd_string(trans_pcie, cmd->id), ret);
804 return ret; 1407 return ret;
805 } 1408 }
806 return 0; 1409 return 0;
807} 1410}
808 1411
809static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1412static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1413 struct iwl_host_cmd *cmd)
810{ 1414{
811 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1415 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
812 int cmd_idx; 1416 int cmd_idx;
813 int ret; 1417 int ret;
814 1418
815 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 1419 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
816 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 1420 get_cmd_string(trans_pcie, cmd->id));
817 1421
818 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, 1422 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
819 &trans_pcie->status))) { 1423 &trans_pcie->status))) {
820 IWL_ERR(trans, "Command %s: a command is already active!\n", 1424 IWL_ERR(trans, "Command %s: a command is already active!\n",
821 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 1425 get_cmd_string(trans_pcie, cmd->id));
822 return -EIO; 1426 return -EIO;
823 } 1427 }
824 1428
825 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 1429 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
826 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 1430 get_cmd_string(trans_pcie, cmd->id));
827 1431
828 cmd_idx = iwl_enqueue_hcmd(trans, cmd); 1432 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
829 if (cmd_idx < 0) { 1433 if (cmd_idx < 0) {
830 ret = cmd_idx; 1434 ret = cmd_idx;
831 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 1435 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
832 IWL_ERR(trans, 1436 IWL_ERR(trans,
833 "Error sending %s: enqueue_hcmd failed: %d\n", 1437 "Error sending %s: enqueue_hcmd failed: %d\n",
834 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret); 1438 get_cmd_string(trans_pcie, cmd->id), ret);
835 return ret; 1439 return ret;
836 } 1440 }
837 1441
838 ret = wait_event_timeout(trans->wait_command_queue, 1442 ret = wait_event_timeout(trans_pcie->wait_command_queue,
839 !test_bit(STATUS_HCMD_ACTIVE, 1443 !test_bit(STATUS_HCMD_ACTIVE,
840 &trans_pcie->status), 1444 &trans_pcie->status),
841 HOST_COMPLETE_TIMEOUT); 1445 HOST_COMPLETE_TIMEOUT);
842 if (!ret) { 1446 if (!ret) {
843 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) { 1447 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
844 struct iwl_tx_queue *txq = 1448 struct iwl_txq *txq =
845 &trans_pcie->txq[trans_pcie->cmd_queue]; 1449 &trans_pcie->txq[trans_pcie->cmd_queue];
846 struct iwl_queue *q = &txq->q; 1450 struct iwl_queue *q = &txq->q;
847 1451
848 IWL_ERR(trans, 1452 IWL_ERR(trans,
849 "Error sending %s: time out after %dms.\n", 1453 "Error sending %s: time out after %dms.\n",
850 trans_pcie_get_cmd_string(trans_pcie, cmd->id), 1454 get_cmd_string(trans_pcie, cmd->id),
851 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1455 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
852 1456
853 IWL_ERR(trans, 1457 IWL_ERR(trans,
@@ -857,16 +1461,28 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
857 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status); 1461 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
858 IWL_DEBUG_INFO(trans, 1462 IWL_DEBUG_INFO(trans,
859 "Clearing HCMD_ACTIVE for command %s\n", 1463 "Clearing HCMD_ACTIVE for command %s\n",
860 trans_pcie_get_cmd_string(trans_pcie, 1464 get_cmd_string(trans_pcie, cmd->id));
861 cmd->id));
862 ret = -ETIMEDOUT; 1465 ret = -ETIMEDOUT;
863 goto cancel; 1466 goto cancel;
864 } 1467 }
865 } 1468 }
866 1469
1470 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status)) {
1471 IWL_ERR(trans, "FW error in SYNC CMD %s\n",
1472 get_cmd_string(trans_pcie, cmd->id));
1473 ret = -EIO;
1474 goto cancel;
1475 }
1476
1477 if (test_bit(STATUS_RFKILL, &trans_pcie->status)) {
1478 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1479 ret = -ERFKILL;
1480 goto cancel;
1481 }
1482
867 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 1483 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
868 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 1484 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
869 trans_pcie_get_cmd_string(trans_pcie, cmd->id)); 1485 get_cmd_string(trans_pcie, cmd->id));
870 ret = -EIO; 1486 ret = -EIO;
871 goto cancel; 1487 goto cancel;
872 } 1488 }
@@ -893,64 +1509,183 @@ cancel:
893 return ret; 1509 return ret;
894} 1510}
895 1511
896int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 1512int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
897{ 1513{
1514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1515
1516 if (test_bit(STATUS_FW_ERROR, &trans_pcie->status))
1517 return -EIO;
1518
1519 if (test_bit(STATUS_RFKILL, &trans_pcie->status))
1520 return -ERFKILL;
1521
898 if (cmd->flags & CMD_ASYNC) 1522 if (cmd->flags & CMD_ASYNC)
899 return iwl_send_cmd_async(trans, cmd); 1523 return iwl_pcie_send_hcmd_async(trans, cmd);
900 1524
901 return iwl_send_cmd_sync(trans, cmd); 1525 /* We still can fail on RFKILL that can be asserted while we wait */
1526 return iwl_pcie_send_hcmd_sync(trans, cmd);
902} 1527}
903 1528
904/* Frees buffers until index _not_ inclusive */ 1529int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
905int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 1530 struct iwl_device_cmd *dev_cmd, int txq_id)
906 struct sk_buff_head *skbs)
907{ 1531{
908 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1532 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
909 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1533 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
910 struct iwl_queue *q = &txq->q; 1534 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
911 int last_to_free; 1535 struct iwl_cmd_meta *out_meta;
912 int freed = 0; 1536 struct iwl_txq *txq;
1537 struct iwl_queue *q;
1538 dma_addr_t phys_addr = 0;
1539 dma_addr_t txcmd_phys;
1540 dma_addr_t scratch_phys;
1541 u16 len, firstlen, secondlen;
1542 u8 wait_write_ptr = 0;
1543 __le16 fc = hdr->frame_control;
1544 u8 hdr_len = ieee80211_hdrlen(fc);
1545 u16 __maybe_unused wifi_seq;
1546
1547 txq = &trans_pcie->txq[txq_id];
1548 q = &txq->q;
913 1549
914 /* This function is not meant to release cmd queue*/ 1550 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
915 if (WARN_ON(txq_id == trans_pcie->cmd_queue)) 1551 WARN_ON_ONCE(1);
916 return 0; 1552 return -EINVAL;
1553 }
917 1554
918 lockdep_assert_held(&txq->lock); 1555 spin_lock(&txq->lock);
919 1556
920 /*Since we free until index _not_ inclusive, the one before index is 1557 /* In AGG mode, the index in the ring must correspond to the WiFi
921 * the last we will free. This one must be used */ 1558 * sequence number. This is a HW requirements to help the SCD to parse
922 last_to_free = iwl_queue_dec_wrap(index, q->n_bd); 1559 * the BA.
1560 * Check here that the packets are in the right place on the ring.
1561 */
1562#ifdef CONFIG_IWLWIFI_DEBUG
1563 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1564 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1565 ((wifi_seq & 0xff) != q->write_ptr),
1566 "Q: %d WiFi Seq %d tfdNum %d",
1567 txq_id, wifi_seq, q->write_ptr);
1568#endif
1569
1570 /* Set up driver data for this TFD */
1571 txq->entries[q->write_ptr].skb = skb;
1572 txq->entries[q->write_ptr].cmd = dev_cmd;
1573
1574 dev_cmd->hdr.cmd = REPLY_TX;
1575 dev_cmd->hdr.sequence =
1576 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1577 INDEX_TO_SEQ(q->write_ptr)));
1578
1579 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1580 out_meta = &txq->entries[q->write_ptr].meta;
923 1581
924 if ((index >= q->n_bd) || 1582 /*
925 (iwl_queue_used(q, last_to_free) == 0)) { 1583 * Use the first empty entry in this queue's command buffer array
926 IWL_ERR(trans, 1584 * to contain the Tx command and MAC header concatenated together
927 "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", 1585 * (payload data will be in another buffer).
928 __func__, txq_id, last_to_free, q->n_bd, 1586 * Size of this varies, due to varying MAC header length.
929 q->write_ptr, q->read_ptr); 1587 * If end is not dword aligned, we'll have 2 extra bytes at the end
930 return 0; 1588 * of the MAC header (device reads on dword boundaries).
1589 * We'll tell device about this padding later.
1590 */
1591 len = sizeof(struct iwl_tx_cmd) +
1592 sizeof(struct iwl_cmd_header) + hdr_len;
1593 firstlen = (len + 3) & ~3;
1594
1595 /* Tell NIC about any 2-byte padding after MAC header */
1596 if (firstlen != len)
1597 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1598
1599 /* Physical address of this Tx command's header (not MAC header!),
1600 * within command buffer array. */
1601 txcmd_phys = dma_map_single(trans->dev,
1602 &dev_cmd->hdr, firstlen,
1603 DMA_BIDIRECTIONAL);
1604 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1605 goto out_err;
1606 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1607 dma_unmap_len_set(out_meta, len, firstlen);
1608
1609 if (!ieee80211_has_morefrags(fc)) {
1610 txq->need_update = 1;
1611 } else {
1612 wait_write_ptr = 1;
1613 txq->need_update = 0;
931 } 1614 }
932 1615
933 if (WARN_ON(!skb_queue_empty(skbs))) 1616 /* Set up TFD's 2nd entry to point directly to remainder of skb,
934 return 0; 1617 * if any (802.11 null frames have no payload). */
1618 secondlen = skb->len - hdr_len;
1619 if (secondlen > 0) {
1620 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1621 secondlen, DMA_TO_DEVICE);
1622 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1623 dma_unmap_single(trans->dev,
1624 dma_unmap_addr(out_meta, mapping),
1625 dma_unmap_len(out_meta, len),
1626 DMA_BIDIRECTIONAL);
1627 goto out_err;
1628 }
1629 }
935 1630
936 for (; 1631 /* Attach buffers to TFD */
937 q->read_ptr != index; 1632 iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
938 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1633 if (secondlen > 0)
1634 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
939 1635
940 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL)) 1636 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
941 continue; 1637 offsetof(struct iwl_tx_cmd, scratch);
942 1638
943 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb); 1639 /* take back ownership of DMA buffer to enable update */
1640 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1641 DMA_BIDIRECTIONAL);
1642 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1643 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
944 1644
945 txq->entries[txq->q.read_ptr].skb = NULL; 1645 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1646 le16_to_cpu(dev_cmd->hdr.sequence));
1647 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
946 1648
947 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 1649 /* Set up entry for this TFD in Tx byte-count array */
1650 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
948 1651
949 iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 1652 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
950 freed++; 1653 DMA_BIDIRECTIONAL);
951 } 1654
1655 trace_iwlwifi_dev_tx(trans->dev, skb,
1656 &txq->tfds[txq->q.write_ptr],
1657 sizeof(struct iwl_tfd),
1658 &dev_cmd->hdr, firstlen,
1659 skb->data + hdr_len, secondlen);
1660 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1661 skb->data + hdr_len, secondlen);
952 1662
953 iwl_queue_progress(trans_pcie, txq); 1663 /* start timer if queue currently empty */
1664 if (txq->need_update && q->read_ptr == q->write_ptr &&
1665 trans_pcie->wd_timeout)
1666 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1667
1668 /* Tell device the write index *just past* this latest filled TFD */
1669 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1670 iwl_pcie_txq_inc_wr_ptr(trans, txq);
954 1671
955 return freed; 1672 /*
1673 * At this point the frame is "transmitted" successfully
1674 * and we will get a TX status notification eventually,
1675 * regardless of the value of ret. "ret" only indicates
1676 * whether or not we should update the write pointer.
1677 */
1678 if (iwl_queue_space(q) < q->high_mark) {
1679 if (wait_write_ptr) {
1680 txq->need_update = 1;
1681 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1682 } else {
1683 iwl_stop_queue(trans, txq);
1684 }
1685 }
1686 spin_unlock(&txq->lock);
1687 return 0;
1688out_err:
1689 spin_unlock(&txq->lock);
1690 return -1;
956} 1691}
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1c10b542ab23..ec6d5d6b452e 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -298,6 +298,7 @@ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
298 const u8 *rates_eid, *ext_rates_eid; 298 const u8 *rates_eid, *ext_rates_eid;
299 int n = 0; 299 int n = 0;
300 300
301 rcu_read_lock();
301 rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 302 rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
302 ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES); 303 ext_rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_EXT_SUPP_RATES);
303 304
@@ -325,6 +326,7 @@ static int lbs_add_common_rates_tlv(u8 *tlv, struct cfg80211_bss *bss)
325 *tlv++ = 0x96; 326 *tlv++ = 0x96;
326 n = 4; 327 n = 4;
327 } 328 }
329 rcu_read_unlock();
328 330
329 rate_tlv->header.len = cpu_to_le16(n); 331 rate_tlv->header.len = cpu_to_le16(n);
330 return sizeof(rate_tlv->header) + n; 332 return sizeof(rate_tlv->header) + n;
@@ -436,19 +438,19 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
436 */ 438 */
437 439
438static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy, 440static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
439 struct ieee80211_channel *channel, 441 struct cfg80211_chan_def *chandef)
440 enum nl80211_channel_type channel_type)
441{ 442{
442 struct lbs_private *priv = wiphy_priv(wiphy); 443 struct lbs_private *priv = wiphy_priv(wiphy);
443 int ret = -ENOTSUPP; 444 int ret = -ENOTSUPP;
444 445
445 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", 446 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
446 channel->center_freq, channel_type); 447 chandef->chan->center_freq,
448 cfg80211_get_chandef_type(chandef));
447 449
448 if (channel_type != NL80211_CHAN_NO_HT) 450 if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
449 goto out; 451 goto out;
450 452
451 ret = lbs_set_channel(priv, channel->hw_value); 453 ret = lbs_set_channel(priv, chandef->chan->hw_value);
452 454
453 out: 455 out:
454 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); 456 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -1140,11 +1142,13 @@ static int lbs_associate(struct lbs_private *priv,
1140 cmd->capability = cpu_to_le16(bss->capability); 1142 cmd->capability = cpu_to_le16(bss->capability);
1141 1143
1142 /* add SSID TLV */ 1144 /* add SSID TLV */
1145 rcu_read_lock();
1143 ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); 1146 ssid_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
1144 if (ssid_eid) 1147 if (ssid_eid)
1145 pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]); 1148 pos += lbs_add_ssid_tlv(pos, ssid_eid + 2, ssid_eid[1]);
1146 else 1149 else
1147 lbs_deb_assoc("no SSID\n"); 1150 lbs_deb_assoc("no SSID\n");
1151 rcu_read_unlock();
1148 1152
1149 /* add DS param TLV */ 1153 /* add DS param TLV */
1150 if (bss->channel) 1154 if (bss->channel)
@@ -1734,7 +1738,7 @@ static void lbs_join_post(struct lbs_private *priv,
1734 /* Fake DS channel IE */ 1738 /* Fake DS channel IE */
1735 *fake++ = WLAN_EID_DS_PARAMS; 1739 *fake++ = WLAN_EID_DS_PARAMS;
1736 *fake++ = 1; 1740 *fake++ = 1;
1737 *fake++ = params->channel->hw_value; 1741 *fake++ = params->chandef.chan->hw_value;
1738 /* Fake IBSS params IE */ 1742 /* Fake IBSS params IE */
1739 *fake++ = WLAN_EID_IBSS_PARAMS; 1743 *fake++ = WLAN_EID_IBSS_PARAMS;
1740 *fake++ = 2; 1744 *fake++ = 2;
@@ -1755,7 +1759,7 @@ static void lbs_join_post(struct lbs_private *priv,
1755 lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie); 1759 lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
1756 1760
1757 bss = cfg80211_inform_bss(priv->wdev->wiphy, 1761 bss = cfg80211_inform_bss(priv->wdev->wiphy,
1758 params->channel, 1762 params->chandef.chan,
1759 bssid, 1763 bssid,
1760 0, 1764 0,
1761 capability, 1765 capability,
@@ -1782,7 +1786,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
1782 struct cfg80211_ibss_params *params, 1786 struct cfg80211_ibss_params *params,
1783 struct cfg80211_bss *bss) 1787 struct cfg80211_bss *bss)
1784{ 1788{
1785 const u8 *rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES); 1789 const u8 *rates_eid;
1786 struct cmd_ds_802_11_ad_hoc_join cmd; 1790 struct cmd_ds_802_11_ad_hoc_join cmd;
1787 u8 preamble = RADIO_PREAMBLE_SHORT; 1791 u8 preamble = RADIO_PREAMBLE_SHORT;
1788 int ret = 0; 1792 int ret = 0;
@@ -1833,7 +1837,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
1833 cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval); 1837 cmd.bss.beaconperiod = cpu_to_le16(params->beacon_interval);
1834 cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS; 1838 cmd.bss.ds.header.id = WLAN_EID_DS_PARAMS;
1835 cmd.bss.ds.header.len = 1; 1839 cmd.bss.ds.header.len = 1;
1836 cmd.bss.ds.channel = params->channel->hw_value; 1840 cmd.bss.ds.channel = params->chandef.chan->hw_value;
1837 cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS; 1841 cmd.bss.ibss.header.id = WLAN_EID_IBSS_PARAMS;
1838 cmd.bss.ibss.header.len = 2; 1842 cmd.bss.ibss.header.len = 2;
1839 cmd.bss.ibss.atimwindow = 0; 1843 cmd.bss.ibss.atimwindow = 0;
@@ -1841,6 +1845,8 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
1841 1845
1842 /* set rates to the intersection of our rates and the rates in the 1846 /* set rates to the intersection of our rates and the rates in the
1843 bss */ 1847 bss */
1848 rcu_read_lock();
1849 rates_eid = ieee80211_bss_get_ie(bss, WLAN_EID_SUPP_RATES);
1844 if (!rates_eid) { 1850 if (!rates_eid) {
1845 lbs_add_rates(cmd.bss.rates); 1851 lbs_add_rates(cmd.bss.rates);
1846 } else { 1852 } else {
@@ -1860,6 +1866,7 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
1860 } 1866 }
1861 } 1867 }
1862 } 1868 }
1869 rcu_read_unlock();
1863 1870
1864 /* Only v8 and below support setting this */ 1871 /* Only v8 and below support setting this */
1865 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) { 1872 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) {
@@ -1942,7 +1949,7 @@ static int lbs_ibss_start_new(struct lbs_private *priv,
1942 cmd.ibss.atimwindow = 0; 1949 cmd.ibss.atimwindow = 0;
1943 cmd.ds.header.id = WLAN_EID_DS_PARAMS; 1950 cmd.ds.header.id = WLAN_EID_DS_PARAMS;
1944 cmd.ds.header.len = 1; 1951 cmd.ds.header.len = 1;
1945 cmd.ds.channel = params->channel->hw_value; 1952 cmd.ds.channel = params->chandef.chan->hw_value;
1946 /* Only v8 and below support setting probe delay */ 1953 /* Only v8 and below support setting probe delay */
1947 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8) 1954 if (MRVL_FW_MAJOR_REV(priv->fwrelease) <= 8)
1948 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME); 1955 cmd.probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
@@ -1987,18 +1994,18 @@ static int lbs_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1987 1994
1988 lbs_deb_enter(LBS_DEB_CFG80211); 1995 lbs_deb_enter(LBS_DEB_CFG80211);
1989 1996
1990 if (!params->channel) { 1997 if (!params->chandef.chan) {
1991 ret = -ENOTSUPP; 1998 ret = -ENOTSUPP;
1992 goto out; 1999 goto out;
1993 } 2000 }
1994 2001
1995 ret = lbs_set_channel(priv, params->channel->hw_value); 2002 ret = lbs_set_channel(priv, params->chandef.chan->hw_value);
1996 if (ret) 2003 if (ret)
1997 goto out; 2004 goto out;
1998 2005
1999 /* Search if someone is beaconing. This assumes that the 2006 /* Search if someone is beaconing. This assumes that the
2000 * bss list is populated already */ 2007 * bss list is populated already */
2001 bss = cfg80211_get_bss(wiphy, params->channel, params->bssid, 2008 bss = cfg80211_get_bss(wiphy, params->chandef.chan, params->bssid,
2002 params->ssid, params->ssid_len, 2009 params->ssid, params->ssid_len,
2003 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); 2010 WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
2004 2011
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 4cb234349fbf..739309e70d8b 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -588,17 +588,38 @@ static int if_sdio_prog_real(struct if_sdio_card *card,
588 size = fw->size; 588 size = fw->size;
589 589
590 while (size) { 590 while (size) {
591 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS); 591 timeout = jiffies + HZ;
592 if (ret) 592 while (1) {
593 goto release; 593 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
594 if (ret)
595 goto release;
594 596
595 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret); 597 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE,
596 if (ret) 598 &ret);
597 goto release; 599 if (ret)
600 goto release;
601
602 req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1,
603 &ret) << 8;
604 if (ret)
605 goto release;
606
607 /*
608 * For SD8688 wait until the length is not 0, 1 or 2
609 * before downloading the first FW block,
610 * since BOOT code writes the register to indicate the
611 * helper/FW download winner,
612 * the value could be 1 or 2 (Func1 or Func2).
613 */
614 if ((size != fw->size) || (req_size > 2))
615 break;
616 if (time_after(jiffies, timeout)) {
617 ret = -ETIMEDOUT;
618 goto release;
619 }
620 mdelay(1);
621 }
598 622
599 req_size |= sdio_readb(card->func, IF_SDIO_RD_BASE + 1, &ret) << 8;
600 if (ret)
601 goto release;
602/* 623/*
603 lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size); 624 lbs_deb_sdio("firmware wants %d bytes\n", (int)req_size);
604*/ 625*/
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 9604a1c4a74d..4bb6574f4073 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1124,7 +1124,7 @@ static void if_spi_resume_worker(struct work_struct *work)
1124 } 1124 }
1125} 1125}
1126 1126
1127static int __devinit if_spi_probe(struct spi_device *spi) 1127static int if_spi_probe(struct spi_device *spi)
1128{ 1128{
1129 struct if_spi_card *card; 1129 struct if_spi_card *card;
1130 struct lbs_private *priv = NULL; 1130 struct lbs_private *priv = NULL;
@@ -1226,7 +1226,7 @@ out:
1226 return err; 1226 return err;
1227} 1227}
1228 1228
1229static int __devexit libertas_spi_remove(struct spi_device *spi) 1229static int libertas_spi_remove(struct spi_device *spi)
1230{ 1230{
1231 struct if_spi_card *card = spi_get_drvdata(spi); 1231 struct if_spi_card *card = spi_get_drvdata(spi);
1232 struct lbs_private *priv = card->priv; 1232 struct lbs_private *priv = card->priv;
@@ -1285,7 +1285,7 @@ static const struct dev_pm_ops if_spi_pm_ops = {
1285 1285
1286static struct spi_driver libertas_spi_driver = { 1286static struct spi_driver libertas_spi_driver = {
1287 .probe = if_spi_probe, 1287 .probe = if_spi_probe,
1288 .remove = __devexit_p(libertas_spi_remove), 1288 .remove = libertas_spi_remove,
1289 .driver = { 1289 .driver = {
1290 .name = "libertas_spi", 1290 .name = "libertas_spi",
1291 .owner = THIS_MODULE, 1291 .owner = THIS_MODULE,
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 97807751ebcf..3e81264db81e 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -101,7 +101,7 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
101 101
102 switch (action) { 102 switch (action) {
103 case CMD_ACT_MESH_CONFIG_START: 103 case CMD_ACT_MESH_CONFIG_START:
104 ie->id = WLAN_EID_GENERIC; 104 ie->id = WLAN_EID_VENDOR_SPECIFIC;
105 ie->val.oui[0] = 0x00; 105 ie->val.oui[0] = 0x00;
106 ie->val.oui[1] = 0x50; 106 ie->val.oui[1] = 0x50;
107 ie->val.oui[2] = 0x43; 107 ie->val.oui[2] = 0x43;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 429ca3215fdb..ff9085502bea 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -44,9 +44,9 @@ static int radios = 2;
44module_param(radios, int, 0444); 44module_param(radios, int, 0444);
45MODULE_PARM_DESC(radios, "Number of simulated radios"); 45MODULE_PARM_DESC(radios, "Number of simulated radios");
46 46
47static bool fake_hw_scan; 47static int channels = 1;
48module_param(fake_hw_scan, bool, 0444); 48module_param(channels, int, 0444);
49MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler"); 49MODULE_PARM_DESC(channels, "Number of concurrent channels");
50 50
51/** 51/**
52 * enum hwsim_regtest - the type of regulatory tests we offer 52 * enum hwsim_regtest - the type of regulatory tests we offer
@@ -166,7 +166,9 @@ struct hwsim_vif_priv {
166static inline void hwsim_check_magic(struct ieee80211_vif *vif) 166static inline void hwsim_check_magic(struct ieee80211_vif *vif)
167{ 167{
168 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 168 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
169 WARN_ON(vp->magic != HWSIM_VIF_MAGIC); 169 WARN(vp->magic != HWSIM_VIF_MAGIC,
170 "Invalid VIF (%p) magic %#x, %pM, %d/%d\n",
171 vif, vp->magic, vif->addr, vif->type, vif->p2p);
170} 172}
171 173
172static inline void hwsim_set_magic(struct ieee80211_vif *vif) 174static inline void hwsim_set_magic(struct ieee80211_vif *vif)
@@ -185,7 +187,7 @@ struct hwsim_sta_priv {
185 u32 magic; 187 u32 magic;
186}; 188};
187 189
188#define HWSIM_STA_MAGIC 0x6d537748 190#define HWSIM_STA_MAGIC 0x6d537749
189 191
190static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) 192static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta)
191{ 193{
@@ -205,6 +207,30 @@ static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta)
205 sp->magic = 0; 207 sp->magic = 0;
206} 208}
207 209
210struct hwsim_chanctx_priv {
211 u32 magic;
212};
213
214#define HWSIM_CHANCTX_MAGIC 0x6d53774a
215
216static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c)
217{
218 struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
219 WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC);
220}
221
222static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c)
223{
224 struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
225 cp->magic = HWSIM_CHANCTX_MAGIC;
226}
227
228static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
229{
230 struct hwsim_chanctx_priv *cp = (void *)c->drv_priv;
231 cp->magic = 0;
232}
233
208static struct class *hwsim_class; 234static struct class *hwsim_class;
209 235
210static struct net_device *hwsim_mon; /* global monitor netdev */ 236static struct net_device *hwsim_mon; /* global monitor netdev */
@@ -299,6 +325,13 @@ struct mac80211_hwsim_data {
299 325
300 struct mac_address addresses[2]; 326 struct mac_address addresses[2];
301 327
328 struct ieee80211_channel *tmp_chan;
329 struct delayed_work roc_done;
330 struct delayed_work hw_scan;
331 struct cfg80211_scan_request *hw_scan_request;
332 struct ieee80211_vif *hw_scan_vif;
333 int scan_chan_idx;
334
302 struct ieee80211_channel *channel; 335 struct ieee80211_channel *channel;
303 unsigned long beacon_int; /* in jiffies unit */ 336 unsigned long beacon_int; /* in jiffies unit */
304 unsigned int rx_filter; 337 unsigned int rx_filter;
@@ -396,7 +429,8 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw,
396} 429}
397 430
398static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, 431static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
399 struct sk_buff *tx_skb) 432 struct sk_buff *tx_skb,
433 struct ieee80211_channel *chan)
400{ 434{
401 struct mac80211_hwsim_data *data = hw->priv; 435 struct mac80211_hwsim_data *data = hw->priv;
402 struct sk_buff *skb; 436 struct sk_buff *skb;
@@ -423,7 +457,7 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
423 hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); 457 hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
424 hdr->rt_flags = 0; 458 hdr->rt_flags = 0;
425 hdr->rt_rate = txrate->bitrate / 5; 459 hdr->rt_rate = txrate->bitrate / 5;
426 hdr->rt_channel = cpu_to_le16(data->channel->center_freq); 460 hdr->rt_channel = cpu_to_le16(chan->center_freq);
427 flags = IEEE80211_CHAN_2GHZ; 461 flags = IEEE80211_CHAN_2GHZ;
428 if (txrate->flags & IEEE80211_RATE_ERP_G) 462 if (txrate->flags & IEEE80211_RATE_ERP_G)
429 flags |= IEEE80211_CHAN_OFDM; 463 flags |= IEEE80211_CHAN_OFDM;
@@ -441,9 +475,9 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
441} 475}
442 476
443 477
444static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr) 478static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
479 const u8 *addr)
445{ 480{
446 struct mac80211_hwsim_data *data = hw->priv;
447 struct sk_buff *skb; 481 struct sk_buff *skb;
448 struct hwsim_radiotap_hdr *hdr; 482 struct hwsim_radiotap_hdr *hdr;
449 u16 flags; 483 u16 flags;
@@ -464,7 +498,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
464 (1 << IEEE80211_RADIOTAP_CHANNEL)); 498 (1 << IEEE80211_RADIOTAP_CHANNEL));
465 hdr->rt_flags = 0; 499 hdr->rt_flags = 0;
466 hdr->rt_rate = 0; 500 hdr->rt_rate = 0;
467 hdr->rt_channel = cpu_to_le16(data->channel->center_freq); 501 hdr->rt_channel = cpu_to_le16(chan->center_freq);
468 flags = IEEE80211_CHAN_2GHZ; 502 flags = IEEE80211_CHAN_2GHZ;
469 hdr->rt_chbitmask = cpu_to_le16(flags); 503 hdr->rt_chbitmask = cpu_to_le16(flags);
470 504
@@ -537,6 +571,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
537 md.ret = false; 571 md.ret = false;
538 md.addr = addr; 572 md.addr = addr;
539 ieee80211_iterate_active_interfaces_atomic(data->hw, 573 ieee80211_iterate_active_interfaces_atomic(data->hw,
574 IEEE80211_IFACE_ITER_NORMAL,
540 mac80211_hwsim_addr_iter, 575 mac80211_hwsim_addr_iter,
541 &md); 576 &md);
542 577
@@ -556,12 +591,6 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
556 int i; 591 int i;
557 struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; 592 struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES];
558 593
559 if (data->idle) {
560 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
561 dev_kfree_skb(my_skb);
562 return;
563 }
564
565 if (data->ps != PS_DISABLED) 594 if (data->ps != PS_DISABLED)
566 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 595 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
567 /* If the queue contains MAX_QUEUE skb's drop some */ 596 /* If the queue contains MAX_QUEUE skb's drop some */
@@ -629,8 +658,38 @@ nla_put_failure:
629 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); 658 printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
630} 659}
631 660
661static bool hwsim_chans_compat(struct ieee80211_channel *c1,
662 struct ieee80211_channel *c2)
663{
664 if (!c1 || !c2)
665 return false;
666
667 return c1->center_freq == c2->center_freq;
668}
669
670struct tx_iter_data {
671 struct ieee80211_channel *channel;
672 bool receive;
673};
674
675static void mac80211_hwsim_tx_iter(void *_data, u8 *addr,
676 struct ieee80211_vif *vif)
677{
678 struct tx_iter_data *data = _data;
679
680 if (!vif->chanctx_conf)
681 return;
682
683 if (!hwsim_chans_compat(data->channel,
684 rcu_dereference(vif->chanctx_conf)->def.chan))
685 return;
686
687 data->receive = true;
688}
689
632static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, 690static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
633 struct sk_buff *skb) 691 struct sk_buff *skb,
692 struct ieee80211_channel *chan)
634{ 693{
635 struct mac80211_hwsim_data *data = hw->priv, *data2; 694 struct mac80211_hwsim_data *data = hw->priv, *data2;
636 bool ack = false; 695 bool ack = false;
@@ -639,15 +698,10 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
639 struct ieee80211_rx_status rx_status; 698 struct ieee80211_rx_status rx_status;
640 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); 699 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
641 700
642 if (data->idle) {
643 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
644 return false;
645 }
646
647 memset(&rx_status, 0, sizeof(rx_status)); 701 memset(&rx_status, 0, sizeof(rx_status));
648 rx_status.flag |= RX_FLAG_MACTIME_MPDU; 702 rx_status.flag |= RX_FLAG_MACTIME_START;
649 rx_status.freq = data->channel->center_freq; 703 rx_status.freq = chan->center_freq;
650 rx_status.band = data->channel->band; 704 rx_status.band = chan->band;
651 rx_status.rate_idx = info->control.rates[0].idx; 705 rx_status.rate_idx = info->control.rates[0].idx;
652 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) 706 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
653 rx_status.flag |= RX_FLAG_HT; 707 rx_status.flag |= RX_FLAG_HT;
@@ -673,17 +727,35 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
673 list_for_each_entry(data2, &hwsim_radios, list) { 727 list_for_each_entry(data2, &hwsim_radios, list) {
674 struct sk_buff *nskb; 728 struct sk_buff *nskb;
675 struct ieee80211_mgmt *mgmt; 729 struct ieee80211_mgmt *mgmt;
730 struct tx_iter_data tx_iter_data = {
731 .receive = false,
732 .channel = chan,
733 };
676 734
677 if (data == data2) 735 if (data == data2)
678 continue; 736 continue;
679 737
680 if (data2->idle || !data2->started || 738 if (!data2->started || (data2->idle && !data2->tmp_chan) ||
681 !hwsim_ps_rx_ok(data2, skb) || !data2->channel || 739 !hwsim_ps_rx_ok(data2, skb))
682 data->channel->center_freq != data2->channel->center_freq ||
683 !(data->group & data2->group))
684 continue; 740 continue;
685 741
686 nskb = skb_copy(skb, GFP_ATOMIC); 742 if (!(data->group & data2->group))
743 continue;
744
745 if (!hwsim_chans_compat(chan, data2->tmp_chan) &&
746 !hwsim_chans_compat(chan, data2->channel)) {
747 ieee80211_iterate_active_interfaces_atomic(
748 data2->hw, IEEE80211_IFACE_ITER_NORMAL,
749 mac80211_hwsim_tx_iter, &tx_iter_data);
750 if (!tx_iter_data.receive)
751 continue;
752 }
753
754 /*
755 * reserve some space for our vendor and the normal
756 * radiotap header, since we're copying anyway
757 */
758 nskb = skb_copy_expand(skb, 64, 0, GFP_ATOMIC);
687 if (nskb == NULL) 759 if (nskb == NULL)
688 continue; 760 continue;
689 761
@@ -701,6 +773,33 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
701 (data->tsf_offset - data2->tsf_offset) + 773 (data->tsf_offset - data2->tsf_offset) +
702 24 * 8 * 10 / txrate->bitrate); 774 24 * 8 * 10 / txrate->bitrate);
703 775
776#if 0
777 /*
778 * Don't enable this code by default as the OUI 00:00:00
779 * is registered to Xerox so we shouldn't use it here, it
780 * might find its way into pcap files.
781 * Note that this code requires the headroom in the SKB
782 * that was allocated earlier.
783 */
784 rx_status.vendor_radiotap_oui[0] = 0x00;
785 rx_status.vendor_radiotap_oui[1] = 0x00;
786 rx_status.vendor_radiotap_oui[2] = 0x00;
787 rx_status.vendor_radiotap_subns = 127;
788 /*
789 * Radiotap vendor namespaces can (and should) also be
790 * split into fields by using the standard radiotap
791 * presence bitmap mechanism. Use just BIT(0) here for
792 * the presence bitmap.
793 */
794 rx_status.vendor_radiotap_bitmap = BIT(0);
795 /* We have 8 bytes of (dummy) data */
796 rx_status.vendor_radiotap_len = 8;
797 /* For testing, also require it to be aligned */
798 rx_status.vendor_radiotap_align = 8;
799 /* push the data */
800 memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
801#endif
802
704 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 803 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
705 ieee80211_rx_irqsafe(data2->hw, nskb); 804 ieee80211_rx_irqsafe(data2->hw, nskb);
706 } 805 }
@@ -713,18 +812,51 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
713 struct ieee80211_tx_control *control, 812 struct ieee80211_tx_control *control,
714 struct sk_buff *skb) 813 struct sk_buff *skb)
715{ 814{
815 struct mac80211_hwsim_data *data = hw->priv;
816 struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
817 struct ieee80211_chanctx_conf *chanctx_conf;
818 struct ieee80211_channel *channel;
716 bool ack; 819 bool ack;
717 struct ieee80211_tx_info *txi;
718 u32 _portid; 820 u32 _portid;
719 821
720 mac80211_hwsim_monitor_rx(hw, skb); 822 if (WARN_ON(skb->len < 10)) {
721
722 if (skb->len < 10) {
723 /* Should not happen; just a sanity check for addr1 use */ 823 /* Should not happen; just a sanity check for addr1 use */
724 dev_kfree_skb(skb); 824 dev_kfree_skb(skb);
725 return; 825 return;
726 } 826 }
727 827
828 if (channels == 1) {
829 channel = data->channel;
830 } else if (txi->hw_queue == 4) {
831 channel = data->tmp_chan;
832 } else {
833 chanctx_conf = rcu_dereference(txi->control.vif->chanctx_conf);
834 if (chanctx_conf)
835 channel = chanctx_conf->def.chan;
836 else
837 channel = NULL;
838 }
839
840 if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
841 dev_kfree_skb(skb);
842 return;
843 }
844
845 if (data->idle && !data->tmp_chan) {
846 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
847 dev_kfree_skb(skb);
848 return;
849 }
850
851 if (txi->control.vif)
852 hwsim_check_magic(txi->control.vif);
853 if (control->sta)
854 hwsim_check_sta_magic(control->sta);
855
856 txi->rate_driver_data[0] = channel;
857
858 mac80211_hwsim_monitor_rx(hw, skb, channel);
859
728 /* wmediumd mode check */ 860 /* wmediumd mode check */
729 _portid = ACCESS_ONCE(wmediumd_portid); 861 _portid = ACCESS_ONCE(wmediumd_portid);
730 862
@@ -732,15 +864,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
732 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); 864 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
733 865
734 /* NO wmediumd detected, perfect medium simulation */ 866 /* NO wmediumd detected, perfect medium simulation */
735 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb); 867 ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
736 868
737 if (ack && skb->len >= 16) { 869 if (ack && skb->len >= 16) {
738 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 870 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
739 mac80211_hwsim_monitor_ack(hw, hdr->addr2); 871 mac80211_hwsim_monitor_ack(channel, hdr->addr2);
740 } 872 }
741 873
742 txi = IEEE80211_SKB_CB(skb);
743
744 ieee80211_tx_info_clear_status(txi); 874 ieee80211_tx_info_clear_status(txi);
745 875
746 /* frame was transmitted at most favorable rate at first attempt */ 876 /* frame was transmitted at most favorable rate at first attempt */
@@ -778,6 +908,13 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
778 __func__, ieee80211_vif_type_p2p(vif), 908 __func__, ieee80211_vif_type_p2p(vif),
779 vif->addr); 909 vif->addr);
780 hwsim_set_magic(vif); 910 hwsim_set_magic(vif);
911
912 vif->cab_queue = 0;
913 vif->hw_queue[IEEE80211_AC_VO] = 0;
914 vif->hw_queue[IEEE80211_AC_VI] = 1;
915 vif->hw_queue[IEEE80211_AC_BE] = 2;
916 vif->hw_queue[IEEE80211_AC_BK] = 3;
917
781 return 0; 918 return 0;
782} 919}
783 920
@@ -807,14 +944,26 @@ static void mac80211_hwsim_remove_interface(
807 hwsim_clear_magic(vif); 944 hwsim_clear_magic(vif);
808} 945}
809 946
947static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
948 struct sk_buff *skb,
949 struct ieee80211_channel *chan)
950{
951 u32 _pid = ACCESS_ONCE(wmediumd_portid);
952
953 mac80211_hwsim_monitor_rx(hw, skb, chan);
954
955 if (_pid)
956 return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
957
958 mac80211_hwsim_tx_frame_no_nl(hw, skb, chan);
959 dev_kfree_skb(skb);
960}
810 961
811static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, 962static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
812 struct ieee80211_vif *vif) 963 struct ieee80211_vif *vif)
813{ 964{
814 struct ieee80211_hw *hw = arg; 965 struct ieee80211_hw *hw = arg;
815 struct sk_buff *skb; 966 struct sk_buff *skb;
816 struct ieee80211_tx_info *info;
817 u32 _portid;
818 967
819 hwsim_check_magic(vif); 968 hwsim_check_magic(vif);
820 969
@@ -826,18 +975,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
826 skb = ieee80211_beacon_get(hw, vif); 975 skb = ieee80211_beacon_get(hw, vif);
827 if (skb == NULL) 976 if (skb == NULL)
828 return; 977 return;
829 info = IEEE80211_SKB_CB(skb);
830
831 mac80211_hwsim_monitor_rx(hw, skb);
832
833 /* wmediumd mode check */
834 _portid = ACCESS_ONCE(wmediumd_portid);
835 978
836 if (_portid) 979 mac80211_hwsim_tx_frame(hw, skb,
837 return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); 980 rcu_dereference(vif->chanctx_conf)->def.chan);
838
839 mac80211_hwsim_tx_frame_no_nl(hw, skb);
840 dev_kfree_skb(skb);
841} 981}
842 982
843 983
@@ -850,7 +990,8 @@ static void mac80211_hwsim_beacon(unsigned long arg)
850 return; 990 return;
851 991
852 ieee80211_iterate_active_interfaces_atomic( 992 ieee80211_iterate_active_interfaces_atomic(
853 hw, mac80211_hwsim_beacon_tx, hw); 993 hw, IEEE80211_IFACE_ITER_NORMAL,
994 mac80211_hwsim_beacon_tx, hw);
854 995
855 data->beacon_timer.expires = jiffies + data->beacon_int; 996 data->beacon_timer.expires = jiffies + data->beacon_int;
856 add_timer(&data->beacon_timer); 997 add_timer(&data->beacon_timer);
@@ -877,7 +1018,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
877 wiphy_debug(hw->wiphy, 1018 wiphy_debug(hw->wiphy,
878 "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", 1019 "%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
879 __func__, 1020 __func__,
880 conf->channel->center_freq, 1021 conf->channel ? conf->channel->center_freq : 0,
881 hwsim_chantypes[conf->channel_type], 1022 hwsim_chantypes[conf->channel_type],
882 !!(conf->flags & IEEE80211_CONF_IDLE), 1023 !!(conf->flags & IEEE80211_CONF_IDLE),
883 !!(conf->flags & IEEE80211_CONF_PS), 1024 !!(conf->flags & IEEE80211_CONF_PS),
@@ -886,6 +1027,9 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
886 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 1027 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
887 1028
888 data->channel = conf->channel; 1029 data->channel = conf->channel;
1030
1031 WARN_ON(data->channel && channels > 1);
1032
889 data->power_level = conf->power_level; 1033 data->power_level = conf->power_level;
890 if (!data->started || !data->beacon_int) 1034 if (!data->started || !data->beacon_int)
891 del_timer(&data->beacon_timer); 1035 del_timer(&data->beacon_timer);
@@ -963,15 +1107,17 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
963 } 1107 }
964 1108
965 if (changed & BSS_CHANGED_HT) { 1109 if (changed & BSS_CHANGED_HT) {
966 wiphy_debug(hw->wiphy, " HT: op_mode=0x%x, chantype=%s\n", 1110 wiphy_debug(hw->wiphy, " HT: op_mode=0x%x\n",
967 info->ht_operation_mode, 1111 info->ht_operation_mode);
968 hwsim_chantypes[info->channel_type]);
969 } 1112 }
970 1113
971 if (changed & BSS_CHANGED_BASIC_RATES) { 1114 if (changed & BSS_CHANGED_BASIC_RATES) {
972 wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n", 1115 wiphy_debug(hw->wiphy, " BASIC_RATES: 0x%llx\n",
973 (unsigned long long) info->basic_rates); 1116 (unsigned long long) info->basic_rates);
974 } 1117 }
1118
1119 if (changed & BSS_CHANGED_TXPOWER)
1120 wiphy_debug(hw->wiphy, " TX Power: %d dBm\n", info->txpower);
975} 1121}
976 1122
977static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, 1123static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
@@ -1166,45 +1312,101 @@ static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
1166 /* Not implemented, queues only on kernel side */ 1312 /* Not implemented, queues only on kernel side */
1167} 1313}
1168 1314
1169struct hw_scan_done { 1315static void hw_scan_work(struct work_struct *work)
1170 struct delayed_work w;
1171 struct ieee80211_hw *hw;
1172};
1173
1174static void hw_scan_done(struct work_struct *work)
1175{ 1316{
1176 struct hw_scan_done *hsd = 1317 struct mac80211_hwsim_data *hwsim =
1177 container_of(work, struct hw_scan_done, w.work); 1318 container_of(work, struct mac80211_hwsim_data, hw_scan.work);
1319 struct cfg80211_scan_request *req = hwsim->hw_scan_request;
1320 int dwell, i;
1321
1322 mutex_lock(&hwsim->mutex);
1323 if (hwsim->scan_chan_idx >= req->n_channels) {
1324 wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n");
1325 ieee80211_scan_completed(hwsim->hw, false);
1326 hwsim->hw_scan_request = NULL;
1327 hwsim->hw_scan_vif = NULL;
1328 hwsim->tmp_chan = NULL;
1329 mutex_unlock(&hwsim->mutex);
1330 return;
1331 }
1332
1333 wiphy_debug(hwsim->hw->wiphy, "hw scan %d MHz\n",
1334 req->channels[hwsim->scan_chan_idx]->center_freq);
1335
1336 hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx];
1337 if (hwsim->tmp_chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
1338 !req->n_ssids) {
1339 dwell = 120;
1340 } else {
1341 dwell = 30;
1342 /* send probes */
1343 for (i = 0; i < req->n_ssids; i++) {
1344 struct sk_buff *probe;
1345
1346 probe = ieee80211_probereq_get(hwsim->hw,
1347 hwsim->hw_scan_vif,
1348 req->ssids[i].ssid,
1349 req->ssids[i].ssid_len,
1350 req->ie_len);
1351 if (!probe)
1352 continue;
1353
1354 if (req->ie_len)
1355 memcpy(skb_put(probe, req->ie_len), req->ie,
1356 req->ie_len);
1178 1357
1179 ieee80211_scan_completed(hsd->hw, false); 1358 local_bh_disable();
1180 kfree(hsd); 1359 mac80211_hwsim_tx_frame(hwsim->hw, probe,
1360 hwsim->tmp_chan);
1361 local_bh_enable();
1362 }
1363 }
1364 ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan,
1365 msecs_to_jiffies(dwell));
1366 hwsim->scan_chan_idx++;
1367 mutex_unlock(&hwsim->mutex);
1181} 1368}
1182 1369
1183static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, 1370static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
1184 struct ieee80211_vif *vif, 1371 struct ieee80211_vif *vif,
1185 struct cfg80211_scan_request *req) 1372 struct cfg80211_scan_request *req)
1186{ 1373{
1187 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL); 1374 struct mac80211_hwsim_data *hwsim = hw->priv;
1188 int i;
1189
1190 if (!hsd)
1191 return -ENOMEM;
1192 1375
1193 hsd->hw = hw; 1376 mutex_lock(&hwsim->mutex);
1194 INIT_DELAYED_WORK(&hsd->w, hw_scan_done); 1377 if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
1378 mutex_unlock(&hwsim->mutex);
1379 return -EBUSY;
1380 }
1381 hwsim->hw_scan_request = req;
1382 hwsim->hw_scan_vif = vif;
1383 hwsim->scan_chan_idx = 0;
1384 mutex_unlock(&hwsim->mutex);
1195 1385
1196 printk(KERN_DEBUG "hwsim hw_scan request\n"); 1386 wiphy_debug(hw->wiphy, "hwsim hw_scan request\n");
1197 for (i = 0; i < req->n_channels; i++)
1198 printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
1199 req->channels[i]->center_freq);
1200 print_hex_dump(KERN_DEBUG, "scan IEs: ", DUMP_PREFIX_OFFSET,
1201 16, 1, req->ie, req->ie_len, 1);
1202 1387
1203 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ); 1388 ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0);
1204 1389
1205 return 0; 1390 return 0;
1206} 1391}
1207 1392
1393static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw,
1394 struct ieee80211_vif *vif)
1395{
1396 struct mac80211_hwsim_data *hwsim = hw->priv;
1397
1398 wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n");
1399
1400 cancel_delayed_work_sync(&hwsim->hw_scan);
1401
1402 mutex_lock(&hwsim->mutex);
1403 ieee80211_scan_completed(hwsim->hw, true);
1404 hwsim->tmp_chan = NULL;
1405 hwsim->hw_scan_request = NULL;
1406 hwsim->hw_scan_vif = NULL;
1407 mutex_unlock(&hwsim->mutex);
1408}
1409
1208static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw) 1410static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
1209{ 1411{
1210 struct mac80211_hwsim_data *hwsim = hw->priv; 1412 struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -1235,6 +1437,111 @@ static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
1235 mutex_unlock(&hwsim->mutex); 1437 mutex_unlock(&hwsim->mutex);
1236} 1438}
1237 1439
1440static void hw_roc_done(struct work_struct *work)
1441{
1442 struct mac80211_hwsim_data *hwsim =
1443 container_of(work, struct mac80211_hwsim_data, roc_done.work);
1444
1445 mutex_lock(&hwsim->mutex);
1446 ieee80211_remain_on_channel_expired(hwsim->hw);
1447 hwsim->tmp_chan = NULL;
1448 mutex_unlock(&hwsim->mutex);
1449
1450 wiphy_debug(hwsim->hw->wiphy, "hwsim ROC expired\n");
1451}
1452
1453static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
1454 struct ieee80211_vif *vif,
1455 struct ieee80211_channel *chan,
1456 int duration)
1457{
1458 struct mac80211_hwsim_data *hwsim = hw->priv;
1459
1460 mutex_lock(&hwsim->mutex);
1461 if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) {
1462 mutex_unlock(&hwsim->mutex);
1463 return -EBUSY;
1464 }
1465
1466 hwsim->tmp_chan = chan;
1467 mutex_unlock(&hwsim->mutex);
1468
1469 wiphy_debug(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n",
1470 chan->center_freq, duration);
1471
1472 ieee80211_ready_on_channel(hw);
1473
1474 ieee80211_queue_delayed_work(hw, &hwsim->roc_done,
1475 msecs_to_jiffies(duration));
1476 return 0;
1477}
1478
1479static int mac80211_hwsim_croc(struct ieee80211_hw *hw)
1480{
1481 struct mac80211_hwsim_data *hwsim = hw->priv;
1482
1483 cancel_delayed_work_sync(&hwsim->roc_done);
1484
1485 mutex_lock(&hwsim->mutex);
1486 hwsim->tmp_chan = NULL;
1487 mutex_unlock(&hwsim->mutex);
1488
1489 wiphy_debug(hw->wiphy, "hwsim ROC canceled\n");
1490
1491 return 0;
1492}
1493
1494static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
1495 struct ieee80211_chanctx_conf *ctx)
1496{
1497 hwsim_set_chanctx_magic(ctx);
1498 wiphy_debug(hw->wiphy,
1499 "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
1500 ctx->def.chan->center_freq, ctx->def.width,
1501 ctx->def.center_freq1, ctx->def.center_freq2);
1502 return 0;
1503}
1504
1505static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw,
1506 struct ieee80211_chanctx_conf *ctx)
1507{
1508 wiphy_debug(hw->wiphy,
1509 "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
1510 ctx->def.chan->center_freq, ctx->def.width,
1511 ctx->def.center_freq1, ctx->def.center_freq2);
1512 hwsim_check_chanctx_magic(ctx);
1513 hwsim_clear_chanctx_magic(ctx);
1514}
1515
1516static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw,
1517 struct ieee80211_chanctx_conf *ctx,
1518 u32 changed)
1519{
1520 hwsim_check_chanctx_magic(ctx);
1521 wiphy_debug(hw->wiphy,
1522 "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
1523 ctx->def.chan->center_freq, ctx->def.width,
1524 ctx->def.center_freq1, ctx->def.center_freq2);
1525}
1526
1527static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw,
1528 struct ieee80211_vif *vif,
1529 struct ieee80211_chanctx_conf *ctx)
1530{
1531 hwsim_check_magic(vif);
1532 hwsim_check_chanctx_magic(ctx);
1533
1534 return 0;
1535}
1536
1537static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw,
1538 struct ieee80211_vif *vif,
1539 struct ieee80211_chanctx_conf *ctx)
1540{
1541 hwsim_check_magic(vif);
1542 hwsim_check_chanctx_magic(ctx);
1543}
1544
1238static struct ieee80211_ops mac80211_hwsim_ops = 1545static struct ieee80211_ops mac80211_hwsim_ops =
1239{ 1546{
1240 .tx = mac80211_hwsim_tx, 1547 .tx = mac80211_hwsim_tx,
@@ -1315,7 +1622,6 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1315 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1622 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1316 struct sk_buff *skb; 1623 struct sk_buff *skb;
1317 struct ieee80211_pspoll *pspoll; 1624 struct ieee80211_pspoll *pspoll;
1318 u32 _portid;
1319 1625
1320 if (!vp->assoc) 1626 if (!vp->assoc)
1321 return; 1627 return;
@@ -1335,25 +1641,18 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
1335 memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); 1641 memcpy(pspoll->bssid, vp->bssid, ETH_ALEN);
1336 memcpy(pspoll->ta, mac, ETH_ALEN); 1642 memcpy(pspoll->ta, mac, ETH_ALEN);
1337 1643
1338 /* wmediumd mode check */ 1644 rcu_read_lock();
1339 _portid = ACCESS_ONCE(wmediumd_portid); 1645 mac80211_hwsim_tx_frame(data->hw, skb,
1340 1646 rcu_dereference(vif->chanctx_conf)->def.chan);
1341 if (_portid) 1647 rcu_read_unlock();
1342 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1343
1344 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1345 printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
1346 dev_kfree_skb(skb);
1347} 1648}
1348 1649
1349
1350static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, 1650static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1351 struct ieee80211_vif *vif, int ps) 1651 struct ieee80211_vif *vif, int ps)
1352{ 1652{
1353 struct hwsim_vif_priv *vp = (void *)vif->drv_priv; 1653 struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
1354 struct sk_buff *skb; 1654 struct sk_buff *skb;
1355 struct ieee80211_hdr *hdr; 1655 struct ieee80211_hdr *hdr;
1356 u32 _portid;
1357 1656
1358 if (!vp->assoc) 1657 if (!vp->assoc)
1359 return; 1658 return;
@@ -1374,15 +1673,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
1374 memcpy(hdr->addr2, mac, ETH_ALEN); 1673 memcpy(hdr->addr2, mac, ETH_ALEN);
1375 memcpy(hdr->addr3, vp->bssid, ETH_ALEN); 1674 memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
1376 1675
1377 /* wmediumd mode check */ 1676 rcu_read_lock();
1378 _portid = ACCESS_ONCE(wmediumd_portid); 1677 mac80211_hwsim_tx_frame(data->hw, skb,
1379 1678 rcu_dereference(vif->chanctx_conf)->def.chan);
1380 if (_portid) 1679 rcu_read_unlock();
1381 return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
1382
1383 if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
1384 printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
1385 dev_kfree_skb(skb);
1386} 1680}
1387 1681
1388 1682
@@ -1423,14 +1717,17 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
1423 1717
1424 if (val == PS_MANUAL_POLL) { 1718 if (val == PS_MANUAL_POLL) {
1425 ieee80211_iterate_active_interfaces(data->hw, 1719 ieee80211_iterate_active_interfaces(data->hw,
1720 IEEE80211_IFACE_ITER_NORMAL,
1426 hwsim_send_ps_poll, data); 1721 hwsim_send_ps_poll, data);
1427 data->ps_poll_pending = true; 1722 data->ps_poll_pending = true;
1428 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { 1723 } else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
1429 ieee80211_iterate_active_interfaces(data->hw, 1724 ieee80211_iterate_active_interfaces(data->hw,
1725 IEEE80211_IFACE_ITER_NORMAL,
1430 hwsim_send_nullfunc_ps, 1726 hwsim_send_nullfunc_ps,
1431 data); 1727 data);
1432 } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { 1728 } else if (old_ps != PS_DISABLED && val == PS_DISABLED) {
1433 ieee80211_iterate_active_interfaces(data->hw, 1729 ieee80211_iterate_active_interfaces(data->hw,
1730 IEEE80211_IFACE_ITER_NORMAL,
1434 hwsim_send_nullfunc_no_ps, 1731 hwsim_send_nullfunc_no_ps,
1435 data); 1732 data);
1436 } 1733 }
@@ -1551,7 +1848,8 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1551 (hwsim_flags & HWSIM_TX_STAT_ACK)) { 1848 (hwsim_flags & HWSIM_TX_STAT_ACK)) {
1552 if (skb->len >= 16) { 1849 if (skb->len >= 16) {
1553 hdr = (struct ieee80211_hdr *) skb->data; 1850 hdr = (struct ieee80211_hdr *) skb->data;
1554 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1851 mac80211_hwsim_monitor_ack(txi->rate_driver_data[0],
1852 hdr->addr2);
1555 } 1853 }
1556 txi->flags |= IEEE80211_TX_STAT_ACK; 1854 txi->flags |= IEEE80211_TX_STAT_ACK;
1557 } 1855 }
@@ -1566,7 +1864,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
1566 struct genl_info *info) 1864 struct genl_info *info)
1567{ 1865{
1568 1866
1569 struct mac80211_hwsim_data *data2; 1867 struct mac80211_hwsim_data *data2;
1570 struct ieee80211_rx_status rx_status; 1868 struct ieee80211_rx_status rx_status;
1571 struct mac_address *dst; 1869 struct mac_address *dst;
1572 int frame_data_len; 1870 int frame_data_len;
@@ -1574,9 +1872,9 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
1574 struct sk_buff *skb = NULL; 1872 struct sk_buff *skb = NULL;
1575 1873
1576 if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || 1874 if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] ||
1577 !info->attrs[HWSIM_ATTR_FRAME] || 1875 !info->attrs[HWSIM_ATTR_FRAME] ||
1578 !info->attrs[HWSIM_ATTR_RX_RATE] || 1876 !info->attrs[HWSIM_ATTR_RX_RATE] ||
1579 !info->attrs[HWSIM_ATTR_SIGNAL]) 1877 !info->attrs[HWSIM_ATTR_SIGNAL])
1580 goto out; 1878 goto out;
1581 1879
1582 dst = (struct mac_address *)nla_data( 1880 dst = (struct mac_address *)nla_data(
@@ -1604,7 +1902,7 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2,
1604 1902
1605 /* check if radio is configured properly */ 1903 /* check if radio is configured properly */
1606 1904
1607 if (data2->idle || !data2->started || !data2->channel) 1905 if (data2->idle || !data2->started)
1608 goto out; 1906 goto out;
1609 1907
1610 /*A frame is received from user space*/ 1908 /*A frame is received from user space*/
@@ -1688,6 +1986,11 @@ static struct notifier_block hwsim_netlink_notifier = {
1688static int hwsim_init_netlink(void) 1986static int hwsim_init_netlink(void)
1689{ 1987{
1690 int rc; 1988 int rc;
1989
1990 /* userspace test API hasn't been adjusted for multi-channel */
1991 if (channels > 1)
1992 return 0;
1993
1691 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); 1994 printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
1692 1995
1693 rc = genl_register_family_with_ops(&hwsim_genl_family, 1996 rc = genl_register_family_with_ops(&hwsim_genl_family,
@@ -1710,6 +2013,10 @@ static void hwsim_exit_netlink(void)
1710{ 2013{
1711 int ret; 2014 int ret;
1712 2015
2016 /* userspace test API hasn't been adjusted for multi-channel */
2017 if (channels > 1)
2018 return;
2019
1713 printk(KERN_INFO "mac80211_hwsim: closing netlink\n"); 2020 printk(KERN_INFO "mac80211_hwsim: closing netlink\n");
1714 /* unregister the notifier */ 2021 /* unregister the notifier */
1715 netlink_unregister_notifier(&hwsim_netlink_notifier); 2022 netlink_unregister_notifier(&hwsim_netlink_notifier);
@@ -1732,7 +2039,7 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1732 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }, 2039 { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
1733}; 2040};
1734 2041
1735static const struct ieee80211_iface_combination hwsim_if_comb = { 2042static struct ieee80211_iface_combination hwsim_if_comb = {
1736 .limits = hwsim_if_limits, 2043 .limits = hwsim_if_limits,
1737 .n_limits = ARRAY_SIZE(hwsim_if_limits), 2044 .n_limits = ARRAY_SIZE(hwsim_if_limits),
1738 .max_interfaces = 2048, 2045 .max_interfaces = 2048,
@@ -1750,10 +2057,30 @@ static int __init init_mac80211_hwsim(void)
1750 if (radios < 1 || radios > 100) 2057 if (radios < 1 || radios > 100)
1751 return -EINVAL; 2058 return -EINVAL;
1752 2059
1753 if (fake_hw_scan) { 2060 if (channels < 1)
2061 return -EINVAL;
2062
2063 if (channels > 1) {
2064 hwsim_if_comb.num_different_channels = channels;
1754 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; 2065 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
2066 mac80211_hwsim_ops.cancel_hw_scan =
2067 mac80211_hwsim_cancel_hw_scan;
1755 mac80211_hwsim_ops.sw_scan_start = NULL; 2068 mac80211_hwsim_ops.sw_scan_start = NULL;
1756 mac80211_hwsim_ops.sw_scan_complete = NULL; 2069 mac80211_hwsim_ops.sw_scan_complete = NULL;
2070 mac80211_hwsim_ops.remain_on_channel =
2071 mac80211_hwsim_roc;
2072 mac80211_hwsim_ops.cancel_remain_on_channel =
2073 mac80211_hwsim_croc;
2074 mac80211_hwsim_ops.add_chanctx =
2075 mac80211_hwsim_add_chanctx;
2076 mac80211_hwsim_ops.remove_chanctx =
2077 mac80211_hwsim_remove_chanctx;
2078 mac80211_hwsim_ops.change_chanctx =
2079 mac80211_hwsim_change_chanctx;
2080 mac80211_hwsim_ops.assign_vif_chanctx =
2081 mac80211_hwsim_assign_vif_chanctx;
2082 mac80211_hwsim_ops.unassign_vif_chanctx =
2083 mac80211_hwsim_unassign_vif_chanctx;
1757 } 2084 }
1758 2085
1759 spin_lock_init(&hwsim_radio_lock); 2086 spin_lock_init(&hwsim_radio_lock);
@@ -1803,13 +2130,18 @@ static int __init init_mac80211_hwsim(void)
1803 hw->wiphy->iface_combinations = &hwsim_if_comb; 2130 hw->wiphy->iface_combinations = &hwsim_if_comb;
1804 hw->wiphy->n_iface_combinations = 1; 2131 hw->wiphy->n_iface_combinations = 1;
1805 2132
1806 if (fake_hw_scan) { 2133 if (channels > 1) {
1807 hw->wiphy->max_scan_ssids = 255; 2134 hw->wiphy->max_scan_ssids = 255;
1808 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; 2135 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
2136 hw->wiphy->max_remain_on_channel_duration = 1000;
1809 } 2137 }
1810 2138
2139 INIT_DELAYED_WORK(&data->roc_done, hw_roc_done);
2140 INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work);
2141
1811 hw->channel_change_time = 1; 2142 hw->channel_change_time = 1;
1812 hw->queues = 4; 2143 hw->queues = 5;
2144 hw->offchannel_tx_hw_queue = 4;
1813 hw->wiphy->interface_modes = 2145 hw->wiphy->interface_modes =
1814 BIT(NL80211_IFTYPE_STATION) | 2146 BIT(NL80211_IFTYPE_STATION) |
1815 BIT(NL80211_IFTYPE_AP) | 2147 BIT(NL80211_IFTYPE_AP) |
@@ -1824,7 +2156,8 @@ static int __init init_mac80211_hwsim(void)
1824 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 2156 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1825 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 2157 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
1826 IEEE80211_HW_AMPDU_AGGREGATION | 2158 IEEE80211_HW_AMPDU_AGGREGATION |
1827 IEEE80211_HW_WANT_MONITOR_VIF; 2159 IEEE80211_HW_WANT_MONITOR_VIF |
2160 IEEE80211_HW_QUEUE_CONTROL;
1828 2161
1829 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | 2162 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
1830 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 2163 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -1874,6 +2207,34 @@ static int __init init_mac80211_hwsim(void)
1874 sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; 2207 sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1875 2208
1876 hw->wiphy->bands[band] = sband; 2209 hw->wiphy->bands[band] = sband;
2210
2211 if (channels == 1)
2212 continue;
2213
2214 sband->vht_cap.vht_supported = true;
2215 sband->vht_cap.cap =
2216 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
2217 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
2218 IEEE80211_VHT_CAP_RXLDPC |
2219 IEEE80211_VHT_CAP_SHORT_GI_80 |
2220 IEEE80211_VHT_CAP_SHORT_GI_160 |
2221 IEEE80211_VHT_CAP_TXSTBC |
2222 IEEE80211_VHT_CAP_RXSTBC_1 |
2223 IEEE80211_VHT_CAP_RXSTBC_2 |
2224 IEEE80211_VHT_CAP_RXSTBC_3 |
2225 IEEE80211_VHT_CAP_RXSTBC_4 |
2226 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
2227 sband->vht_cap.vht_mcs.rx_mcs_map =
2228 cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_8 << 0 |
2229 IEEE80211_VHT_MCS_SUPPORT_0_8 << 2 |
2230 IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
2231 IEEE80211_VHT_MCS_SUPPORT_0_8 << 6 |
2232 IEEE80211_VHT_MCS_SUPPORT_0_8 << 8 |
2233 IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 |
2234 IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 |
2235 IEEE80211_VHT_MCS_SUPPORT_0_8 << 14);
2236 sband->vht_cap.vht_mcs.tx_mcs_map =
2237 sband->vht_cap.vht_mcs.rx_mcs_map;
1877 } 2238 }
1878 /* By default all radios are belonging to the first group */ 2239 /* By default all radios are belonging to the first group */
1879 data->group = 1; 2240 data->group = 1;
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 395f1bfd4102..68d52cfc1ebd 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -197,7 +197,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
197 ra_list_flags); 197 ra_list_flags);
198 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); 198 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad);
199 199
200 mwifiex_write_data_complete(adapter, skb_src, 0); 200 mwifiex_write_data_complete(adapter, skb_src, 0, 0);
201 201
202 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 202 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
203 203
@@ -256,7 +256,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
256 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) { 256 if (!mwifiex_is_ralist_valid(priv, pra_list, ptrindex)) {
257 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 257 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
258 ra_list_flags); 258 ra_list_flags);
259 mwifiex_write_data_complete(adapter, skb_aggr, -1); 259 mwifiex_write_data_complete(adapter, skb_aggr, 1, -1);
260 return -1; 260 return -1;
261 } 261 }
262 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && 262 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
@@ -282,13 +282,13 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
282 dev_err(adapter->dev, "%s: host_to_card failed: %#x\n", 282 dev_err(adapter->dev, "%s: host_to_card failed: %#x\n",
283 __func__, ret); 283 __func__, ret);
284 adapter->dbg.num_tx_host_to_card_failure++; 284 adapter->dbg.num_tx_host_to_card_failure++;
285 mwifiex_write_data_complete(adapter, skb_aggr, ret); 285 mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
286 return 0; 286 return 0;
287 case -EINPROGRESS: 287 case -EINPROGRESS:
288 adapter->data_sent = false; 288 adapter->data_sent = false;
289 break; 289 break;
290 case 0: 290 case 0:
291 mwifiex_write_data_complete(adapter, skb_aggr, ret); 291 mwifiex_write_data_complete(adapter, skb_aggr, 1, ret);
292 break; 292 break;
293 default: 293 default:
294 break; 294 break;
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9402b93b9a36..4a97acd170f7 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -58,8 +58,7 @@ mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv,
58 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 58 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
59 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr); 59 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
60 else 60 else
61 mwifiex_process_rx_packet(priv->adapter, 61 mwifiex_process_rx_packet(priv, rx_tmp_ptr);
62 rx_tmp_ptr);
63 } 62 }
64 } 63 }
65 64
@@ -106,7 +105,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
106 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 105 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
107 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr); 106 mwifiex_handle_uap_rx_forward(priv, rx_tmp_ptr);
108 else 107 else
109 mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); 108 mwifiex_process_rx_packet(priv, rx_tmp_ptr);
110 } 109 }
111 110
112 spin_lock_irqsave(&priv->rx_pkt_lock, flags); 111 spin_lock_irqsave(&priv->rx_pkt_lock, flags);
@@ -442,8 +441,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
442 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 441 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
443 mwifiex_handle_uap_rx_forward(priv, payload); 442 mwifiex_handle_uap_rx_forward(priv, payload);
444 else 443 else
445 mwifiex_process_rx_packet(priv->adapter, 444 mwifiex_process_rx_packet(priv, payload);
446 payload);
447 } 445 }
448 return 0; 446 return 0;
449 } 447 }
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8e384fae3e68..b2e27723f801 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -1,7 +1,6 @@
1config MWIFIEX 1config MWIFIEX
2 tristate "Marvell WiFi-Ex Driver" 2 tristate "Marvell WiFi-Ex Driver"
3 depends on CFG80211 3 depends on CFG80211
4 select LIB80211
5 ---help--- 4 ---help---
6 This adds support for wireless adapters based on Marvell 5 This adds support for wireless adapters based on Marvell
7 802.11n chipsets. 6 802.11n chipsets.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 780d3e168297..a875499f8945 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -180,10 +180,8 @@ mwifiex_form_mgmt_frame(struct sk_buff *skb, const u8 *buf, size_t len)
180static int 180static int
181mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, 181mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
182 struct ieee80211_channel *chan, bool offchan, 182 struct ieee80211_channel *chan, bool offchan,
183 enum nl80211_channel_type channel_type, 183 unsigned int wait, const u8 *buf, size_t len,
184 bool channel_type_valid, unsigned int wait, 184 bool no_cck, bool dont_wait_for_ack, u64 *cookie)
185 const u8 *buf, size_t len, bool no_cck,
186 bool dont_wait_for_ack, u64 *cookie)
187{ 185{
188 struct sk_buff *skb; 186 struct sk_buff *skb;
189 u16 pkt_len; 187 u16 pkt_len;
@@ -253,7 +251,6 @@ static int
253mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy, 251mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
254 struct wireless_dev *wdev, 252 struct wireless_dev *wdev,
255 struct ieee80211_channel *chan, 253 struct ieee80211_channel *chan,
256 enum nl80211_channel_type channel_type,
257 unsigned int duration, u64 *cookie) 254 unsigned int duration, u64 *cookie)
258{ 255{
259 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev); 256 struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
@@ -271,15 +268,14 @@ mwifiex_cfg80211_remain_on_channel(struct wiphy *wiphy,
271 } 268 }
272 269
273 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan, 270 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_SET, chan,
274 &channel_type, duration); 271 duration);
275 272
276 if (!ret) { 273 if (!ret) {
277 *cookie = random32() | 1; 274 *cookie = random32() | 1;
278 priv->roc_cfg.cookie = *cookie; 275 priv->roc_cfg.cookie = *cookie;
279 priv->roc_cfg.chan = *chan; 276 priv->roc_cfg.chan = *chan;
280 priv->roc_cfg.chan_type = channel_type;
281 277
282 cfg80211_ready_on_channel(wdev, *cookie, chan, channel_type, 278 cfg80211_ready_on_channel(wdev, *cookie, chan,
283 duration, GFP_ATOMIC); 279 duration, GFP_ATOMIC);
284 280
285 wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie); 281 wiphy_dbg(wiphy, "info: ROC, cookie = 0x%llx\n", *cookie);
@@ -302,13 +298,11 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
302 return -ENOENT; 298 return -ENOENT;
303 299
304 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE, 300 ret = mwifiex_remain_on_chan_cfg(priv, HostCmd_ACT_GEN_REMOVE,
305 &priv->roc_cfg.chan, 301 &priv->roc_cfg.chan, 0);
306 &priv->roc_cfg.chan_type, 0);
307 302
308 if (!ret) { 303 if (!ret) {
309 cfg80211_remain_on_channel_expired(wdev, cookie, 304 cfg80211_remain_on_channel_expired(wdev, cookie,
310 &priv->roc_cfg.chan, 305 &priv->roc_cfg.chan,
311 priv->roc_cfg.chan_type,
312 GFP_ATOMIC); 306 GFP_ATOMIC);
313 307
314 memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg)); 308 memset(&priv->roc_cfg, 0, sizeof(struct mwifiex_roc_cfg));
@@ -324,6 +318,7 @@ mwifiex_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy,
324 */ 318 */
325static int 319static int
326mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, 320mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
321 struct wireless_dev *wdev,
327 enum nl80211_tx_power_setting type, 322 enum nl80211_tx_power_setting type,
328 int mbm) 323 int mbm)
329{ 324{
@@ -471,13 +466,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
471 flag = 1; 466 flag = 1;
472 first_chan = (u32) ch->hw_value; 467 first_chan = (u32) ch->hw_value;
473 next_chan = first_chan; 468 next_chan = first_chan;
474 max_pwr = ch->max_reg_power; 469 max_pwr = ch->max_power;
475 no_of_parsed_chan = 1; 470 no_of_parsed_chan = 1;
476 continue; 471 continue;
477 } 472 }
478 473
479 if (ch->hw_value == next_chan + 1 && 474 if (ch->hw_value == next_chan + 1 &&
480 ch->max_reg_power == max_pwr) { 475 ch->max_power == max_pwr) {
481 next_chan++; 476 next_chan++;
482 no_of_parsed_chan++; 477 no_of_parsed_chan++;
483 } else { 478 } else {
@@ -488,7 +483,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
488 no_of_triplet++; 483 no_of_triplet++;
489 first_chan = (u32) ch->hw_value; 484 first_chan = (u32) ch->hw_value;
490 next_chan = first_chan; 485 next_chan = first_chan;
491 max_pwr = ch->max_reg_power; 486 max_pwr = ch->max_power;
492 no_of_parsed_chan = 1; 487 no_of_parsed_chan = 1;
493 } 488 }
494 } 489 }
@@ -1296,21 +1291,23 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
1296 return -EINVAL; 1291 return -EINVAL;
1297 } 1292 }
1298 1293
1299 bss_cfg->channel = 1294 bss_cfg->channel = ieee80211_frequency_to_channel(
1300 (u8)ieee80211_frequency_to_channel(params->channel->center_freq); 1295 params->chandef.chan->center_freq);
1301 1296
1302 /* Set appropriate bands */ 1297 /* Set appropriate bands */
1303 if (params->channel->band == IEEE80211_BAND_2GHZ) { 1298 if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
1304 bss_cfg->band_cfg = BAND_CONFIG_BG; 1299 bss_cfg->band_cfg = BAND_CONFIG_BG;
1305 1300
1306 if (params->channel_type == NL80211_CHAN_NO_HT) 1301 if (cfg80211_get_chandef_type(&params->chandef) ==
1302 NL80211_CHAN_NO_HT)
1307 config_bands = BAND_B | BAND_G; 1303 config_bands = BAND_B | BAND_G;
1308 else 1304 else
1309 config_bands = BAND_B | BAND_G | BAND_GN; 1305 config_bands = BAND_B | BAND_G | BAND_GN;
1310 } else { 1306 } else {
1311 bss_cfg->band_cfg = BAND_CONFIG_A; 1307 bss_cfg->band_cfg = BAND_CONFIG_A;
1312 1308
1313 if (params->channel_type == NL80211_CHAN_NO_HT) 1309 if (cfg80211_get_chandef_type(&params->chandef) ==
1310 NL80211_CHAN_NO_HT)
1314 config_bands = BAND_A; 1311 config_bands = BAND_A;
1315 else 1312 else
1316 config_bands = BAND_AN | BAND_A; 1313 config_bands = BAND_AN | BAND_A;
@@ -1683,7 +1680,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
1683 int index = 0, i; 1680 int index = 0, i;
1684 u8 config_bands = 0; 1681 u8 config_bands = 0;
1685 1682
1686 if (params->channel->band == IEEE80211_BAND_2GHZ) { 1683 if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
1687 if (!params->basic_rates) { 1684 if (!params->basic_rates) {
1688 config_bands = BAND_B | BAND_G; 1685 config_bands = BAND_B | BAND_G;
1689 } else { 1686 } else {
@@ -1708,10 +1705,12 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
1708 } 1705 }
1709 } 1706 }
1710 1707
1711 if (params->channel_type != NL80211_CHAN_NO_HT) 1708 if (cfg80211_get_chandef_type(&params->chandef) !=
1709 NL80211_CHAN_NO_HT)
1712 config_bands |= BAND_GN; 1710 config_bands |= BAND_GN;
1713 } else { 1711 } else {
1714 if (params->channel_type == NL80211_CHAN_NO_HT) 1712 if (cfg80211_get_chandef_type(&params->chandef) !=
1713 NL80211_CHAN_NO_HT)
1715 config_bands = BAND_A; 1714 config_bands = BAND_A;
1716 else 1715 else
1717 config_bands = BAND_AN | BAND_A; 1716 config_bands = BAND_AN | BAND_A;
@@ -1728,9 +1727,10 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
1728 } 1727 }
1729 1728
1730 adapter->sec_chan_offset = 1729 adapter->sec_chan_offset =
1731 mwifiex_chan_type_to_sec_chan_offset(params->channel_type); 1730 mwifiex_chan_type_to_sec_chan_offset(
1732 priv->adhoc_channel = 1731 cfg80211_get_chandef_type(&params->chandef));
1733 ieee80211_frequency_to_channel(params->channel->center_freq); 1732 priv->adhoc_channel = ieee80211_frequency_to_channel(
1733 params->chandef.chan->center_freq);
1734 1734
1735 wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n", 1735 wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
1736 config_bands, priv->adhoc_channel, adapter->sec_chan_offset); 1736 config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
@@ -1764,7 +1764,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
1764 1764
1765 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid, 1765 ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
1766 params->bssid, priv->bss_mode, 1766 params->bssid, priv->bss_mode,
1767 params->channel, NULL, params->privacy); 1767 params->chandef.chan, NULL,
1768 params->privacy);
1768done: 1769done:
1769 if (!ret) { 1770 if (!ret) {
1770 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL); 1771 cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
@@ -1819,12 +1820,18 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
1819 1820
1820 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); 1821 wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
1821 1822
1822 if (atomic_read(&priv->wmm.tx_pkts_queued) >= 1823 if ((request->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
1824 atomic_read(&priv->wmm.tx_pkts_queued) >=
1823 MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) { 1825 MWIFIEX_MIN_TX_PENDING_TO_CANCEL_SCAN) {
1824 dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n"); 1826 dev_dbg(priv->adapter->dev, "scan rejected due to traffic\n");
1825 return -EBUSY; 1827 return -EBUSY;
1826 } 1828 }
1827 1829
1830 if (priv->user_scan_cfg) {
1831 dev_err(priv->adapter->dev, "cmd: Scan already in process..\n");
1832 return -EBUSY;
1833 }
1834
1828 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), 1835 priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
1829 GFP_KERNEL); 1836 GFP_KERNEL);
1830 if (!priv->user_scan_cfg) { 1837 if (!priv->user_scan_cfg) {
@@ -1941,6 +1948,21 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
1941 else 1948 else
1942 ht_info->cap &= ~IEEE80211_HT_CAP_TX_STBC; 1949 ht_info->cap &= ~IEEE80211_HT_CAP_TX_STBC;
1943 1950
1951 if (ISSUPP_GREENFIELD(adapter->hw_dot_11n_dev_cap))
1952 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
1953 else
1954 ht_info->cap &= ~IEEE80211_HT_CAP_GRN_FLD;
1955
1956 if (ISENABLED_40MHZ_INTOLERANT(adapter->hw_dot_11n_dev_cap))
1957 ht_info->cap |= IEEE80211_HT_CAP_40MHZ_INTOLERANT;
1958 else
1959 ht_info->cap &= ~IEEE80211_HT_CAP_40MHZ_INTOLERANT;
1960
1961 if (ISSUPP_RXLDPC(adapter->hw_dot_11n_dev_cap))
1962 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
1963 else
1964 ht_info->cap &= ~IEEE80211_HT_CAP_LDPC_CODING;
1965
1944 ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU; 1966 ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU;
1945 ht_info->cap |= IEEE80211_HT_CAP_SM_PS; 1967 ht_info->cap |= IEEE80211_HT_CAP_SM_PS;
1946 1968
@@ -2074,8 +2096,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2074 return ERR_PTR(-EINVAL); 2096 return ERR_PTR(-EINVAL);
2075 } 2097 }
2076 2098
2077 dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name, 2099 dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
2078 ether_setup, 1); 2100 ether_setup, IEEE80211_NUM_ACS, 1);
2079 if (!dev) { 2101 if (!dev) {
2080 wiphy_err(wiphy, "no memory available for netdevice\n"); 2102 wiphy_err(wiphy, "no memory available for netdevice\n");
2081 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; 2103 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
@@ -2116,7 +2138,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2116 } 2138 }
2117 2139
2118 sema_init(&priv->async_sem, 1); 2140 sema_init(&priv->async_sem, 1);
2119 priv->scan_pending_on_block = false;
2120 2141
2121 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); 2142 dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name);
2122 2143
@@ -2138,8 +2159,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2138 mwifiex_dev_debugfs_remove(priv); 2159 mwifiex_dev_debugfs_remove(priv);
2139#endif 2160#endif
2140 2161
2141 if (!netif_queue_stopped(priv->netdev)) 2162 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
2142 netif_stop_queue(priv->netdev);
2143 2163
2144 if (netif_carrier_ok(priv->netdev)) 2164 if (netif_carrier_ok(priv->netdev))
2145 netif_carrier_off(priv->netdev); 2165 netif_carrier_off(priv->netdev);
@@ -2253,8 +2273,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2253 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; 2273 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
2254 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; 2274 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
2255 2275
2256 wiphy->features = NL80211_FEATURE_HT_IBSS | 2276 wiphy->features |= NL80211_FEATURE_HT_IBSS |
2257 NL80211_FEATURE_INACTIVITY_TIMER; 2277 NL80211_FEATURE_INACTIVITY_TIMER |
2278 NL80211_FEATURE_LOW_PRIORITY_SCAN;
2258 2279
2259 /* Reserve space for mwifiex specific private data for BSS */ 2280 /* Reserve space for mwifiex specific private data for BSS */
2260 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv); 2281 wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index ae9010ed58de..5f438e6c2155 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -914,21 +914,24 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
914 914
915 dev_err(adapter->dev, "last_cmd_index = %d\n", 915 dev_err(adapter->dev, "last_cmd_index = %d\n",
916 adapter->dbg.last_cmd_index); 916 adapter->dbg.last_cmd_index);
917 print_hex_dump_bytes("last_cmd_id: ", DUMP_PREFIX_OFFSET, 917 dev_err(adapter->dev, "last_cmd_id: %*ph\n",
918 adapter->dbg.last_cmd_id, DBG_CMD_NUM); 918 (int)sizeof(adapter->dbg.last_cmd_id),
919 print_hex_dump_bytes("last_cmd_act: ", DUMP_PREFIX_OFFSET, 919 adapter->dbg.last_cmd_id);
920 adapter->dbg.last_cmd_act, DBG_CMD_NUM); 920 dev_err(adapter->dev, "last_cmd_act: %*ph\n",
921 (int)sizeof(adapter->dbg.last_cmd_act),
922 adapter->dbg.last_cmd_act);
921 923
922 dev_err(adapter->dev, "last_cmd_resp_index = %d\n", 924 dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
923 adapter->dbg.last_cmd_resp_index); 925 adapter->dbg.last_cmd_resp_index);
924 print_hex_dump_bytes("last_cmd_resp_id: ", DUMP_PREFIX_OFFSET, 926 dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
925 adapter->dbg.last_cmd_resp_id, 927 (int)sizeof(adapter->dbg.last_cmd_resp_id),
926 DBG_CMD_NUM); 928 adapter->dbg.last_cmd_resp_id);
927 929
928 dev_err(adapter->dev, "last_event_index = %d\n", 930 dev_err(adapter->dev, "last_event_index = %d\n",
929 adapter->dbg.last_event_index); 931 adapter->dbg.last_event_index);
930 print_hex_dump_bytes("last_event: ", DUMP_PREFIX_OFFSET, 932 dev_err(adapter->dev, "last_event: %*ph\n",
931 adapter->dbg.last_event, DBG_CMD_NUM); 933 (int)sizeof(adapter->dbg.last_event),
934 adapter->dbg.last_event);
932 935
933 dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n", 936 dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
934 adapter->data_sent, adapter->cmd_sent); 937 adapter->data_sent, adapter->cmd_sent);
@@ -946,6 +949,9 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
946 } 949 }
947 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) 950 if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
948 mwifiex_init_fw_complete(adapter); 951 mwifiex_init_fw_complete(adapter);
952
953 if (adapter->if_ops.card_reset)
954 adapter->if_ops.card_reset(adapter);
949} 955}
950 956
951/* 957/*
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index a870b5885c09..46e34aa65d1c 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -178,6 +178,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
178 (struct mwifiex_private *) file->private_data; 178 (struct mwifiex_private *) file->private_data;
179 struct net_device *netdev = priv->netdev; 179 struct net_device *netdev = priv->netdev;
180 struct netdev_hw_addr *ha; 180 struct netdev_hw_addr *ha;
181 struct netdev_queue *txq;
181 unsigned long page = get_zeroed_page(GFP_KERNEL); 182 unsigned long page = get_zeroed_page(GFP_KERNEL);
182 char *p = (char *) page, fmt[64]; 183 char *p = (char *) page, fmt[64];
183 struct mwifiex_bss_info info; 184 struct mwifiex_bss_info info;
@@ -229,8 +230,13 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
229 p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors); 230 p += sprintf(p, "num_rx_pkts_err = %lu\n", priv->stats.rx_errors);
230 p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev)) 231 p += sprintf(p, "carrier %s\n", ((netif_carrier_ok(priv->netdev))
231 ? "on" : "off")); 232 ? "on" : "off"));
232 p += sprintf(p, "tx queue %s\n", ((netif_queue_stopped(priv->netdev)) 233 p += sprintf(p, "tx queue");
233 ? "stopped" : "started")); 234 for (i = 0; i < netdev->num_tx_queues; i++) {
235 txq = netdev_get_tx_queue(netdev, i);
236 p += sprintf(p, " %d:%s", i, netif_tx_queue_stopped(txq) ?
237 "stopped" : "started");
238 }
239 p += sprintf(p, "\n");
234 240
235 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page, 241 ret = simple_read_from_buffer(ubuf, count, ppos, (char *) page,
236 (unsigned long) p - page); 242 (unsigned long) p - page);
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index dda588b35570..4dc8e2e9a889 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -194,6 +194,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
194#define ISSUPP_TXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(25)) 194#define ISSUPP_TXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(25))
195#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26)) 195#define ISSUPP_RXSTBC(Dot11nDevCap) (Dot11nDevCap & BIT(26))
196#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29)) 196#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
197#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
198#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
197 199
198/* httxcfg bitmap 200/* httxcfg bitmap
199 * 0 reserved 201 * 0 reserved
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index b5d37a8caa09..39f03ce5a5b1 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -84,18 +84,19 @@ static void scan_delay_timer_fn(unsigned long data)
84 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 84 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
85 85
86 if (priv->user_scan_cfg) { 86 if (priv->user_scan_cfg) {
87 dev_dbg(priv->adapter->dev, 87 if (priv->scan_request) {
88 "info: %s: scan aborted\n", __func__); 88 dev_dbg(priv->adapter->dev,
89 cfg80211_scan_done(priv->scan_request, 1); 89 "info: aborting scan\n");
90 priv->scan_request = NULL; 90 cfg80211_scan_done(priv->scan_request, 1);
91 priv->scan_request = NULL;
92 } else {
93 dev_dbg(priv->adapter->dev,
94 "info: scan already aborted\n");
95 }
96
91 kfree(priv->user_scan_cfg); 97 kfree(priv->user_scan_cfg);
92 priv->user_scan_cfg = NULL; 98 priv->user_scan_cfg = NULL;
93 } 99 }
94
95 if (priv->scan_pending_on_block) {
96 priv->scan_pending_on_block = false;
97 up(&priv->async_sem);
98 }
99 goto done; 100 goto done;
100 } 101 }
101 102
@@ -387,9 +388,17 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
387 struct mwifiex_adapter *adapter) 388 struct mwifiex_adapter *adapter)
388{ 389{
389 unsigned long dev_queue_flags; 390 unsigned long dev_queue_flags;
391 unsigned int i;
390 392
391 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); 393 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
392 netif_tx_wake_all_queues(netdev); 394
395 for (i = 0; i < netdev->num_tx_queues; i++) {
396 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
397
398 if (netif_tx_queue_stopped(txq))
399 netif_tx_wake_queue(txq);
400 }
401
393 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); 402 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
394} 403}
395 404
@@ -400,9 +409,17 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev,
400 struct mwifiex_adapter *adapter) 409 struct mwifiex_adapter *adapter)
401{ 410{
402 unsigned long dev_queue_flags; 411 unsigned long dev_queue_flags;
412 unsigned int i;
403 413
404 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); 414 spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
405 netif_tx_stop_all_queues(netdev); 415
416 for (i = 0; i < netdev->num_tx_queues; i++) {
417 struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
418
419 if (!netif_tx_queue_stopped(txq))
420 netif_tx_stop_queue(txq);
421 }
422
406 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); 423 spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
407} 424}
408 425
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 7b0858af8f5d..88664ae667ba 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -721,8 +721,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
721 721
722 if (!netif_carrier_ok(priv->netdev)) 722 if (!netif_carrier_ok(priv->netdev))
723 netif_carrier_on(priv->netdev); 723 netif_carrier_on(priv->netdev);
724 if (netif_queue_stopped(priv->netdev)) 724 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
725 netif_wake_queue(priv->netdev);
726 725
727 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled) 726 if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
728 priv->scan_block = true; 727 priv->scan_block = true;
@@ -1238,8 +1237,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv,
1238 1237
1239 if (!netif_carrier_ok(priv->netdev)) 1238 if (!netif_carrier_ok(priv->netdev))
1240 netif_carrier_on(priv->netdev); 1239 netif_carrier_on(priv->netdev);
1241 if (netif_queue_stopped(priv->netdev)) 1240 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
1242 netif_wake_queue(priv->netdev);
1243 1241
1244 mwifiex_save_curr_bcn(priv); 1242 mwifiex_save_curr_bcn(priv);
1245 1243
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index eb22dd248d54..9c802ede9c3b 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -282,6 +282,7 @@ exit_main_proc:
282 mwifiex_shutdown_drv(adapter); 282 mwifiex_shutdown_drv(adapter);
283 return ret; 283 return ret;
284} 284}
285EXPORT_SYMBOL_GPL(mwifiex_main_process);
285 286
286/* 287/*
287 * This function frees the adapter structure. 288 * This function frees the adapter structure.
@@ -412,49 +413,6 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
412} 413}
413 414
414/* 415/*
415 * This function fills a driver buffer.
416 *
417 * The function associates a given SKB with the provided driver buffer
418 * and also updates some of the SKB parameters, including IP header,
419 * priority and timestamp.
420 */
421static void
422mwifiex_fill_buffer(struct sk_buff *skb)
423{
424 struct ethhdr *eth;
425 struct iphdr *iph;
426 struct timeval tv;
427 u8 tid = 0;
428
429 eth = (struct ethhdr *) skb->data;
430 switch (eth->h_proto) {
431 case __constant_htons(ETH_P_IP):
432 iph = ip_hdr(skb);
433 tid = IPTOS_PREC(iph->tos);
434 pr_debug("data: packet type ETH_P_IP: %04x, tid=%#x prio=%#x\n",
435 eth->h_proto, tid, skb->priority);
436 break;
437 case __constant_htons(ETH_P_ARP):
438 pr_debug("data: ARP packet: %04x\n", eth->h_proto);
439 default:
440 break;
441 }
442/* Offset for TOS field in the IP header */
443#define IPTOS_OFFSET 5
444 tid = (tid >> IPTOS_OFFSET);
445 skb->priority = tid;
446 /* Record the current time the packet was queued; used to
447 determine the amount of time the packet was queued in
448 the driver before it was sent to the firmware.
449 The delay is then sent along with the packet to the
450 firmware for aggregate delay calculation for stats and
451 MSDU lifetime expiry.
452 */
453 do_gettimeofday(&tv);
454 skb->tstamp = timeval_to_ktime(tv);
455}
456
457/*
458 * CFG802.11 network device handler for open. 416 * CFG802.11 network device handler for open.
459 * 417 *
460 * Starts the data queue. 418 * Starts the data queue.
@@ -472,6 +430,14 @@ mwifiex_open(struct net_device *dev)
472static int 430static int
473mwifiex_close(struct net_device *dev) 431mwifiex_close(struct net_device *dev)
474{ 432{
433 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
434
435 if (priv->scan_request) {
436 dev_dbg(priv->adapter->dev, "aborting scan on ndo_stop\n");
437 cfg80211_scan_done(priv->scan_request, 1);
438 priv->scan_request = NULL;
439 }
440
475 return 0; 441 return 0;
476} 442}
477 443
@@ -480,17 +446,23 @@ mwifiex_close(struct net_device *dev)
480 */ 446 */
481int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) 447int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
482{ 448{
483 mwifiex_wmm_add_buf_txqueue(priv, skb); 449 struct netdev_queue *txq;
450 int index = mwifiex_1d_to_wmm_queue[skb->priority];
451
452 if (atomic_inc_return(&priv->wmm_tx_pending[index]) >= MAX_TX_PENDING) {
453 txq = netdev_get_tx_queue(priv->netdev, index);
454 if (!netif_tx_queue_stopped(txq)) {
455 netif_tx_stop_queue(txq);
456 dev_dbg(priv->adapter->dev, "stop queue: %d\n", index);
457 }
458 }
459
484 atomic_inc(&priv->adapter->tx_pending); 460 atomic_inc(&priv->adapter->tx_pending);
461 mwifiex_wmm_add_buf_txqueue(priv, skb);
485 462
486 if (priv->adapter->scan_delay_cnt) 463 if (priv->adapter->scan_delay_cnt)
487 atomic_set(&priv->adapter->is_tx_received, true); 464 atomic_set(&priv->adapter->is_tx_received, true);
488 465
489 if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
490 mwifiex_set_trans_start(priv->netdev);
491 mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
492 }
493
494 queue_work(priv->adapter->workqueue, &priv->adapter->main_work); 466 queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
495 467
496 return 0; 468 return 0;
@@ -505,6 +477,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
505 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 477 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
506 struct sk_buff *new_skb; 478 struct sk_buff *new_skb;
507 struct mwifiex_txinfo *tx_info; 479 struct mwifiex_txinfo *tx_info;
480 struct timeval tv;
508 481
509 dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n", 482 dev_dbg(priv->adapter->dev, "data: %lu BSS(%d-%d): Data <= kernel\n",
510 jiffies, priv->bss_type, priv->bss_num); 483 jiffies, priv->bss_type, priv->bss_num);
@@ -542,7 +515,16 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
542 tx_info = MWIFIEX_SKB_TXCB(skb); 515 tx_info = MWIFIEX_SKB_TXCB(skb);
543 tx_info->bss_num = priv->bss_num; 516 tx_info->bss_num = priv->bss_num;
544 tx_info->bss_type = priv->bss_type; 517 tx_info->bss_type = priv->bss_type;
545 mwifiex_fill_buffer(skb); 518
519 /* Record the current time the packet was queued; used to
520 * determine the amount of time the packet was queued in
521 * the driver before it was sent to the firmware.
522 * The delay is then sent along with the packet to the
523 * firmware for aggregate delay calculation for stats and
524 * MSDU lifetime expiry.
525 */
526 do_gettimeofday(&tv);
527 skb->tstamp = timeval_to_ktime(tv);
546 528
547 mwifiex_queue_tx_pkt(priv, skb); 529 mwifiex_queue_tx_pkt(priv, skb);
548 530
@@ -622,6 +604,13 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
622 return &priv->stats; 604 return &priv->stats;
623} 605}
624 606
607static u16
608mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb)
609{
610 skb->priority = cfg80211_classify8021d(skb);
611 return mwifiex_1d_to_wmm_queue[skb->priority];
612}
613
625/* Network device handlers */ 614/* Network device handlers */
626static const struct net_device_ops mwifiex_netdev_ops = { 615static const struct net_device_ops mwifiex_netdev_ops = {
627 .ndo_open = mwifiex_open, 616 .ndo_open = mwifiex_open,
@@ -631,6 +620,7 @@ static const struct net_device_ops mwifiex_netdev_ops = {
631 .ndo_tx_timeout = mwifiex_tx_timeout, 620 .ndo_tx_timeout = mwifiex_tx_timeout,
632 .ndo_get_stats = mwifiex_get_stats, 621 .ndo_get_stats = mwifiex_get_stats,
633 .ndo_set_rx_mode = mwifiex_set_multicast_list, 622 .ndo_set_rx_mode = mwifiex_set_multicast_list,
623 .ndo_select_queue = mwifiex_netdev_select_wmm_queue,
634}; 624};
635 625
636/* 626/*
@@ -830,9 +820,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
830 for (i = 0; i < adapter->priv_num; i++) { 820 for (i = 0; i < adapter->priv_num; i++) {
831 priv = adapter->priv[i]; 821 priv = adapter->priv[i];
832 if (priv && priv->netdev) { 822 if (priv && priv->netdev) {
833 if (!netif_queue_stopped(priv->netdev)) 823 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
834 mwifiex_stop_net_dev_queue(priv->netdev,
835 adapter);
836 if (netif_carrier_ok(priv->netdev)) 824 if (netif_carrier_ok(priv->netdev))
837 netif_carrier_off(priv->netdev); 825 netif_carrier_off(priv->netdev);
838 } 826 }
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index c2d0ab146af5..1b3cfc821940 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -115,8 +115,6 @@ enum {
115#define MWIFIEX_TYPE_DATA 0 115#define MWIFIEX_TYPE_DATA 0
116#define MWIFIEX_TYPE_EVENT 3 116#define MWIFIEX_TYPE_EVENT 3
117 117
118#define DBG_CMD_NUM 5
119
120#define MAX_BITMAP_RATES_SIZE 10 118#define MAX_BITMAP_RATES_SIZE 10
121 119
122#define MAX_CHANNEL_BAND_BG 14 120#define MAX_CHANNEL_BAND_BG 14
@@ -373,7 +371,6 @@ struct wps {
373struct mwifiex_roc_cfg { 371struct mwifiex_roc_cfg {
374 u64 cookie; 372 u64 cookie;
375 struct ieee80211_channel chan; 373 struct ieee80211_channel chan;
376 enum nl80211_channel_type chan_type;
377}; 374};
378 375
379struct mwifiex_adapter; 376struct mwifiex_adapter;
@@ -442,6 +439,7 @@ struct mwifiex_private {
442 u8 wmm_enabled; 439 u8 wmm_enabled;
443 u8 wmm_qosinfo; 440 u8 wmm_qosinfo;
444 struct mwifiex_wmm_desc wmm; 441 struct mwifiex_wmm_desc wmm;
442 atomic_t wmm_tx_pending[IEEE80211_NUM_ACS];
445 struct list_head sta_list; 443 struct list_head sta_list;
446 /* spin lock for associated station list */ 444 /* spin lock for associated station list */
447 spinlock_t sta_list_spinlock; 445 spinlock_t sta_list_spinlock;
@@ -484,7 +482,6 @@ struct mwifiex_private {
484 u8 nick_name[16]; 482 u8 nick_name[16];
485 u16 current_key_index; 483 u16 current_key_index;
486 struct semaphore async_sem; 484 struct semaphore async_sem;
487 u8 scan_pending_on_block;
488 u8 report_scan_result; 485 u8 report_scan_result;
489 struct cfg80211_scan_request *scan_request; 486 struct cfg80211_scan_request *scan_request;
490 struct mwifiex_user_scan_cfg *user_scan_cfg; 487 struct mwifiex_user_scan_cfg *user_scan_cfg;
@@ -603,6 +600,7 @@ struct mwifiex_if_ops {
603 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); 600 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
604 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *); 601 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
605 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); 602 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
603 void (*card_reset) (struct mwifiex_adapter *);
606}; 604};
607 605
608struct mwifiex_adapter { 606struct mwifiex_adapter {
@@ -750,9 +748,9 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
750 748
751int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *); 749int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
752 750
753int mwifiex_recv_packet(struct mwifiex_adapter *, struct sk_buff *skb); 751int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
754 752
755int mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter, 753int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
756 struct sk_buff *skb); 754 struct sk_buff *skb);
757 755
758int mwifiex_process_event(struct mwifiex_adapter *adapter); 756int mwifiex_process_event(struct mwifiex_adapter *adapter);
@@ -791,7 +789,7 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
791 struct mwifiex_tx_param *tx_param); 789 struct mwifiex_tx_param *tx_param);
792int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags); 790int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags);
793int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, 791int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
794 struct sk_buff *skb, int status); 792 struct sk_buff *skb, int aggr, int status);
795void mwifiex_clean_txrx(struct mwifiex_private *priv); 793void mwifiex_clean_txrx(struct mwifiex_private *priv);
796u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv); 794u8 mwifiex_check_last_packet_indication(struct mwifiex_private *priv);
797void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter); 795void mwifiex_check_ps_cond(struct mwifiex_adapter *adapter);
@@ -809,7 +807,7 @@ void mwifiex_hs_activated_event(struct mwifiex_private *priv,
809 u8 activated); 807 u8 activated);
810int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv, 808int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
811 struct host_cmd_ds_command *resp); 809 struct host_cmd_ds_command *resp);
812int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter, 810int mwifiex_process_rx_packet(struct mwifiex_private *priv,
813 struct sk_buff *skb); 811 struct sk_buff *skb);
814int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no, 812int mwifiex_sta_prepare_cmd(struct mwifiex_private *, uint16_t cmd_no,
815 u16 cmd_action, u32 cmd_oid, 813 u16 cmd_action, u32 cmd_oid,
@@ -819,9 +817,9 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
819 void *data_buf, void *cmd_buf); 817 void *data_buf, void *cmd_buf);
820int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no, 818int mwifiex_process_sta_cmdresp(struct mwifiex_private *, u16 cmdresp_no,
821 struct host_cmd_ds_command *resp); 819 struct host_cmd_ds_command *resp);
822int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *, 820int mwifiex_process_sta_rx_packet(struct mwifiex_private *,
823 struct sk_buff *skb); 821 struct sk_buff *skb);
824int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter, 822int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
825 struct sk_buff *skb); 823 struct sk_buff *skb);
826int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, 824int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
827 struct sk_buff *skb); 825 struct sk_buff *skb);
@@ -1019,7 +1017,6 @@ int mwifiex_get_ver_ext(struct mwifiex_private *priv);
1019 1017
1020int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, 1018int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1021 struct ieee80211_channel *chan, 1019 struct ieee80211_channel *chan,
1022 enum nl80211_channel_type *channel_type,
1023 unsigned int duration); 1020 unsigned int duration);
1024 1021
1025int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role); 1022int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9171aaedbccd..9189a32b7844 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -153,7 +153,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
153 153
154 if (((bss_desc->bcn_wpa_ie) && 154 if (((bss_desc->bcn_wpa_ie) &&
155 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == 155 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
156 WLAN_EID_WPA))) { 156 WLAN_EID_VENDOR_SPECIFIC))) {
157 iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; 157 iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
158 oui = &mwifiex_wpa_oui[cipher][0]; 158 oui = &mwifiex_wpa_oui[cipher][0];
159 ret = mwifiex_search_oui_in_ie(iebody, oui); 159 ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -202,7 +202,7 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
202 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && 202 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
203 !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) || 203 !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
204 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != 204 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
205 WLAN_EID_WPA)) && 205 WLAN_EID_VENDOR_SPECIFIC)) &&
206 ((!bss_desc->bcn_rsn_ie) || 206 ((!bss_desc->bcn_rsn_ie) ||
207 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != 207 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
208 WLAN_EID_RSN)) && 208 WLAN_EID_RSN)) &&
@@ -237,7 +237,8 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
237{ 237{
238 if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled && 238 if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
239 !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) && 239 !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
240 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA)) 240 ((*(bss_desc->bcn_wpa_ie)).
241 vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
241 /* 242 /*
242 * Privacy bit may NOT be set in some APs like 243 * Privacy bit may NOT be set in some APs like
243 * LinkSys WRT54G && bss_desc->privacy 244 * LinkSys WRT54G && bss_desc->privacy
@@ -309,7 +310,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
309 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && 310 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
310 !priv->sec_info.wpa2_enabled && 311 !priv->sec_info.wpa2_enabled &&
311 ((!bss_desc->bcn_wpa_ie) || 312 ((!bss_desc->bcn_wpa_ie) ||
312 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) && 313 ((*(bss_desc->bcn_wpa_ie)).
314 vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
313 ((!bss_desc->bcn_rsn_ie) || 315 ((!bss_desc->bcn_rsn_ie) ||
314 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && 316 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
315 !priv->sec_info.encryption_mode && bss_desc->privacy) { 317 !priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -329,7 +331,8 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
329 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && 331 if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
330 !priv->sec_info.wpa2_enabled && 332 !priv->sec_info.wpa2_enabled &&
331 ((!bss_desc->bcn_wpa_ie) || 333 ((!bss_desc->bcn_wpa_ie) ||
332 ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) && 334 ((*(bss_desc->bcn_wpa_ie)).
335 vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
333 ((!bss_desc->bcn_rsn_ie) || 336 ((!bss_desc->bcn_rsn_ie) ||
334 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && 337 ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
335 priv->sec_info.encryption_mode && bss_desc->privacy) { 338 priv->sec_info.encryption_mode && bss_desc->privacy) {
@@ -938,6 +941,11 @@ mwifiex_config_scan(struct mwifiex_private *priv,
938 chan_idx)->chan_scan_mode_bitmap 941 chan_idx)->chan_scan_mode_bitmap
939 &= ~MWIFIEX_PASSIVE_SCAN; 942 &= ~MWIFIEX_PASSIVE_SCAN;
940 943
944 if (*filtered_scan)
945 (scan_chan_list +
946 chan_idx)->chan_scan_mode_bitmap
947 |= MWIFIEX_DISABLE_CHAN_FILT;
948
941 if (user_scan_in->chan_list[chan_idx].scan_time) { 949 if (user_scan_in->chan_list[chan_idx].scan_time) {
942 scan_dur = (u16) user_scan_in-> 950 scan_dur = (u16) user_scan_in->
943 chan_list[chan_idx].scan_time; 951 chan_list[chan_idx].scan_time;
@@ -1759,26 +1767,39 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1759 } 1767 }
1760 if (priv->report_scan_result) 1768 if (priv->report_scan_result)
1761 priv->report_scan_result = false; 1769 priv->report_scan_result = false;
1762 if (priv->scan_pending_on_block) {
1763 priv->scan_pending_on_block = false;
1764 up(&priv->async_sem);
1765 }
1766 1770
1767 if (priv->user_scan_cfg) { 1771 if (priv->user_scan_cfg) {
1768 dev_dbg(priv->adapter->dev, 1772 if (priv->scan_request) {
1769 "info: %s: sending scan results\n", __func__); 1773 dev_dbg(priv->adapter->dev,
1770 cfg80211_scan_done(priv->scan_request, 0); 1774 "info: notifying scan done\n");
1771 priv->scan_request = NULL; 1775 cfg80211_scan_done(priv->scan_request, 0);
1776 priv->scan_request = NULL;
1777 } else {
1778 dev_dbg(priv->adapter->dev,
1779 "info: scan already aborted\n");
1780 }
1781
1772 kfree(priv->user_scan_cfg); 1782 kfree(priv->user_scan_cfg);
1773 priv->user_scan_cfg = NULL; 1783 priv->user_scan_cfg = NULL;
1774 } 1784 }
1775 } else { 1785 } else {
1776 if (!mwifiex_wmm_lists_empty(adapter)) { 1786 if (priv->user_scan_cfg && !priv->scan_request) {
1787 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1788 flags);
1789 adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
1790 mod_timer(&priv->scan_delay_timer, jiffies);
1791 dev_dbg(priv->adapter->dev,
1792 "info: %s: triggerring scan abort\n", __func__);
1793 } else if (!mwifiex_wmm_lists_empty(adapter) &&
1794 (priv->scan_request && (priv->scan_request->flags &
1795 NL80211_SCAN_FLAG_LOW_PRIORITY))) {
1777 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, 1796 spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
1778 flags); 1797 flags);
1779 adapter->scan_delay_cnt = 1; 1798 adapter->scan_delay_cnt = 1;
1780 mod_timer(&priv->scan_delay_timer, jiffies + 1799 mod_timer(&priv->scan_delay_timer, jiffies +
1781 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC)); 1800 msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
1801 dev_dbg(priv->adapter->dev,
1802 "info: %s: deferring scan\n", __func__);
1782 } else { 1803 } else {
1783 /* Get scan command from scan_pending_q and put to 1804 /* Get scan command from scan_pending_q and put to
1784 cmd_pending_q */ 1805 cmd_pending_q */
@@ -1891,7 +1912,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
1891 __func__); 1912 __func__);
1892 return -1; 1913 return -1;
1893 } 1914 }
1894 priv->scan_pending_on_block = true;
1895 1915
1896 priv->adapter->scan_wait_q_woken = false; 1916 priv->adapter->scan_wait_q_woken = false;
1897 1917
@@ -1905,10 +1925,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
1905 if (!ret) 1925 if (!ret)
1906 ret = mwifiex_wait_queue_complete(priv->adapter); 1926 ret = mwifiex_wait_queue_complete(priv->adapter);
1907 1927
1908 if (ret == -1) { 1928 up(&priv->async_sem);
1909 priv->scan_pending_on_block = false;
1910 up(&priv->async_sem);
1911 }
1912 1929
1913 return ret; 1930 return ret;
1914} 1931}
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 82cf0fa2d9f6..5a1c1d0e5599 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -906,8 +906,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
906/* 906/*
907 * SDIO interrupt handler. 907 * SDIO interrupt handler.
908 * 908 *
909 * This function reads the interrupt status from firmware and assigns 909 * This function reads the interrupt status from firmware and handles
910 * the main process in workqueue which will handle the interrupt. 910 * the interrupt in current thread (ksdioirqd) right away.
911 */ 911 */
912static void 912static void
913mwifiex_sdio_interrupt(struct sdio_func *func) 913mwifiex_sdio_interrupt(struct sdio_func *func)
@@ -930,7 +930,7 @@ mwifiex_sdio_interrupt(struct sdio_func *func)
930 adapter->ps_state = PS_STATE_AWAKE; 930 adapter->ps_state = PS_STATE_AWAKE;
931 931
932 mwifiex_interrupt_status(adapter); 932 mwifiex_interrupt_status(adapter);
933 queue_work(adapter->workqueue, &adapter->main_work); 933 mwifiex_main_process(adapter);
934} 934}
935 935
936/* 936/*
@@ -1749,6 +1749,37 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
1749 port, card->mp_data_port_mask); 1749 port, card->mp_data_port_mask);
1750} 1750}
1751 1751
1752static struct mmc_host *reset_host;
1753static void sdio_card_reset_worker(struct work_struct *work)
1754{
1755 /* The actual reset operation must be run outside of driver thread.
1756 * This is because mmc_remove_host() will cause the device to be
1757 * instantly destroyed, and the driver then needs to end its thread,
1758 * leading to a deadlock.
1759 *
1760 * We run it in a totally independent workqueue.
1761 */
1762
1763 pr_err("Resetting card...\n");
1764 mmc_remove_host(reset_host);
1765 /* 20ms delay is based on experiment with sdhci controller */
1766 mdelay(20);
1767 mmc_add_host(reset_host);
1768}
1769static DECLARE_WORK(card_reset_work, sdio_card_reset_worker);
1770
1771/* This function resets the card */
1772static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
1773{
1774 struct sdio_mmc_card *card = adapter->card;
1775
1776 if (work_pending(&card_reset_work))
1777 return;
1778
1779 reset_host = card->func->card->host;
1780 schedule_work(&card_reset_work);
1781}
1782
1752static struct mwifiex_if_ops sdio_ops = { 1783static struct mwifiex_if_ops sdio_ops = {
1753 .init_if = mwifiex_init_sdio, 1784 .init_if = mwifiex_init_sdio,
1754 .cleanup_if = mwifiex_cleanup_sdio, 1785 .cleanup_if = mwifiex_cleanup_sdio,
@@ -1767,6 +1798,7 @@ static struct mwifiex_if_ops sdio_ops = {
1767 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf, 1798 .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf,
1768 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, 1799 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete,
1769 .event_complete = mwifiex_sdio_event_complete, 1800 .event_complete = mwifiex_sdio_event_complete,
1801 .card_reset = mwifiex_sdio_card_reset,
1770}; 1802};
1771 1803
1772/* 1804/*
@@ -1804,6 +1836,7 @@ mwifiex_sdio_cleanup_module(void)
1804 /* Set the flag as user is removing this module. */ 1836 /* Set the flag as user is removing this module. */
1805 user_rmmod = 1; 1837 user_rmmod = 1;
1806 1838
1839 cancel_work_sync(&card_reset_work);
1807 sdio_unregister_driver(&mwifiex_sdio); 1840 sdio_unregister_driver(&mwifiex_sdio);
1808} 1841}
1809 1842
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 21033738ef0c..8cc5468654b4 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -25,6 +25,7 @@
25#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
26#include <linux/mmc/sdio_func.h> 26#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/card.h> 27#include <linux/mmc/card.h>
28#include <linux/mmc/host.h>
28 29
29#include "main.h" 30#include "main.h"
30 31
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 09e6a267f566..65c12eb3e5e7 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -85,10 +85,6 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
85 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); 85 spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
86 if (priv->report_scan_result) 86 if (priv->report_scan_result)
87 priv->report_scan_result = false; 87 priv->report_scan_result = false;
88 if (priv->scan_pending_on_block) {
89 priv->scan_pending_on_block = false;
90 up(&priv->async_sem);
91 }
92 break; 88 break;
93 89
94 case HostCmd_CMD_MAC_CONTROL: 90 case HostCmd_CMD_MAC_CONTROL:
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8132119e1a21..41aafc7454ed 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -124,8 +124,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
124 } 124 }
125 memset(priv->cfg_bssid, 0, ETH_ALEN); 125 memset(priv->cfg_bssid, 0, ETH_ALEN);
126 126
127 if (!netif_queue_stopped(priv->netdev)) 127 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
128 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
129 if (netif_carrier_ok(priv->netdev)) 128 if (netif_carrier_ok(priv->netdev))
130 netif_carrier_off(priv->netdev); 129 netif_carrier_off(priv->netdev);
131} 130}
@@ -197,8 +196,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
197 dev_dbg(adapter->dev, "event: LINK_SENSED\n"); 196 dev_dbg(adapter->dev, "event: LINK_SENSED\n");
198 if (!netif_carrier_ok(priv->netdev)) 197 if (!netif_carrier_ok(priv->netdev))
199 netif_carrier_on(priv->netdev); 198 netif_carrier_on(priv->netdev);
200 if (netif_queue_stopped(priv->netdev)) 199 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
201 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
202 break; 200 break;
203 201
204 case EVENT_DEAUTHENTICATED: 202 case EVENT_DEAUTHENTICATED:
@@ -306,8 +304,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
306 dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n"); 304 dev_dbg(adapter->dev, "event: ADHOC_BCN_LOST\n");
307 priv->adhoc_is_link_sensed = false; 305 priv->adhoc_is_link_sensed = false;
308 mwifiex_clean_txrx(priv); 306 mwifiex_clean_txrx(priv);
309 if (!netif_queue_stopped(priv->netdev)) 307 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
310 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
311 if (netif_carrier_ok(priv->netdev)) 308 if (netif_carrier_ok(priv->netdev))
312 netif_carrier_off(priv->netdev); 309 netif_carrier_off(priv->netdev);
313 break; 310 break;
@@ -424,7 +421,6 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
424 cfg80211_remain_on_channel_expired(priv->wdev, 421 cfg80211_remain_on_channel_expired(priv->wdev,
425 priv->roc_cfg.cookie, 422 priv->roc_cfg.cookie,
426 &priv->roc_cfg.chan, 423 &priv->roc_cfg.chan,
427 priv->roc_cfg.chan_type,
428 GFP_ATOMIC); 424 GFP_ATOMIC);
429 425
430 memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg)); 426 memset(&priv->roc_cfg, 0x00, sizeof(struct mwifiex_roc_cfg));
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 0c9f70b2cbe6..cb682561c438 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -160,10 +160,21 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
160{ 160{
161 int ret; 161 int ret;
162 u8 *beacon_ie; 162 u8 *beacon_ie;
163 size_t beacon_ie_len;
163 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; 164 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
165 const struct cfg80211_bss_ies *ies;
166
167 rcu_read_lock();
168 ies = rcu_dereference(bss->ies);
169 if (WARN_ON(!ies)) {
170 /* should never happen */
171 rcu_read_unlock();
172 return -EINVAL;
173 }
174 beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC);
175 beacon_ie_len = ies->len;
176 rcu_read_unlock();
164 177
165 beacon_ie = kmemdup(bss->information_elements, bss->len_beacon_ies,
166 GFP_KERNEL);
167 if (!beacon_ie) { 178 if (!beacon_ie) {
168 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); 179 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
169 return -ENOMEM; 180 return -ENOMEM;
@@ -172,7 +183,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
172 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN); 183 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN);
173 bss_desc->rssi = bss->signal; 184 bss_desc->rssi = bss->signal;
174 bss_desc->beacon_buf = beacon_ie; 185 bss_desc->beacon_buf = beacon_ie;
175 bss_desc->beacon_buf_size = bss->len_beacon_ies; 186 bss_desc->beacon_buf_size = beacon_ie_len;
176 bss_desc->beacon_period = bss->beacon_interval; 187 bss_desc->beacon_period = bss->beacon_interval;
177 bss_desc->cap_info_bitmap = bss->capability; 188 bss_desc->cap_info_bitmap = bss->capability;
178 bss_desc->bss_band = bss_priv->band; 189 bss_desc->bss_band = bss_priv->band;
@@ -198,18 +209,23 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
198static int mwifiex_process_country_ie(struct mwifiex_private *priv, 209static int mwifiex_process_country_ie(struct mwifiex_private *priv,
199 struct cfg80211_bss *bss) 210 struct cfg80211_bss *bss)
200{ 211{
201 u8 *country_ie, country_ie_len; 212 const u8 *country_ie;
213 u8 country_ie_len;
202 struct mwifiex_802_11d_domain_reg *domain_info = 214 struct mwifiex_802_11d_domain_reg *domain_info =
203 &priv->adapter->domain_reg; 215 &priv->adapter->domain_reg;
204 216
205 country_ie = (u8 *)ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); 217 rcu_read_lock();
206 218 country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
207 if (!country_ie) 219 if (!country_ie) {
220 rcu_read_unlock();
208 return 0; 221 return 0;
222 }
209 223
210 country_ie_len = country_ie[1]; 224 country_ie_len = country_ie[1];
211 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) 225 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) {
226 rcu_read_unlock();
212 return 0; 227 return 0;
228 }
213 229
214 domain_info->country_code[0] = country_ie[2]; 230 domain_info->country_code[0] = country_ie[2];
215 domain_info->country_code[1] = country_ie[3]; 231 domain_info->country_code[1] = country_ie[3];
@@ -223,6 +239,8 @@ static int mwifiex_process_country_ie(struct mwifiex_private *priv,
223 memcpy((u8 *)domain_info->triplet, 239 memcpy((u8 *)domain_info->triplet,
224 &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len); 240 &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len);
225 241
242 rcu_read_unlock();
243
226 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, 244 if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO,
227 HostCmd_ACT_GEN_SET, 0, NULL)) { 245 HostCmd_ACT_GEN_SET, 0, NULL)) {
228 wiphy_err(priv->adapter->wiphy, 246 wiphy_err(priv->adapter->wiphy,
@@ -276,8 +294,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
276 dev_dbg(adapter->dev, "info: SSID found in scan list ... " 294 dev_dbg(adapter->dev, "info: SSID found in scan list ... "
277 "associating...\n"); 295 "associating...\n");
278 296
279 if (!netif_queue_stopped(priv->netdev)) 297 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
280 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
281 if (netif_carrier_ok(priv->netdev)) 298 if (netif_carrier_ok(priv->netdev))
282 netif_carrier_off(priv->netdev); 299 netif_carrier_off(priv->netdev);
283 300
@@ -318,8 +335,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
318 335
319 ret = mwifiex_check_network_compatibility(priv, bss_desc); 336 ret = mwifiex_check_network_compatibility(priv, bss_desc);
320 337
321 if (!netif_queue_stopped(priv->netdev)) 338 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
322 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
323 if (netif_carrier_ok(priv->netdev)) 339 if (netif_carrier_ok(priv->netdev))
324 netif_carrier_off(priv->netdev); 340 netif_carrier_off(priv->netdev);
325 341
@@ -463,7 +479,7 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
463 } 479 }
464 480
465 if (adapter->hs_activated) { 481 if (adapter->hs_activated) {
466 dev_dbg(adapter->dev, "cmd: HS Already actived\n"); 482 dev_dbg(adapter->dev, "cmd: HS Already activated\n");
467 return true; 483 return true;
468 } 484 }
469 485
@@ -713,7 +729,7 @@ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
713 dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n", 729 dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n",
714 priv->wpa_ie_len, priv->wpa_ie[0]); 730 priv->wpa_ie_len, priv->wpa_ie[0]);
715 731
716 if (priv->wpa_ie[0] == WLAN_EID_WPA) { 732 if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
717 priv->sec_info.wpa_enabled = true; 733 priv->sec_info.wpa_enabled = true;
718 } else if (priv->wpa_ie[0] == WLAN_EID_RSN) { 734 } else if (priv->wpa_ie[0] == WLAN_EID_RSN) {
719 priv->sec_info.wpa2_enabled = true; 735 priv->sec_info.wpa2_enabled = true;
@@ -1046,7 +1062,6 @@ mwifiex_get_ver_ext(struct mwifiex_private *priv)
1046int 1062int
1047mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, 1063mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1048 struct ieee80211_channel *chan, 1064 struct ieee80211_channel *chan,
1049 enum nl80211_channel_type *ct,
1050 unsigned int duration) 1065 unsigned int duration)
1051{ 1066{
1052 struct host_cmd_ds_remain_on_chan roc_cfg; 1067 struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1056,7 +1071,7 @@ mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
1056 roc_cfg.action = cpu_to_le16(action); 1071 roc_cfg.action = cpu_to_le16(action);
1057 if (action == HostCmd_ACT_GEN_SET) { 1072 if (action == HostCmd_ACT_GEN_SET) {
1058 roc_cfg.band_cfg = chan->band; 1073 roc_cfg.band_cfg = chan->band;
1059 sc = mwifiex_chan_type_to_sec_chan_offset(*ct); 1074 sc = mwifiex_chan_type_to_sec_chan_offset(NL80211_CHAN_NO_HT);
1060 roc_cfg.band_cfg |= (sc << 2); 1075 roc_cfg.band_cfg |= (sc << 2);
1061 1076
1062 roc_cfg.channel = 1077 roc_cfg.channel =
@@ -1253,7 +1268,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
1253 } 1268 }
1254 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; 1269 pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
1255 /* Test to see if it is a WPA IE, if not, then it is a gen IE */ 1270 /* Test to see if it is a WPA IE, if not, then it is a gen IE */
1256 if (((pvendor_ie->element_id == WLAN_EID_WPA) && 1271 if (((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) &&
1257 (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) || 1272 (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) ||
1258 (pvendor_ie->element_id == WLAN_EID_RSN)) { 1273 (pvendor_ie->element_id == WLAN_EID_RSN)) {
1259 1274
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 07d32b73783e..b5c109504393 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -38,14 +38,10 @@
38 * 38 *
39 * The completion callback is called after processing in complete. 39 * The completion callback is called after processing in complete.
40 */ 40 */
41int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter, 41int mwifiex_process_rx_packet(struct mwifiex_private *priv,
42 struct sk_buff *skb) 42 struct sk_buff *skb)
43{ 43{
44 int ret; 44 int ret;
45 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
46 struct mwifiex_private *priv =
47 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
48 rx_info->bss_type);
49 struct rx_packet_hdr *rx_pkt_hdr; 45 struct rx_packet_hdr *rx_pkt_hdr;
50 struct rxpd *local_rx_pd; 46 struct rxpd *local_rx_pd;
51 int hdr_chop; 47 int hdr_chop;
@@ -98,9 +94,9 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
98 94
99 priv->rxpd_htinfo = local_rx_pd->ht_info; 95 priv->rxpd_htinfo = local_rx_pd->ht_info;
100 96
101 ret = mwifiex_recv_packet(adapter, skb); 97 ret = mwifiex_recv_packet(priv, skb);
102 if (ret == -1) 98 if (ret == -1)
103 dev_err(adapter->dev, "recv packet failed\n"); 99 dev_err(priv->adapter->dev, "recv packet failed\n");
104 100
105 return ret; 101 return ret;
106} 102}
@@ -117,21 +113,15 @@ int mwifiex_process_rx_packet(struct mwifiex_adapter *adapter,
117 * 113 *
118 * The completion callback is called after processing in complete. 114 * The completion callback is called after processing in complete.
119 */ 115 */
120int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter, 116int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
121 struct sk_buff *skb) 117 struct sk_buff *skb)
122{ 118{
119 struct mwifiex_adapter *adapter = priv->adapter;
123 int ret = 0; 120 int ret = 0;
124 struct rxpd *local_rx_pd; 121 struct rxpd *local_rx_pd;
125 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
126 struct rx_packet_hdr *rx_pkt_hdr; 122 struct rx_packet_hdr *rx_pkt_hdr;
127 u8 ta[ETH_ALEN]; 123 u8 ta[ETH_ALEN];
128 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num; 124 u16 rx_pkt_type, rx_pkt_offset, rx_pkt_length, seq_num;
129 struct mwifiex_private *priv =
130 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
131 rx_info->bss_type);
132
133 if (!priv)
134 return -1;
135 125
136 local_rx_pd = (struct rxpd *) (skb->data); 126 local_rx_pd = (struct rxpd *) (skb->data);
137 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type); 127 rx_pkt_type = le16_to_cpu(local_rx_pd->rx_pkt_type);
@@ -169,13 +159,13 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
169 159
170 while (!skb_queue_empty(&list)) { 160 while (!skb_queue_empty(&list)) {
171 rx_skb = __skb_dequeue(&list); 161 rx_skb = __skb_dequeue(&list);
172 ret = mwifiex_recv_packet(adapter, rx_skb); 162 ret = mwifiex_recv_packet(priv, rx_skb);
173 if (ret == -1) 163 if (ret == -1)
174 dev_err(adapter->dev, "Rx of A-MSDU failed"); 164 dev_err(adapter->dev, "Rx of A-MSDU failed");
175 } 165 }
176 return 0; 166 return 0;
177 } else if (rx_pkt_type == PKT_TYPE_MGMT) { 167 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
178 ret = mwifiex_process_mgmt_packet(adapter, skb); 168 ret = mwifiex_process_mgmt_packet(priv, skb);
179 if (ret) 169 if (ret)
180 dev_err(adapter->dev, "Rx of mgmt packet failed"); 170 dev_err(adapter->dev, "Rx of mgmt packet failed");
181 dev_kfree_skb_any(skb); 171 dev_kfree_skb_any(skb);
@@ -188,7 +178,7 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
188 */ 178 */
189 if (!IS_11N_ENABLED(priv) || 179 if (!IS_11N_ENABLED(priv) ||
190 memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) { 180 memcmp(priv->curr_addr, rx_pkt_hdr->eth803_hdr.h_dest, ETH_ALEN)) {
191 mwifiex_process_rx_packet(adapter, skb); 181 mwifiex_process_rx_packet(priv, skb);
192 return ret; 182 return ret;
193 } 183 }
194 184
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 2af263992e83..8c80024c30ff 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -48,13 +48,19 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
48 if (!priv) 48 if (!priv)
49 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 49 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
50 50
51 if (!priv) {
52 dev_err(adapter->dev, "data: priv not found. Drop RX packet\n");
53 dev_kfree_skb_any(skb);
54 return -1;
55 }
56
51 rx_info->bss_num = priv->bss_num; 57 rx_info->bss_num = priv->bss_num;
52 rx_info->bss_type = priv->bss_type; 58 rx_info->bss_type = priv->bss_type;
53 59
54 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) 60 if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
55 return mwifiex_process_uap_rx_packet(adapter, skb); 61 return mwifiex_process_uap_rx_packet(priv, skb);
56 62
57 return mwifiex_process_sta_rx_packet(adapter, skb); 63 return mwifiex_process_sta_rx_packet(priv, skb);
58} 64}
59EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet); 65EXPORT_SYMBOL_GPL(mwifiex_handle_rx_packet);
60 66
@@ -115,13 +121,13 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
115 dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", 121 dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n",
116 ret); 122 ret);
117 adapter->dbg.num_tx_host_to_card_failure++; 123 adapter->dbg.num_tx_host_to_card_failure++;
118 mwifiex_write_data_complete(adapter, skb, ret); 124 mwifiex_write_data_complete(adapter, skb, 0, ret);
119 break; 125 break;
120 case -EINPROGRESS: 126 case -EINPROGRESS:
121 adapter->data_sent = false; 127 adapter->data_sent = false;
122 break; 128 break;
123 case 0: 129 case 0:
124 mwifiex_write_data_complete(adapter, skb, ret); 130 mwifiex_write_data_complete(adapter, skb, 0, ret);
125 break; 131 break;
126 default: 132 default:
127 break; 133 break;
@@ -138,11 +144,12 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
138 * wakes up stalled traffic queue if required, and then frees the buffer. 144 * wakes up stalled traffic queue if required, and then frees the buffer.
139 */ 145 */
140int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, 146int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
141 struct sk_buff *skb, int status) 147 struct sk_buff *skb, int aggr, int status)
142{ 148{
143 struct mwifiex_private *priv, *tpriv; 149 struct mwifiex_private *priv;
144 struct mwifiex_txinfo *tx_info; 150 struct mwifiex_txinfo *tx_info;
145 int i; 151 struct netdev_queue *txq;
152 int index;
146 153
147 if (!skb) 154 if (!skb)
148 return 0; 155 return 0;
@@ -166,15 +173,20 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
166 173
167 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) 174 if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
168 atomic_dec_return(&adapter->pending_bridged_pkts); 175 atomic_dec_return(&adapter->pending_bridged_pkts);
169 if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING) 176
177 if (aggr)
178 /* For skb_aggr, do not wake up tx queue */
170 goto done; 179 goto done;
171 180
172 for (i = 0; i < adapter->priv_num; i++) { 181 atomic_dec(&adapter->tx_pending);
173 tpriv = adapter->priv[i];
174 182
175 if (tpriv->media_connected && 183 index = mwifiex_1d_to_wmm_queue[skb->priority];
176 netif_queue_stopped(tpriv->netdev)) 184 if (atomic_dec_return(&priv->wmm_tx_pending[index]) < LOW_TX_PENDING) {
177 mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter); 185 txq = netdev_get_tx_queue(priv->netdev, index);
186 if (netif_tx_queue_stopped(txq)) {
187 netif_tx_wake_queue(txq);
188 dev_dbg(adapter->dev, "wake queue: %d\n", index);
189 }
178 } 190 }
179done: 191done:
180 dev_kfree_skb_any(skb); 192 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index d95a2d558fcf..8dd72240f162 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -188,10 +188,19 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
188 int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable); 188 int var_offset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
189 const u8 *var_pos = params->beacon.head + var_offset; 189 const u8 *var_pos = params->beacon.head + var_offset;
190 int len = params->beacon.head_len - var_offset; 190 int len = params->beacon.head_len - var_offset;
191 u8 rate_len = 0;
191 192
192 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); 193 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
193 if (rate_ie) 194 if (rate_ie) {
194 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); 195 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
196 rate_len = rate_ie->len;
197 }
198
199 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
200 params->beacon.tail,
201 params->beacon.tail_len);
202 if (rate_ie)
203 memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
195 204
196 return; 205 return;
197} 206}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index a33fa394e349..21c640d3b579 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -235,11 +235,18 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
235 break; 235 break;
236 case EVENT_UAP_BSS_IDLE: 236 case EVENT_UAP_BSS_IDLE:
237 priv->media_connected = false; 237 priv->media_connected = false;
238 if (netif_carrier_ok(priv->netdev))
239 netif_carrier_off(priv->netdev);
240 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
241
238 mwifiex_clean_txrx(priv); 242 mwifiex_clean_txrx(priv);
239 mwifiex_del_all_sta_list(priv); 243 mwifiex_del_all_sta_list(priv);
240 break; 244 break;
241 case EVENT_UAP_BSS_ACTIVE: 245 case EVENT_UAP_BSS_ACTIVE:
242 priv->media_connected = true; 246 priv->media_connected = true;
247 if (!netif_carrier_ok(priv->netdev))
248 netif_carrier_on(priv->netdev);
249 mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
243 break; 250 break;
244 case EVENT_UAP_BSS_START: 251 case EVENT_UAP_BSS_START:
245 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause); 252 dev_dbg(adapter->dev, "AP EVENT: event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 0966ac24b3b4..a018e42d117e 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -146,7 +146,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
146 } 146 }
147 147
148 /* Forward unicat/Inter-BSS packets to kernel. */ 148 /* Forward unicat/Inter-BSS packets to kernel. */
149 return mwifiex_process_rx_packet(adapter, skb); 149 return mwifiex_process_rx_packet(priv, skb);
150} 150}
151 151
152/* 152/*
@@ -159,24 +159,17 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
159 * 159 *
160 * The completion callback is called after processing is complete. 160 * The completion callback is called after processing is complete.
161 */ 161 */
162int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter, 162int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
163 struct sk_buff *skb) 163 struct sk_buff *skb)
164{ 164{
165 struct mwifiex_adapter *adapter = priv->adapter;
165 int ret; 166 int ret;
166 struct uap_rxpd *uap_rx_pd; 167 struct uap_rxpd *uap_rx_pd;
167 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
168 struct rx_packet_hdr *rx_pkt_hdr; 168 struct rx_packet_hdr *rx_pkt_hdr;
169 u16 rx_pkt_type; 169 u16 rx_pkt_type;
170 u8 ta[ETH_ALEN], pkt_type; 170 u8 ta[ETH_ALEN], pkt_type;
171 struct mwifiex_sta_node *node; 171 struct mwifiex_sta_node *node;
172 172
173 struct mwifiex_private *priv =
174 mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
175 rx_info->bss_type);
176
177 if (!priv)
178 return -1;
179
180 uap_rx_pd = (struct uap_rxpd *)(skb->data); 173 uap_rx_pd = (struct uap_rxpd *)(skb->data);
181 rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type); 174 rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
182 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); 175 rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -210,7 +203,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
210 203
211 while (!skb_queue_empty(&list)) { 204 while (!skb_queue_empty(&list)) {
212 rx_skb = __skb_dequeue(&list); 205 rx_skb = __skb_dequeue(&list);
213 ret = mwifiex_recv_packet(adapter, rx_skb); 206 ret = mwifiex_recv_packet(priv, rx_skb);
214 if (ret) 207 if (ret)
215 dev_err(adapter->dev, 208 dev_err(adapter->dev,
216 "AP:Rx A-MSDU failed"); 209 "AP:Rx A-MSDU failed");
@@ -218,7 +211,7 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_adapter *adapter,
218 211
219 return 0; 212 return 0;
220 } else if (rx_pkt_type == PKT_TYPE_MGMT) { 213 } else if (rx_pkt_type == PKT_TYPE_MGMT) {
221 ret = mwifiex_process_mgmt_packet(adapter, skb); 214 ret = mwifiex_process_mgmt_packet(priv, skb);
222 if (ret) 215 if (ret)
223 dev_err(adapter->dev, "Rx of mgmt packet failed"); 216 dev_err(adapter->dev, "Rx of mgmt packet failed");
224 dev_kfree_skb_any(skb); 217 dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 22a5916564b8..63ac9f2d11ae 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -238,7 +238,7 @@ static void mwifiex_usb_tx_complete(struct urb *urb)
238 } else { 238 } else {
239 dev_dbg(adapter->dev, "%s: DATA\n", __func__); 239 dev_dbg(adapter->dev, "%s: DATA\n", __func__);
240 atomic_dec(&card->tx_data_urb_pending); 240 atomic_dec(&card->tx_data_urb_pending);
241 mwifiex_write_data_complete(adapter, context->skb, 241 mwifiex_write_data_complete(adapter, context->skb, 0,
242 urb->status ? -1 : 0); 242 urb->status ? -1 : 0);
243 } 243 }
244 244
@@ -351,7 +351,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
351 card->udev = udev; 351 card->udev = udev;
352 card->intf = intf; 352 card->intf = intf;
353 353
354 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocl=%#x\n", 354 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
355 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass, 355 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
356 udev->descriptor.bDeviceSubClass, 356 udev->descriptor.bDeviceSubClass,
357 udev->descriptor.bDeviceProtocol); 357 udev->descriptor.bDeviceProtocol);
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index ae88f80cf86b..0982375ba3b1 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -146,20 +146,16 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv,
146 * to the kernel. 146 * to the kernel.
147 */ 147 */
148int 148int
149mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter, 149mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
150 struct sk_buff *skb) 150 struct sk_buff *skb)
151{ 151{
152 struct rxpd *rx_pd; 152 struct rxpd *rx_pd;
153 struct mwifiex_private *priv;
154 u16 pkt_len; 153 u16 pkt_len;
155 154
156 if (!skb) 155 if (!skb)
157 return -1; 156 return -1;
158 157
159 rx_pd = (struct rxpd *)skb->data; 158 rx_pd = (struct rxpd *)skb->data;
160 priv = mwifiex_get_priv_by_id(adapter, rx_pd->bss_num, rx_pd->bss_type);
161 if (!priv)
162 return -1;
163 159
164 skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset)); 160 skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
165 skb_pull(skb, sizeof(pkt_len)); 161 skb_pull(skb, sizeof(pkt_len));
@@ -190,20 +186,11 @@ mwifiex_process_mgmt_packet(struct mwifiex_adapter *adapter,
190 * the function creates a blank SKB, fills it with the data from the 186 * the function creates a blank SKB, fills it with the data from the
191 * received buffer and then sends this new SKB to the kernel. 187 * received buffer and then sends this new SKB to the kernel.
192 */ 188 */
193int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb) 189int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
194{ 190{
195 struct mwifiex_rxinfo *rx_info;
196 struct mwifiex_private *priv;
197
198 if (!skb) 191 if (!skb)
199 return -1; 192 return -1;
200 193
201 rx_info = MWIFIEX_SKB_RXCB(skb);
202 priv = mwifiex_get_priv_by_id(adapter, rx_info->bss_num,
203 rx_info->bss_type);
204 if (!priv)
205 return -1;
206
207 skb->dev = priv->netdev; 194 skb->dev = priv->netdev;
208 skb->protocol = eth_type_trans(skb, priv->netdev); 195 skb->protocol = eth_type_trans(skb, priv->netdev);
209 skb->ip_summed = CHECKSUM_NONE; 196 skb->ip_summed = CHECKSUM_NONE;
@@ -225,7 +212,7 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
225 * fragments. Currently we fail the Filesndl-ht.scr script 212 * fragments. Currently we fail the Filesndl-ht.scr script
226 * for UDP, hence this fix 213 * for UDP, hence this fix
227 */ 214 */
228 if ((adapter->iface_type == MWIFIEX_USB) && 215 if ((priv->adapter->iface_type == MWIFIEX_USB) &&
229 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) 216 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
230 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); 217 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
231 218
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 600d8194610e..818f871ae987 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -483,7 +483,7 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
483 struct sk_buff *skb, *tmp; 483 struct sk_buff *skb, *tmp;
484 484
485 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) 485 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp)
486 mwifiex_write_data_complete(adapter, skb, -1); 486 mwifiex_write_data_complete(adapter, skb, 0, -1);
487} 487}
488 488
489/* 489/*
@@ -650,7 +650,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
650 650
651 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) { 651 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
652 dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); 652 dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
653 mwifiex_write_data_complete(adapter, skb, -1); 653 mwifiex_write_data_complete(adapter, skb, 0, -1);
654 return; 654 return;
655 } 655 }
656 656
@@ -680,7 +680,7 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
680 680
681 if (!ra_list) { 681 if (!ra_list) {
682 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 682 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
683 mwifiex_write_data_complete(adapter, skb, -1); 683 mwifiex_write_data_complete(adapter, skb, 0, -1);
684 return; 684 return;
685 } 685 }
686 686
@@ -1090,7 +1090,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
1090 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { 1090 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1091 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1091 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1092 ra_list_flags); 1092 ra_list_flags);
1093 mwifiex_write_data_complete(adapter, skb, -1); 1093 mwifiex_write_data_complete(adapter, skb, 0, -1);
1094 return; 1094 return;
1095 } 1095 }
1096 1096
@@ -1195,7 +1195,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1195 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) { 1195 if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1196 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1196 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
1197 ra_list_flags); 1197 ra_list_flags);
1198 mwifiex_write_data_complete(adapter, skb, -1); 1198 mwifiex_write_data_complete(adapter, skb, 0, -1);
1199 return; 1199 return;
1200 } 1200 }
1201 1201
@@ -1209,7 +1209,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1209 adapter->data_sent = false; 1209 adapter->data_sent = false;
1210 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret); 1210 dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
1211 adapter->dbg.num_tx_host_to_card_failure++; 1211 adapter->dbg.num_tx_host_to_card_failure++;
1212 mwifiex_write_data_complete(adapter, skb, ret); 1212 mwifiex_write_data_complete(adapter, skb, 0, ret);
1213 break; 1213 break;
1214 case -EINPROGRESS: 1214 case -EINPROGRESS:
1215 adapter->data_sent = false; 1215 adapter->data_sent = false;
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index ec839952d2e7..b92f39d8963b 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -31,6 +31,8 @@ enum ieee_types_wmm_ecw_bitmasks {
31 MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)), 31 MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
32}; 32};
33 33
34static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
35
34/* 36/*
35 * This function retrieves the TID of the given RA list. 37 * This function retrieves the TID of the given RA list.
36 */ 38 */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 5099e5375cb3..f221b95b90b3 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1851,6 +1851,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
1851 bool start_ba_session = false; 1851 bool start_ba_session = false;
1852 bool mgmtframe = false; 1852 bool mgmtframe = false;
1853 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1853 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1854 bool eapol_frame = false;
1854 1855
1855 wh = (struct ieee80211_hdr *)skb->data; 1856 wh = (struct ieee80211_hdr *)skb->data;
1856 if (ieee80211_is_data_qos(wh->frame_control)) 1857 if (ieee80211_is_data_qos(wh->frame_control))
@@ -1858,6 +1859,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
1858 else 1859 else
1859 qos = 0; 1860 qos = 0;
1860 1861
1862 if (skb->protocol == cpu_to_be16(ETH_P_PAE))
1863 eapol_frame = true;
1864
1861 if (ieee80211_is_mgmt(wh->frame_control)) 1865 if (ieee80211_is_mgmt(wh->frame_control))
1862 mgmtframe = true; 1866 mgmtframe = true;
1863 1867
@@ -1916,9 +1920,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
1916 1920
1917 txpriority = index; 1921 txpriority = index;
1918 1922
1919 if (priv->ap_fw && sta && sta->ht_cap.ht_supported 1923 if (priv->ap_fw && sta && sta->ht_cap.ht_supported && !eapol_frame &&
1920 && skb->protocol != cpu_to_be16(ETH_P_PAE) 1924 ieee80211_is_data_qos(wh->frame_control)) {
1921 && ieee80211_is_data_qos(wh->frame_control)) {
1922 tid = qos & 0xf; 1925 tid = qos & 0xf;
1923 mwl8k_tx_count_packet(sta, tid); 1926 mwl8k_tx_count_packet(sta, tid);
1924 spin_lock(&priv->stream_lock); 1927 spin_lock(&priv->stream_lock);
@@ -2005,6 +2008,8 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
2005 spin_unlock(&priv->stream_lock); 2008 spin_unlock(&priv->stream_lock);
2006 } 2009 }
2007 spin_unlock_bh(&priv->tx_lock); 2010 spin_unlock_bh(&priv->tx_lock);
2011 pci_unmap_single(priv->pdev, dma, skb->len,
2012 PCI_DMA_TODEVICE);
2008 dev_kfree_skb(skb); 2013 dev_kfree_skb(skb);
2009 return; 2014 return;
2010 } 2015 }
@@ -2025,9 +2030,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw,
2025 else 2030 else
2026 tx->peer_id = 0; 2031 tx->peer_id = 0;
2027 2032
2028 if (priv->ap_fw) 2033 if (priv->ap_fw && ieee80211_is_data(wh->frame_control) && !eapol_frame)
2029 tx->timestamp = cpu_to_le32(ioread32(priv->regs + 2034 tx->timestamp = cpu_to_le32(ioread32(priv->regs +
2030 MWL8K_HW_TIMER_REGISTER)); 2035 MWL8K_HW_TIMER_REGISTER));
2036 else
2037 tx->timestamp = 0;
2031 2038
2032 wmb(); 2039 wmb();
2033 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 2040 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
@@ -3679,7 +3686,8 @@ struct mwl8k_cmd_bastream {
3679} __packed; 3686} __packed;
3680 3687
3681static int 3688static int
3682mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream) 3689mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
3690 struct ieee80211_vif *vif)
3683{ 3691{
3684 struct mwl8k_cmd_bastream *cmd; 3692 struct mwl8k_cmd_bastream *cmd;
3685 int rc; 3693 int rc;
@@ -3702,7 +3710,7 @@ mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
3702 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) | 3710 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE) |
3703 cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM); 3711 cpu_to_le32(BASTREAM_FLAG_DIRECTION_UPSTREAM);
3704 3712
3705 rc = mwl8k_post_cmd(hw, &cmd->header); 3713 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3706 3714
3707 kfree(cmd); 3715 kfree(cmd);
3708 3716
@@ -3711,7 +3719,7 @@ mwl8k_check_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream)
3711 3719
3712static int 3720static int
3713mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream, 3721mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
3714 u8 buf_size) 3722 u8 buf_size, struct ieee80211_vif *vif)
3715{ 3723{
3716 struct mwl8k_cmd_bastream *cmd; 3724 struct mwl8k_cmd_bastream *cmd;
3717 int rc; 3725 int rc;
@@ -3745,7 +3753,7 @@ mwl8k_create_ba(struct ieee80211_hw *hw, struct mwl8k_ampdu_stream *stream,
3745 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE | 3753 cpu_to_le32(BASTREAM_FLAG_IMMEDIATE_TYPE |
3746 BASTREAM_FLAG_DIRECTION_UPSTREAM); 3754 BASTREAM_FLAG_DIRECTION_UPSTREAM);
3747 3755
3748 rc = mwl8k_post_cmd(hw, &cmd->header); 3756 rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
3749 3757
3750 wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n", 3758 wiphy_debug(hw->wiphy, "Created a BA stream for %pM : tid %d\n",
3751 stream->sta->addr, stream->tid); 3759 stream->sta->addr, stream->tid);
@@ -5085,6 +5093,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5085 struct mwl8k_priv *priv = hw->priv; 5093 struct mwl8k_priv *priv = hw->priv;
5086 struct mwl8k_ampdu_stream *stream; 5094 struct mwl8k_ampdu_stream *stream;
5087 u8 *addr = sta->addr; 5095 u8 *addr = sta->addr;
5096 struct mwl8k_sta *sta_info = MWL8K_STA(sta);
5088 5097
5089 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION)) 5098 if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
5090 return -ENOTSUPP; 5099 return -ENOTSUPP;
@@ -5127,7 +5136,16 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5127 /* Release the lock before we do the time consuming stuff */ 5136 /* Release the lock before we do the time consuming stuff */
5128 spin_unlock(&priv->stream_lock); 5137 spin_unlock(&priv->stream_lock);
5129 for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) { 5138 for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
5130 rc = mwl8k_check_ba(hw, stream); 5139
5140 /* Check if link is still valid */
5141 if (!sta_info->is_ampdu_allowed) {
5142 spin_lock(&priv->stream_lock);
5143 mwl8k_remove_stream(hw, stream);
5144 spin_unlock(&priv->stream_lock);
5145 return -EBUSY;
5146 }
5147
5148 rc = mwl8k_check_ba(hw, stream, vif);
5131 5149
5132 /* If HW restart is in progress mwl8k_post_cmd will 5150 /* If HW restart is in progress mwl8k_post_cmd will
5133 * return -EBUSY. Avoid retrying mwl8k_check_ba in 5151 * return -EBUSY. Avoid retrying mwl8k_check_ba in
@@ -5167,7 +5185,7 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5167 BUG_ON(stream == NULL); 5185 BUG_ON(stream == NULL);
5168 BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS); 5186 BUG_ON(stream->state != AMPDU_STREAM_IN_PROGRESS);
5169 spin_unlock(&priv->stream_lock); 5187 spin_unlock(&priv->stream_lock);
5170 rc = mwl8k_create_ba(hw, stream, buf_size); 5188 rc = mwl8k_create_ba(hw, stream, buf_size, vif);
5171 spin_lock(&priv->stream_lock); 5189 spin_lock(&priv->stream_lock);
5172 if (!rc) 5190 if (!rc)
5173 stream->state = AMPDU_STREAM_ACTIVE; 5191 stream->state = AMPDU_STREAM_ACTIVE;
@@ -5240,7 +5258,7 @@ enum {
5240#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" 5258#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
5241#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) 5259#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
5242 5260
5243static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 5261static struct mwl8k_device_info mwl8k_info_tbl[] = {
5244 [MWL8363] = { 5262 [MWL8363] = {
5245 .part_name = "88w8363", 5263 .part_name = "88w8363",
5246 .helper_image = "mwl8k/helper_8363.fw", 5264 .helper_image = "mwl8k/helper_8363.fw",
@@ -5617,6 +5635,18 @@ fail:
5617 return rc; 5635 return rc;
5618} 5636}
5619 5637
5638static const struct ieee80211_iface_limit ap_if_limits[] = {
5639 { .max = 8, .types = BIT(NL80211_IFTYPE_AP) },
5640};
5641
5642static const struct ieee80211_iface_combination ap_if_comb = {
5643 .limits = ap_if_limits,
5644 .n_limits = ARRAY_SIZE(ap_if_limits),
5645 .max_interfaces = 8,
5646 .num_different_channels = 1,
5647};
5648
5649
5620static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) 5650static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
5621{ 5651{
5622 struct ieee80211_hw *hw = priv->hw; 5652 struct ieee80211_hw *hw = priv->hw;
@@ -5696,8 +5726,13 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
5696 goto err_free_cookie; 5726 goto err_free_cookie;
5697 5727
5698 hw->wiphy->interface_modes = 0; 5728 hw->wiphy->interface_modes = 0;
5699 if (priv->ap_macids_supported || priv->device_info->fw_image_ap) 5729
5730 if (priv->ap_macids_supported || priv->device_info->fw_image_ap) {
5700 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP); 5731 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
5732 hw->wiphy->iface_combinations = &ap_if_comb;
5733 hw->wiphy->n_iface_combinations = 1;
5734 }
5735
5701 if (priv->sta_macids_supported || priv->device_info->fw_image_sta) 5736 if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
5702 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION); 5737 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
5703 5738
@@ -5721,7 +5756,7 @@ err_free_cookie:
5721 5756
5722 return rc; 5757 return rc;
5723} 5758}
5724static int __devinit mwl8k_probe(struct pci_dev *pdev, 5759static int mwl8k_probe(struct pci_dev *pdev,
5725 const struct pci_device_id *id) 5760 const struct pci_device_id *id)
5726{ 5761{
5727 static int printed_version; 5762 static int printed_version;
@@ -5838,12 +5873,7 @@ err_disable_device:
5838 return rc; 5873 return rc;
5839} 5874}
5840 5875
5841static void __devexit mwl8k_shutdown(struct pci_dev *pdev) 5876static void mwl8k_remove(struct pci_dev *pdev)
5842{
5843 printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__);
5844}
5845
5846static void __devexit mwl8k_remove(struct pci_dev *pdev)
5847{ 5877{
5848 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 5878 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
5849 struct mwl8k_priv *priv; 5879 struct mwl8k_priv *priv;
@@ -5895,8 +5925,7 @@ static struct pci_driver mwl8k_driver = {
5895 .name = MWL8K_NAME, 5925 .name = MWL8K_NAME,
5896 .id_table = mwl8k_pci_id_table, 5926 .id_table = mwl8k_pci_id_table,
5897 .probe = mwl8k_probe, 5927 .probe = mwl8k_probe,
5898 .remove = __devexit_p(mwl8k_remove), 5928 .remove = mwl8k_remove,
5899 .shutdown = __devexit_p(mwl8k_shutdown),
5900}; 5929};
5901 5930
5902module_pci_driver(mwl8k_driver); 5931module_pci_driver(mwl8k_driver);
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 7b751fba7e1f..d01edd2c50c5 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -161,24 +161,23 @@ static int orinoco_scan(struct wiphy *wiphy,
161} 161}
162 162
163static int orinoco_set_monitor_channel(struct wiphy *wiphy, 163static int orinoco_set_monitor_channel(struct wiphy *wiphy,
164 struct ieee80211_channel *chan, 164 struct cfg80211_chan_def *chandef)
165 enum nl80211_channel_type channel_type)
166{ 165{
167 struct orinoco_private *priv = wiphy_priv(wiphy); 166 struct orinoco_private *priv = wiphy_priv(wiphy);
168 int err = 0; 167 int err = 0;
169 unsigned long flags; 168 unsigned long flags;
170 int channel; 169 int channel;
171 170
172 if (!chan) 171 if (!chandef->chan)
173 return -EINVAL; 172 return -EINVAL;
174 173
175 if (channel_type != NL80211_CHAN_NO_HT) 174 if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
176 return -EINVAL; 175 return -EINVAL;
177 176
178 if (chan->band != IEEE80211_BAND_2GHZ) 177 if (chandef->chan->band != IEEE80211_BAND_2GHZ)
179 return -EINVAL; 178 return -EINVAL;
180 179
181 channel = ieee80211_freq_to_dsss_chan(chan->center_freq); 180 channel = ieee80211_freq_to_dsss_chan(chandef->chan->center_freq);
182 181
183 if ((channel < 1) || (channel > NUM_CHANNELS) || 182 if ((channel < 1) || (channel > NUM_CHANNELS) ||
184 !(priv->channel_mask & (1 << (channel - 1)))) 183 !(priv->channel_mask & (1 << (channel - 1))))
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 4dadf9880a97..5a8fec26136e 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -39,7 +39,7 @@ static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
39{ 39{
40 u8 *p = data; 40 u8 *p = data;
41 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) { 41 while ((p + 2 + WPA_SELECTOR_LEN) < (data + len)) {
42 if ((p[0] == WLAN_EID_GENERIC) && 42 if ((p[0] == WLAN_EID_VENDOR_SPECIFIC) &&
43 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0)) 43 (memcmp(&p[2], WPA_OUI_TYPE, WPA_SELECTOR_LEN) == 0))
44 return p; 44 return p;
45 p += p[1] + 2; 45 p += p[1] + 2;
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 326396b313a6..d73fdf6185a2 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -255,7 +255,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
255 return err; 255 return err;
256} 256}
257 257
258static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev) 258static void orinoco_nortel_remove_one(struct pci_dev *pdev)
259{ 259{
260 struct orinoco_private *priv = pci_get_drvdata(pdev); 260 struct orinoco_private *priv = pci_get_drvdata(pdev);
261 struct orinoco_pci_card *card = priv->card; 261 struct orinoco_pci_card *card = priv->card;
@@ -288,7 +288,7 @@ static struct pci_driver orinoco_nortel_driver = {
288 .name = DRIVER_NAME, 288 .name = DRIVER_NAME,
289 .id_table = orinoco_nortel_id_table, 289 .id_table = orinoco_nortel_id_table,
290 .probe = orinoco_nortel_init_one, 290 .probe = orinoco_nortel_init_one,
291 .remove = __devexit_p(orinoco_nortel_remove_one), 291 .remove = orinoco_nortel_remove_one,
292 .suspend = orinoco_pci_suspend, 292 .suspend = orinoco_pci_suspend,
293 .resume = orinoco_pci_resume, 293 .resume = orinoco_pci_resume,
294}; 294};
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 6058c66b844e..677bf14eca84 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -199,7 +199,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
199 return err; 199 return err;
200} 200}
201 201
202static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev) 202static void orinoco_pci_remove_one(struct pci_dev *pdev)
203{ 203{
204 struct orinoco_private *priv = pci_get_drvdata(pdev); 204 struct orinoco_private *priv = pci_get_drvdata(pdev);
205 205
@@ -228,7 +228,7 @@ static struct pci_driver orinoco_pci_driver = {
228 .name = DRIVER_NAME, 228 .name = DRIVER_NAME,
229 .id_table = orinoco_pci_id_table, 229 .id_table = orinoco_pci_id_table,
230 .probe = orinoco_pci_init_one, 230 .probe = orinoco_pci_init_one,
231 .remove = __devexit_p(orinoco_pci_remove_one), 231 .remove = orinoco_pci_remove_one,
232 .suspend = orinoco_pci_suspend, 232 .suspend = orinoco_pci_suspend,
233 .resume = orinoco_pci_resume, 233 .resume = orinoco_pci_resume,
234}; 234};
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 2bac8248a991..2559dbd6184b 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -294,7 +294,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
294 return err; 294 return err;
295} 295}
296 296
297static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev) 297static void orinoco_plx_remove_one(struct pci_dev *pdev)
298{ 298{
299 struct orinoco_private *priv = pci_get_drvdata(pdev); 299 struct orinoco_private *priv = pci_get_drvdata(pdev);
300 struct orinoco_pci_card *card = priv->card; 300 struct orinoco_pci_card *card = priv->card;
@@ -334,7 +334,7 @@ static struct pci_driver orinoco_plx_driver = {
334 .name = DRIVER_NAME, 334 .name = DRIVER_NAME,
335 .id_table = orinoco_plx_id_table, 335 .id_table = orinoco_plx_id_table,
336 .probe = orinoco_plx_init_one, 336 .probe = orinoco_plx_init_one,
337 .remove = __devexit_p(orinoco_plx_remove_one), 337 .remove = orinoco_plx_remove_one,
338 .suspend = orinoco_pci_suspend, 338 .suspend = orinoco_pci_suspend,
339 .resume = orinoco_pci_resume, 339 .resume = orinoco_pci_resume,
340}; 340};
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 93159d68ec93..42afeeea2c40 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -188,7 +188,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
188 return err; 188 return err;
189} 189}
190 190
191static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev) 191static void orinoco_tmd_remove_one(struct pci_dev *pdev)
192{ 192{
193 struct orinoco_private *priv = pci_get_drvdata(pdev); 193 struct orinoco_private *priv = pci_get_drvdata(pdev);
194 struct orinoco_pci_card *card = priv->card; 194 struct orinoco_pci_card *card = priv->card;
@@ -214,7 +214,7 @@ static struct pci_driver orinoco_tmd_driver = {
214 .name = DRIVER_NAME, 214 .name = DRIVER_NAME,
215 .id_table = orinoco_tmd_id_table, 215 .id_table = orinoco_tmd_id_table,
216 .probe = orinoco_tmd_init_one, 216 .probe = orinoco_tmd_init_one,
217 .remove = __devexit_p(orinoco_tmd_remove_one), 217 .remove = orinoco_tmd_remove_one,
218 .suspend = orinoco_pci_suspend, 218 .suspend = orinoco_pci_suspend,
219 .resume = orinoco_pci_resume, 219 .resume = orinoco_pci_resume,
220}; 220};
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7f53cea2f205..01624dcaf73e 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -865,7 +865,7 @@ static int ezusb_firmware_download(struct ezusb_priv *upriv,
865static int ezusb_access_ltv(struct ezusb_priv *upriv, 865static int ezusb_access_ltv(struct ezusb_priv *upriv,
866 struct request_context *ctx, 866 struct request_context *ctx,
867 u16 length, const void *data, u16 frame_type, 867 u16 length, const void *data, u16 frame_type,
868 void *ans_buff, int ans_size, u16 *ans_length) 868 void *ans_buff, unsigned ans_size, u16 *ans_length)
869{ 869{
870 int req_size; 870 int req_size;
871 int retval = 0; 871 int retval = 0;
@@ -933,7 +933,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
933 } 933 }
934 if (ctx->in_rid) { 934 if (ctx->in_rid) {
935 struct ezusb_packet *ans = ctx->buf; 935 struct ezusb_packet *ans = ctx->buf;
936 int exp_len; 936 unsigned exp_len;
937 937
938 if (ans->hermes_len != 0) 938 if (ans->hermes_len != 0)
939 exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12; 939 exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
@@ -949,8 +949,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
949 } 949 }
950 950
951 if (ans_buff) 951 if (ans_buff)
952 memcpy(ans_buff, ans->data, 952 memcpy(ans_buff, ans->data, min(exp_len, ans_size));
953 min_t(int, exp_len, ans_size));
954 if (ans_length) 953 if (ans_length)
955 *ans_length = le16_to_cpu(ans->hermes_len); 954 *ans_length = le16_to_cpu(ans->hermes_len);
956 } 955 }
@@ -995,7 +994,7 @@ static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid,
995 struct ezusb_priv *upriv = hw->priv; 994 struct ezusb_priv *upriv = hw->priv;
996 struct request_context *ctx; 995 struct request_context *ctx;
997 996
998 if ((bufsize < 0) || (bufsize % 2)) 997 if (bufsize % 2)
999 return -EINVAL; 998 return -EINVAL;
1000 999
1001 ctx = ezusb_alloc_ctx(upriv, rid, rid); 1000 ctx = ezusb_alloc_ctx(upriv, rid, rid);
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 1ef1bfe6a9d7..d43e3740e45d 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -541,8 +541,9 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
541 entries = (len - offset) / 541 entries = (len - offset) /
542 sizeof(struct pda_rssi_cal_ext_entry); 542 sizeof(struct pda_rssi_cal_ext_entry);
543 543
544 if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) || 544 if (len < offset ||
545 entries <= 0) { 545 (len - offset) % sizeof(struct pda_rssi_cal_ext_entry) ||
546 entries == 0) {
546 wiphy_err(dev->wiphy, "invalid rssi database.\n"); 547 wiphy_err(dev->wiphy, "invalid rssi database.\n");
547 goto err_data; 548 goto err_data;
548 } 549 }
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index b4390797d78c..933e5d941937 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -540,7 +540,7 @@ out:
540 pci_dev_put(pdev); 540 pci_dev_put(pdev);
541} 541}
542 542
543static int __devinit p54p_probe(struct pci_dev *pdev, 543static int p54p_probe(struct pci_dev *pdev,
544 const struct pci_device_id *id) 544 const struct pci_device_id *id)
545{ 545{
546 struct p54p_priv *priv; 546 struct p54p_priv *priv;
@@ -639,7 +639,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
639 return err; 639 return err;
640} 640}
641 641
642static void __devexit p54p_remove(struct pci_dev *pdev) 642static void p54p_remove(struct pci_dev *pdev)
643{ 643{
644 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 644 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
645 struct p54p_priv *priv; 645 struct p54p_priv *priv;
@@ -659,7 +659,7 @@ static void __devexit p54p_remove(struct pci_dev *pdev)
659 p54_free_common(dev); 659 p54_free_common(dev);
660} 660}
661 661
662#ifdef CONFIG_PM 662#ifdef CONFIG_PM_SLEEP
663static int p54p_suspend(struct device *device) 663static int p54p_suspend(struct device *device)
664{ 664{
665 struct pci_dev *pdev = to_pci_dev(device); 665 struct pci_dev *pdev = to_pci_dev(device);
@@ -681,25 +681,18 @@ static int p54p_resume(struct device *device)
681 return pci_set_power_state(pdev, PCI_D0); 681 return pci_set_power_state(pdev, PCI_D0);
682} 682}
683 683
684static const struct dev_pm_ops p54pci_pm_ops = { 684static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);
685 .suspend = p54p_suspend,
686 .resume = p54p_resume,
687 .freeze = p54p_suspend,
688 .thaw = p54p_resume,
689 .poweroff = p54p_suspend,
690 .restore = p54p_resume,
691};
692 685
693#define P54P_PM_OPS (&p54pci_pm_ops) 686#define P54P_PM_OPS (&p54pci_pm_ops)
694#else 687#else
695#define P54P_PM_OPS (NULL) 688#define P54P_PM_OPS (NULL)
696#endif /* CONFIG_PM */ 689#endif /* CONFIG_PM_SLEEP */
697 690
698static struct pci_driver p54p_driver = { 691static struct pci_driver p54p_driver = {
699 .name = "p54pci", 692 .name = "p54pci",
700 .id_table = p54p_table, 693 .id_table = p54p_table,
701 .probe = p54p_probe, 694 .probe = p54p_probe,
702 .remove = __devexit_p(p54p_remove), 695 .remove = p54p_remove,
703 .driver.pm = P54P_PM_OPS, 696 .driver.pm = P54P_PM_OPS,
704}; 697};
705 698
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index f7929906d437..4fd49a007b51 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -595,7 +595,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
595 cancel_work_sync(&priv->work); 595 cancel_work_sync(&priv->work);
596} 596}
597 597
598static int __devinit p54spi_probe(struct spi_device *spi) 598static int p54spi_probe(struct spi_device *spi)
599{ 599{
600 struct p54s_priv *priv = NULL; 600 struct p54s_priv *priv = NULL;
601 struct ieee80211_hw *hw; 601 struct ieee80211_hw *hw;
@@ -683,7 +683,7 @@ err_free:
683 return ret; 683 return ret;
684} 684}
685 685
686static int __devexit p54spi_remove(struct spi_device *spi) 686static int p54spi_remove(struct spi_device *spi)
687{ 687{
688 struct p54s_priv *priv = dev_get_drvdata(&spi->dev); 688 struct p54s_priv *priv = dev_get_drvdata(&spi->dev);
689 689
@@ -710,7 +710,7 @@ static struct spi_driver p54spi_driver = {
710 }, 710 },
711 711
712 .probe = p54spi_probe, 712 .probe = p54spi_probe,
713 .remove = __devexit_p(p54spi_remove), 713 .remove = p54spi_remove,
714}; 714};
715 715
716static int __init p54spi_init(void) 716static int __init p54spi_init(void)
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index effb044a8a9d..e71c702e2eb1 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -986,7 +986,7 @@ static int p54u_load_firmware(struct ieee80211_hw *dev,
986 return err; 986 return err;
987} 987}
988 988
989static int __devinit p54u_probe(struct usb_interface *intf, 989static int p54u_probe(struct usb_interface *intf,
990 const struct usb_device_id *id) 990 const struct usb_device_id *id)
991{ 991{
992 struct usb_device *udev = interface_to_usbdev(intf); 992 struct usb_device *udev = interface_to_usbdev(intf);
@@ -1057,7 +1057,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
1057 return err; 1057 return err;
1058} 1058}
1059 1059
1060static void __devexit p54u_disconnect(struct usb_interface *intf) 1060static void p54u_disconnect(struct usb_interface *intf)
1061{ 1061{
1062 struct ieee80211_hw *dev = usb_get_intfdata(intf); 1062 struct ieee80211_hw *dev = usb_get_intfdata(intf);
1063 struct p54u_priv *priv; 1063 struct p54u_priv *priv;
@@ -1131,7 +1131,7 @@ static struct usb_driver p54u_driver = {
1131 .name = "p54usb", 1131 .name = "p54usb",
1132 .id_table = p54u_table, 1132 .id_table = p54u_table,
1133 .probe = p54u_probe, 1133 .probe = p54u_probe,
1134 .disconnect = __devexit_p(p54u_disconnect), 1134 .disconnect = p54u_disconnect,
1135 .pre_reset = p54u_pre_reset, 1135 .pre_reset = p54u_pre_reset,
1136 .post_reset = p54u_post_reset, 1136 .post_reset = p54u_post_reset,
1137#ifdef CONFIG_PM 1137#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 5861e13a6fd8..12f0a34477f2 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -369,7 +369,11 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
369 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; 369 rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32;
370 priv->tsf_low32 = tsf32; 370 priv->tsf_low32 = tsf32;
371 371
372 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 372 /* LMAC API Page 10/29 - s_lm_data_in - clock
373 * "usec accurate timestamp of hardware clock
374 * at end of frame (before OFDM SIFS EOF padding"
375 */
376 rx_status->flag |= RX_FLAG_MACTIME_END;
373 377
374 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) 378 if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
375 header_len += hdr->align[0]; 379 header_len += hdr->align[0];
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index bd1f0cb56085..abe1d039be81 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -490,9 +490,12 @@ static int rndis_scan(struct wiphy *wiphy,
490static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed); 490static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
491 491
492static int rndis_set_tx_power(struct wiphy *wiphy, 492static int rndis_set_tx_power(struct wiphy *wiphy,
493 struct wireless_dev *wdev,
493 enum nl80211_tx_power_setting type, 494 enum nl80211_tx_power_setting type,
494 int mbm); 495 int mbm);
495static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm); 496static int rndis_get_tx_power(struct wiphy *wiphy,
497 struct wireless_dev *wdev,
498 int *dbm);
496 499
497static int rndis_connect(struct wiphy *wiphy, struct net_device *dev, 500static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
498 struct cfg80211_connect_params *sme); 501 struct cfg80211_connect_params *sme);
@@ -1903,6 +1906,7 @@ static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1903} 1906}
1904 1907
1905static int rndis_set_tx_power(struct wiphy *wiphy, 1908static int rndis_set_tx_power(struct wiphy *wiphy,
1909 struct wireless_dev *wdev,
1906 enum nl80211_tx_power_setting type, 1910 enum nl80211_tx_power_setting type,
1907 int mbm) 1911 int mbm)
1908{ 1912{
@@ -1930,7 +1934,9 @@ static int rndis_set_tx_power(struct wiphy *wiphy,
1930 return -ENOTSUPP; 1934 return -ENOTSUPP;
1931} 1935}
1932 1936
1933static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm) 1937static int rndis_get_tx_power(struct wiphy *wiphy,
1938 struct wireless_dev *wdev,
1939 int *dbm)
1934{ 1940{
1935 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 1941 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
1936 struct usbnet *usbdev = priv->usbdev; 1942 struct usbnet *usbdev = priv->usbdev;
@@ -2287,7 +2293,7 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
2287{ 2293{
2288 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2294 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2289 struct usbnet *usbdev = priv->usbdev; 2295 struct usbnet *usbdev = priv->usbdev;
2290 struct ieee80211_channel *channel = params->channel; 2296 struct ieee80211_channel *channel = params->chandef.chan;
2291 struct ndis_80211_ssid ssid; 2297 struct ndis_80211_ssid ssid;
2292 enum nl80211_auth_type auth_type; 2298 enum nl80211_auth_type auth_type;
2293 int ret, alg, length, chan = -1; 2299 int ret, alg, length, chan = -1;
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e3a2d9070cf6..a2d2bc2c7b3d 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1831,7 +1831,7 @@ static struct pci_driver rt2400pci_driver = {
1831 .name = KBUILD_MODNAME, 1831 .name = KBUILD_MODNAME,
1832 .id_table = rt2400pci_device_table, 1832 .id_table = rt2400pci_device_table,
1833 .probe = rt2400pci_probe, 1833 .probe = rt2400pci_probe,
1834 .remove = __devexit_p(rt2x00pci_remove), 1834 .remove = rt2x00pci_remove,
1835 .suspend = rt2x00pci_suspend, 1835 .suspend = rt2x00pci_suspend,
1836 .resume = rt2x00pci_resume, 1836 .resume = rt2x00pci_resume,
1837}; 1837};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 479d756e275b..9bea10f53f0a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2122,7 +2122,7 @@ static struct pci_driver rt2500pci_driver = {
2122 .name = KBUILD_MODNAME, 2122 .name = KBUILD_MODNAME,
2123 .id_table = rt2500pci_device_table, 2123 .id_table = rt2500pci_device_table,
2124 .probe = rt2500pci_probe, 2124 .probe = rt2500pci_probe,
2125 .remove = __devexit_p(rt2x00pci_remove), 2125 .remove = rt2x00pci_remove,
2126 .suspend = rt2x00pci_suspend, 2126 .suspend = rt2x00pci_suspend,
2127 .resume = rt2x00pci_resume, 2127 .resume = rt2x00pci_resume,
2128}; 2128};
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 6d67c3ede651..4db1088a847f 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -1993,8 +1993,10 @@ struct mac_iveiv_entry {
1993 */ 1993 */
1994#define RFCSR3_K FIELD8(0x0f) 1994#define RFCSR3_K FIELD8(0x0f)
1995/* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */ 1995/* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
1996#define RFCSR3_PA1_BIAS_CCK FIELD8(0x70); 1996#define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
1997#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80); 1997#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
1998/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
1999#define RFCSR3_VCOCAL_EN FIELD8(0x80)
1998 2000
1999/* 2001/*
2000 * FRCSR 5: 2002 * FRCSR 5:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 59474ae0aec0..197b4466a5d2 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2173,7 +2173,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2173 rt2800_rfcsr_write(rt2x00dev, 59, 2173 rt2800_rfcsr_write(rt2x00dev, 59,
2174 r59_nonbt_rev[idx]); 2174 r59_nonbt_rev[idx]);
2175 } else if (rt2x00_rt(rt2x00dev, RT5390) || 2175 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
2176 rt2x00_rt(rt2x00dev, RT5392)) { 2176 rt2x00_rt(rt2x00dev, RT5392)) {
2177 static const char r59_non_bt[] = {0x8f, 0x8f, 2177 static const char r59_non_bt[] = {0x8f, 0x8f,
2178 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 2178 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
2179 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; 2179 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
@@ -2243,7 +2243,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2243 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2243 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
2244 2244
2245 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); 2245 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2246 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2246 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2247 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 2247 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2248 } 2248 }
2249 2249
@@ -2264,7 +2264,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2264 2264
2265 if (rf->channel <= 14) { 2265 if (rf->channel <= 14) {
2266 if (!rt2x00_rt(rt2x00dev, RT5390) && 2266 if (!rt2x00_rt(rt2x00dev, RT5390) &&
2267 !rt2x00_rt(rt2x00dev, RT5392)) { 2267 !rt2x00_rt(rt2x00dev, RT5392)) {
2268 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, 2268 if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
2269 &rt2x00dev->cap_flags)) { 2269 &rt2x00dev->cap_flags)) {
2270 rt2800_bbp_write(rt2x00dev, 82, 0x62); 2270 rt2800_bbp_write(rt2x00dev, 82, 0x62);
@@ -2520,20 +2520,37 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
2520 return comp_value; 2520 return comp_value;
2521} 2521}
2522 2522
2523static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
2524 int power_level, int max_power)
2525{
2526 int delta;
2527
2528 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
2529 return 0;
2530
2531 /*
2532 * XXX: We don't know the maximum transmit power of our hardware since
2533 * the EEPROM doesn't expose it. We only know that we are calibrated
2534 * to 100% tx power.
2535 *
2536 * Hence, we assume the regulatory limit that cfg80211 calulated for
2537 * the current channel is our maximum and if we are requested to lower
2538 * the value we just reduce our tx power accordingly.
2539 */
2540 delta = power_level - max_power;
2541 return min(delta, 0);
2542}
2543
2523static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b, 2544static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
2524 enum ieee80211_band band, int power_level, 2545 enum ieee80211_band band, int power_level,
2525 u8 txpower, int delta) 2546 u8 txpower, int delta)
2526{ 2547{
2527 u32 reg;
2528 u16 eeprom; 2548 u16 eeprom;
2529 u8 criterion; 2549 u8 criterion;
2530 u8 eirp_txpower; 2550 u8 eirp_txpower;
2531 u8 eirp_txpower_criterion; 2551 u8 eirp_txpower_criterion;
2532 u8 reg_limit; 2552 u8 reg_limit;
2533 2553
2534 if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b))
2535 return txpower;
2536
2537 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) { 2554 if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
2538 /* 2555 /*
2539 * Check if eirp txpower exceed txpower_limit. 2556 * Check if eirp txpower exceed txpower_limit.
@@ -2542,11 +2559,13 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
2542 * .11b data rate need add additional 4dbm 2559 * .11b data rate need add additional 4dbm
2543 * when calculating eirp txpower. 2560 * when calculating eirp txpower.
2544 */ 2561 */
2545 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg); 2562 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
2546 criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS); 2563 &eeprom);
2564 criterion = rt2x00_get_field16(eeprom,
2565 EEPROM_TXPOWER_BYRATE_RATE0);
2547 2566
2548 rt2x00_eeprom_read(rt2x00dev, 2567 rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
2549 EEPROM_EIRP_MAX_TX_POWER, &eeprom); 2568 &eeprom);
2550 2569
2551 if (band == IEEE80211_BAND_2GHZ) 2570 if (band == IEEE80211_BAND_2GHZ)
2552 eirp_txpower_criterion = rt2x00_get_field16(eeprom, 2571 eirp_txpower_criterion = rt2x00_get_field16(eeprom,
@@ -2563,36 +2582,71 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
2563 } else 2582 } else
2564 reg_limit = 0; 2583 reg_limit = 0;
2565 2584
2566 return txpower + delta - reg_limit; 2585 txpower = max(0, txpower + delta - reg_limit);
2586 return min_t(u8, txpower, 0xc);
2567} 2587}
2568 2588
2589/*
2590 * We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
2591 * BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
2592 * 4 bits for each rate (tune from 0 to 15 dBm). BBP_R1 controls transmit power
2593 * for all rates, but allow to set only 4 discrete values: -12, -6, 0 and 6 dBm.
2594 * Reference per rate transmit power values are located in the EEPROM at
2595 * EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
2596 * current conditions (i.e. band, bandwidth, temperature, user settings).
2597 */
2569static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 2598static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2570 enum ieee80211_band band, 2599 struct ieee80211_channel *chan,
2571 int power_level) 2600 int power_level)
2572{ 2601{
2573 u8 txpower; 2602 u8 txpower, r1;
2574 u16 eeprom; 2603 u16 eeprom;
2575 int i, is_rate_b; 2604 u32 reg, offset;
2576 u32 reg; 2605 int i, is_rate_b, delta, power_ctrl;
2577 u8 r1; 2606 enum ieee80211_band band = chan->band;
2578 u32 offset;
2579 int delta;
2580 2607
2581 /* 2608 /*
2582 * Calculate HT40 compensation delta 2609 * Calculate HT40 compensation. For 40MHz we need to add or subtract
2610 * value read from EEPROM (different for 2GHz and for 5GHz).
2583 */ 2611 */
2584 delta = rt2800_get_txpower_bw_comp(rt2x00dev, band); 2612 delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
2585 2613
2586 /* 2614 /*
2587 * calculate temperature compensation delta 2615 * Calculate temperature compensation. Depends on measurement of current
2616 * TSSI (Transmitter Signal Strength Indication) we know TX power (due
2617 * to temperature or maybe other factors) is smaller or bigger than
2618 * expected. We adjust it, based on TSSI reference and boundaries values
2619 * provided in EEPROM.
2588 */ 2620 */
2589 delta += rt2800_get_gain_calibration_delta(rt2x00dev); 2621 delta += rt2800_get_gain_calibration_delta(rt2x00dev);
2590 2622
2591 /* 2623 /*
2592 * set to normal bbp tx power control mode: +/- 0dBm 2624 * Decrease power according to user settings, on devices with unknown
2625 * maximum tx power. For other devices we take user power_level into
2626 * consideration on rt2800_compensate_txpower().
2627 */
2628 delta += rt2800_get_txpower_reg_delta(rt2x00dev, power_level,
2629 chan->max_power);
2630
2631 /*
2632 * BBP_R1 controls TX power for all rates, it allow to set the following
2633 * gains -12, -6, 0, +6 dBm by setting values 2, 1, 0, 3 respectively.
2634 *
2635 * TODO: we do not use +6 dBm option to do not increase power beyond
2636 * regulatory limit, however this could be utilized for devices with
2637 * CAPABILITY_POWER_LIMIT.
2593 */ 2638 */
2594 rt2800_bbp_read(rt2x00dev, 1, &r1); 2639 rt2800_bbp_read(rt2x00dev, 1, &r1);
2595 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0); 2640 if (delta <= -12) {
2641 power_ctrl = 2;
2642 delta += 12;
2643 } else if (delta <= -6) {
2644 power_ctrl = 1;
2645 delta += 6;
2646 } else {
2647 power_ctrl = 0;
2648 }
2649 rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
2596 rt2800_bbp_write(rt2x00dev, 1, r1); 2650 rt2800_bbp_write(rt2x00dev, 1, r1);
2597 offset = TX_PWR_CFG_0; 2651 offset = TX_PWR_CFG_0;
2598 2652
@@ -2710,7 +2764,7 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
2710 2764
2711void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev) 2765void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
2712{ 2766{
2713 rt2800_config_txpower(rt2x00dev, rt2x00dev->curr_band, 2767 rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.channel,
2714 rt2x00dev->tx_power); 2768 rt2x00dev->tx_power);
2715} 2769}
2716EXPORT_SYMBOL_GPL(rt2800_gain_calibration); 2770EXPORT_SYMBOL_GPL(rt2800_gain_calibration);
@@ -2750,7 +2804,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
2750 case RF5390: 2804 case RF5390:
2751 case RF5392: 2805 case RF5392:
2752 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); 2806 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2753 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2807 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2754 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 2808 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2755 break; 2809 break;
2756 default: 2810 default:
@@ -2845,11 +2899,11 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev,
2845 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) { 2899 if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
2846 rt2800_config_channel(rt2x00dev, libconf->conf, 2900 rt2800_config_channel(rt2x00dev, libconf->conf,
2847 &libconf->rf, &libconf->channel); 2901 &libconf->rf, &libconf->channel);
2848 rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band, 2902 rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
2849 libconf->conf->power_level); 2903 libconf->conf->power_level);
2850 } 2904 }
2851 if (flags & IEEE80211_CONF_CHANGE_POWER) 2905 if (flags & IEEE80211_CONF_CHANGE_POWER)
2852 rt2800_config_txpower(rt2x00dev, libconf->conf->channel->band, 2906 rt2800_config_txpower(rt2x00dev, libconf->conf->channel,
2853 libconf->conf->power_level); 2907 libconf->conf->power_level);
2854 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) 2908 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
2855 rt2800_config_retry_limit(rt2x00dev, libconf); 2909 rt2800_config_retry_limit(rt2x00dev, libconf);
@@ -3538,8 +3592,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3538 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 3592 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
3539 rt2800_bbp_write(rt2x00dev, 84, 0x19); 3593 rt2800_bbp_write(rt2x00dev, 84, 0x19);
3540 else if (rt2x00_rt(rt2x00dev, RT3290) || 3594 else if (rt2x00_rt(rt2x00dev, RT3290) ||
3541 rt2x00_rt(rt2x00dev, RT5390) || 3595 rt2x00_rt(rt2x00dev, RT5390) ||
3542 rt2x00_rt(rt2x00dev, RT5392)) 3596 rt2x00_rt(rt2x00dev, RT5392))
3543 rt2800_bbp_write(rt2x00dev, 84, 0x9a); 3597 rt2800_bbp_write(rt2x00dev, 84, 0x9a);
3544 else 3598 else
3545 rt2800_bbp_write(rt2x00dev, 84, 0x99); 3599 rt2800_bbp_write(rt2x00dev, 84, 0x99);
@@ -3598,7 +3652,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3598 else if (rt2x00_rt(rt2x00dev, RT3352)) 3652 else if (rt2x00_rt(rt2x00dev, RT3352))
3599 rt2800_bbp_write(rt2x00dev, 105, 0x34); 3653 rt2800_bbp_write(rt2x00dev, 105, 0x34);
3600 else if (rt2x00_rt(rt2x00dev, RT5390) || 3654 else if (rt2x00_rt(rt2x00dev, RT5390) ||
3601 rt2x00_rt(rt2x00dev, RT5392)) 3655 rt2x00_rt(rt2x00dev, RT5392))
3602 rt2800_bbp_write(rt2x00dev, 105, 0x3c); 3656 rt2800_bbp_write(rt2x00dev, 105, 0x3c);
3603 else 3657 else
3604 rt2800_bbp_write(rt2x00dev, 105, 0x05); 3658 rt2800_bbp_write(rt2x00dev, 105, 0x05);
@@ -3692,7 +3746,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3692 } 3746 }
3693 3747
3694 if (rt2x00_rt(rt2x00dev, RT5390) || 3748 if (rt2x00_rt(rt2x00dev, RT5390) ||
3695 rt2x00_rt(rt2x00dev, RT5392)) { 3749 rt2x00_rt(rt2x00dev, RT5392)) {
3696 int ant, div_mode; 3750 int ant, div_mode;
3697 3751
3698 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 3752 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
@@ -4166,66 +4220,66 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4166 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); 4220 rt2800_rfcsr_write(rt2x00dev, 61, 0xdd);
4167 rt2800_rfcsr_write(rt2x00dev, 62, 0x00); 4221 rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
4168 rt2800_rfcsr_write(rt2x00dev, 63, 0x00); 4222 rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
4169 } else if (rt2x00_rt(rt2x00dev, RT5392)) { 4223 } else if (rt2x00_rt(rt2x00dev, RT5392)) {
4170 rt2800_rfcsr_write(rt2x00dev, 1, 0x17); 4224 rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
4171 rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 4225 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
4172 rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 4226 rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
4173 rt2800_rfcsr_write(rt2x00dev, 5, 0x10); 4227 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
4174 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); 4228 rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
4175 rt2800_rfcsr_write(rt2x00dev, 7, 0x00); 4229 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
4176 rt2800_rfcsr_write(rt2x00dev, 10, 0x53); 4230 rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
4177 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); 4231 rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
4178 rt2800_rfcsr_write(rt2x00dev, 12, 0x46); 4232 rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
4179 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); 4233 rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
4180 rt2800_rfcsr_write(rt2x00dev, 14, 0x00); 4234 rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
4181 rt2800_rfcsr_write(rt2x00dev, 15, 0x00); 4235 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
4182 rt2800_rfcsr_write(rt2x00dev, 16, 0x00); 4236 rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
4183 rt2800_rfcsr_write(rt2x00dev, 18, 0x03); 4237 rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
4184 rt2800_rfcsr_write(rt2x00dev, 19, 0x4d); 4238 rt2800_rfcsr_write(rt2x00dev, 19, 0x4d);
4185 rt2800_rfcsr_write(rt2x00dev, 20, 0x00); 4239 rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
4186 rt2800_rfcsr_write(rt2x00dev, 21, 0x8d); 4240 rt2800_rfcsr_write(rt2x00dev, 21, 0x8d);
4187 rt2800_rfcsr_write(rt2x00dev, 22, 0x20); 4241 rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
4188 rt2800_rfcsr_write(rt2x00dev, 23, 0x0b); 4242 rt2800_rfcsr_write(rt2x00dev, 23, 0x0b);
4189 rt2800_rfcsr_write(rt2x00dev, 24, 0x44); 4243 rt2800_rfcsr_write(rt2x00dev, 24, 0x44);
4190 rt2800_rfcsr_write(rt2x00dev, 25, 0x80); 4244 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
4191 rt2800_rfcsr_write(rt2x00dev, 26, 0x82); 4245 rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
4192 rt2800_rfcsr_write(rt2x00dev, 27, 0x09); 4246 rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
4193 rt2800_rfcsr_write(rt2x00dev, 28, 0x00); 4247 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
4194 rt2800_rfcsr_write(rt2x00dev, 29, 0x10); 4248 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
4195 rt2800_rfcsr_write(rt2x00dev, 30, 0x10); 4249 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
4196 rt2800_rfcsr_write(rt2x00dev, 31, 0x80); 4250 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
4197 rt2800_rfcsr_write(rt2x00dev, 32, 0x20); 4251 rt2800_rfcsr_write(rt2x00dev, 32, 0x20);
4198 rt2800_rfcsr_write(rt2x00dev, 33, 0xC0); 4252 rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
4199 rt2800_rfcsr_write(rt2x00dev, 34, 0x07); 4253 rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
4200 rt2800_rfcsr_write(rt2x00dev, 35, 0x12); 4254 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
4201 rt2800_rfcsr_write(rt2x00dev, 36, 0x00); 4255 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
4202 rt2800_rfcsr_write(rt2x00dev, 37, 0x08); 4256 rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
4203 rt2800_rfcsr_write(rt2x00dev, 38, 0x89); 4257 rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
4204 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); 4258 rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
4205 rt2800_rfcsr_write(rt2x00dev, 40, 0x0f); 4259 rt2800_rfcsr_write(rt2x00dev, 40, 0x0f);
4206 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); 4260 rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
4207 rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); 4261 rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
4208 rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); 4262 rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
4209 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); 4263 rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
4210 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); 4264 rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
4211 rt2800_rfcsr_write(rt2x00dev, 46, 0x73); 4265 rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
4212 rt2800_rfcsr_write(rt2x00dev, 47, 0x0c); 4266 rt2800_rfcsr_write(rt2x00dev, 47, 0x0c);
4213 rt2800_rfcsr_write(rt2x00dev, 48, 0x10); 4267 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
4214 rt2800_rfcsr_write(rt2x00dev, 49, 0x94); 4268 rt2800_rfcsr_write(rt2x00dev, 49, 0x94);
4215 rt2800_rfcsr_write(rt2x00dev, 50, 0x94); 4269 rt2800_rfcsr_write(rt2x00dev, 50, 0x94);
4216 rt2800_rfcsr_write(rt2x00dev, 51, 0x3a); 4270 rt2800_rfcsr_write(rt2x00dev, 51, 0x3a);
4217 rt2800_rfcsr_write(rt2x00dev, 52, 0x48); 4271 rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
4218 rt2800_rfcsr_write(rt2x00dev, 53, 0x44); 4272 rt2800_rfcsr_write(rt2x00dev, 53, 0x44);
4219 rt2800_rfcsr_write(rt2x00dev, 54, 0x38); 4273 rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
4220 rt2800_rfcsr_write(rt2x00dev, 55, 0x43); 4274 rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
4221 rt2800_rfcsr_write(rt2x00dev, 56, 0xa1); 4275 rt2800_rfcsr_write(rt2x00dev, 56, 0xa1);
4222 rt2800_rfcsr_write(rt2x00dev, 57, 0x00); 4276 rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
4223 rt2800_rfcsr_write(rt2x00dev, 58, 0x39); 4277 rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
4224 rt2800_rfcsr_write(rt2x00dev, 59, 0x07); 4278 rt2800_rfcsr_write(rt2x00dev, 59, 0x07);
4225 rt2800_rfcsr_write(rt2x00dev, 60, 0x45); 4279 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
4226 rt2800_rfcsr_write(rt2x00dev, 61, 0x91); 4280 rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
4227 rt2800_rfcsr_write(rt2x00dev, 62, 0x39); 4281 rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
4228 rt2800_rfcsr_write(rt2x00dev, 63, 0x07); 4282 rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
4229 } 4283 }
4230 4284
4231 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 4285 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -4302,7 +4356,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4302 rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26); 4356 rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
4303 4357
4304 if (!rt2x00_rt(rt2x00dev, RT5390) && 4358 if (!rt2x00_rt(rt2x00dev, RT5390) &&
4305 !rt2x00_rt(rt2x00dev, RT5392)) { 4359 !rt2x00_rt(rt2x00dev, RT5392)) {
4306 /* 4360 /*
4307 * Set back to initial state 4361 * Set back to initial state
4308 */ 4362 */
@@ -4331,7 +4385,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4331 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg); 4385 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
4332 4386
4333 if (!rt2x00_rt(rt2x00dev, RT5390) && 4387 if (!rt2x00_rt(rt2x00dev, RT5390) &&
4334 !rt2x00_rt(rt2x00dev, RT5392)) { 4388 !rt2x00_rt(rt2x00dev, RT5392)) {
4335 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 4389 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
4336 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); 4390 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
4337 if (rt2x00_rt(rt2x00dev, RT3070) || 4391 if (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -4403,7 +4457,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4403 } 4457 }
4404 4458
4405 if (rt2x00_rt(rt2x00dev, RT5390) || 4459 if (rt2x00_rt(rt2x00dev, RT5390) ||
4406 rt2x00_rt(rt2x00dev, RT5392)) { 4460 rt2x00_rt(rt2x00dev, RT5392)) {
4407 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); 4461 rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
4408 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); 4462 rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
4409 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); 4463 rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
@@ -5036,7 +5090,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5036 IEEE80211_HW_SUPPORTS_PS | 5090 IEEE80211_HW_SUPPORTS_PS |
5037 IEEE80211_HW_PS_NULLFUNC_STACK | 5091 IEEE80211_HW_PS_NULLFUNC_STACK |
5038 IEEE80211_HW_AMPDU_AGGREGATION | 5092 IEEE80211_HW_AMPDU_AGGREGATION |
5039 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 5093 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5094 IEEE80211_HW_TEARDOWN_AGGR_ON_BAR_FAIL;
5040 5095
5041 /* 5096 /*
5042 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices 5097 * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 27829e1e2e38..9224d874bf24 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -1176,7 +1176,7 @@ static struct platform_driver rt2800soc_driver = {
1176 .mod_name = KBUILD_MODNAME, 1176 .mod_name = KBUILD_MODNAME,
1177 }, 1177 },
1178 .probe = rt2800soc_probe, 1178 .probe = rt2800soc_probe,
1179 .remove = __devexit_p(rt2x00soc_remove), 1179 .remove = rt2x00soc_remove,
1180 .suspend = rt2x00soc_suspend, 1180 .suspend = rt2x00soc_suspend,
1181 .resume = rt2x00soc_resume, 1181 .resume = rt2x00soc_resume,
1182}; 1182};
@@ -1193,7 +1193,7 @@ static struct pci_driver rt2800pci_driver = {
1193 .name = KBUILD_MODNAME, 1193 .name = KBUILD_MODNAME,
1194 .id_table = rt2800pci_device_table, 1194 .id_table = rt2800pci_device_table,
1195 .probe = rt2800pci_probe, 1195 .probe = rt2800pci_probe,
1196 .remove = __devexit_p(rt2x00pci_remove), 1196 .remove = rt2x00pci_remove,
1197 .suspend = rt2x00pci_suspend, 1197 .suspend = rt2x00pci_suspend,
1198 .resume = rt2x00pci_resume, 1198 .resume = rt2x00pci_resume,
1199}; 1199};
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3b8fb5a603f2..5c149b58ab46 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1096,6 +1096,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1096 { USB_DEVICE(0x177f, 0x0153) }, 1096 { USB_DEVICE(0x177f, 0x0153) },
1097 { USB_DEVICE(0x177f, 0x0302) }, 1097 { USB_DEVICE(0x177f, 0x0302) },
1098 { USB_DEVICE(0x177f, 0x0313) }, 1098 { USB_DEVICE(0x177f, 0x0313) },
1099 { USB_DEVICE(0x177f, 0x0323) },
1099 /* U-Media */ 1100 /* U-Media */
1100 { USB_DEVICE(0x157e, 0x300e) }, 1101 { USB_DEVICE(0x157e, 0x300e) },
1101 { USB_DEVICE(0x157e, 0x3013) }, 1102 { USB_DEVICE(0x157e, 0x3013) },
@@ -1169,6 +1170,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1169 { USB_DEVICE(0x2001, 0x3c19) }, 1170 { USB_DEVICE(0x2001, 0x3c19) },
1170 { USB_DEVICE(0x2001, 0x3c1c) }, 1171 { USB_DEVICE(0x2001, 0x3c1c) },
1171 { USB_DEVICE(0x2001, 0x3c1d) }, 1172 { USB_DEVICE(0x2001, 0x3c1d) },
1173 { USB_DEVICE(0x2001, 0x3c1e) },
1172 /* LG innotek */ 1174 /* LG innotek */
1173 { USB_DEVICE(0x043e, 0x7a22) }, 1175 { USB_DEVICE(0x043e, 0x7a22) },
1174 /* Panasonic */ 1176 /* Panasonic */
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 69097d1faeb6..4ffb6a584cd0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -157,6 +157,7 @@ static void rt2x00lib_intf_scheduled(struct work_struct *work)
157 * requested configurations. 157 * requested configurations.
158 */ 158 */
159 ieee80211_iterate_active_interfaces(rt2x00dev->hw, 159 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
160 IEEE80211_IFACE_ITER_RESUME_ALL,
160 rt2x00lib_intf_scheduled_iter, 161 rt2x00lib_intf_scheduled_iter,
161 rt2x00dev); 162 rt2x00dev);
162} 163}
@@ -225,9 +226,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
225 return; 226 return;
226 227
227 /* send buffered bc/mc frames out for every bssid */ 228 /* send buffered bc/mc frames out for every bssid */
228 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 229 ieee80211_iterate_active_interfaces_atomic(
229 rt2x00lib_bc_buffer_iter, 230 rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
230 rt2x00dev); 231 rt2x00lib_bc_buffer_iter, rt2x00dev);
231 /* 232 /*
232 * Devices with pre tbtt interrupt don't need to update the beacon 233 * Devices with pre tbtt interrupt don't need to update the beacon
233 * here as they will fetch the next beacon directly prior to 234 * here as they will fetch the next beacon directly prior to
@@ -237,9 +238,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
237 return; 238 return;
238 239
239 /* fetch next beacon */ 240 /* fetch next beacon */
240 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 241 ieee80211_iterate_active_interfaces_atomic(
241 rt2x00lib_beaconupdate_iter, 242 rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
242 rt2x00dev); 243 rt2x00lib_beaconupdate_iter, rt2x00dev);
243} 244}
244EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 245EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
245 246
@@ -249,9 +250,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
249 return; 250 return;
250 251
251 /* fetch next beacon */ 252 /* fetch next beacon */
252 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 253 ieee80211_iterate_active_interfaces_atomic(
253 rt2x00lib_beaconupdate_iter, 254 rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
254 rt2x00dev); 255 rt2x00lib_beaconupdate_iter, rt2x00dev);
255} 256}
256EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 257EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
257 258
@@ -391,10 +392,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
391 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 392 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
392 tx_info->status.ampdu_len = 1; 393 tx_info->status.ampdu_len = 1;
393 tx_info->status.ampdu_ack_len = success ? 1 : 0; 394 tx_info->status.ampdu_ack_len = success ? 1 : 0;
394 /* 395
395 * TODO: Need to tear down BA session here 396 if (!success)
396 * if not successful. 397 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
397 */
398 } 398 }
399 399
400 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 400 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -1123,6 +1123,9 @@ static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev)
1123 struct ieee80211_iface_limit *if_limit; 1123 struct ieee80211_iface_limit *if_limit;
1124 struct ieee80211_iface_combination *if_combination; 1124 struct ieee80211_iface_combination *if_combination;
1125 1125
1126 if (rt2x00dev->ops->max_ap_intf < 2)
1127 return;
1128
1126 /* 1129 /*
1127 * Build up AP interface limits structure. 1130 * Build up AP interface limits structure.
1128 */ 1131 */
@@ -1182,6 +1185,13 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1182 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); 1185 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
1183 1186
1184 /* 1187 /*
1188 * rt2x00 devices can only use the last n bits of the MAC address
1189 * for virtual interfaces.
1190 */
1191 rt2x00dev->hw->wiphy->addr_mask[ETH_ALEN - 1] =
1192 (rt2x00dev->ops->max_ap_intf - 1);
1193
1194 /*
1185 * Determine which operating modes are supported, all modes 1195 * Determine which operating modes are supported, all modes
1186 * which require beaconing, depend on the availability of 1196 * which require beaconing, depend on the availability of
1187 * beacon entries. 1197 * beacon entries.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 98a9e48f8e4a..ed7a1bb3f245 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -424,9 +424,9 @@ int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
424 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 424 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
425 return 0; 425 return 0;
426 426
427 ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, 427 ieee80211_iterate_active_interfaces_atomic(
428 rt2x00mac_set_tim_iter, 428 rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
429 rt2x00dev); 429 rt2x00mac_set_tim_iter, rt2x00dev);
430 430
431 /* queue work to upodate the beacon template */ 431 /* queue work to upodate the beacon template */
432 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); 432 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index d6582a2fa353..f95792cfcf89 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -3094,7 +3094,7 @@ static struct pci_driver rt61pci_driver = {
3094 .name = KBUILD_MODNAME, 3094 .name = KBUILD_MODNAME,
3095 .id_table = rt61pci_device_table, 3095 .id_table = rt61pci_device_table,
3096 .probe = rt61pci_probe, 3096 .probe = rt61pci_probe,
3097 .remove = __devexit_p(rt2x00pci_remove), 3097 .remove = rt2x00pci_remove,
3098 .suspend = rt2x00pci_suspend, 3098 .suspend = rt2x00pci_suspend,
3099 .resume = rt2x00pci_resume, 3099 .resume = rt2x00pci_resume,
3100}; 3100};
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 021d83e1b1d3..1b3c2843221d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -150,7 +150,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
150 rx_status.freq = dev->conf.channel->center_freq; 150 rx_status.freq = dev->conf.channel->center_freq;
151 rx_status.band = dev->conf.channel->band; 151 rx_status.band = dev->conf.channel->band;
152 rx_status.mactime = le64_to_cpu(entry->tsft); 152 rx_status.mactime = le64_to_cpu(entry->tsft);
153 rx_status.flag |= RX_FLAG_MACTIME_MPDU; 153 rx_status.flag |= RX_FLAG_MACTIME_START;
154 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 154 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
155 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 155 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
156 156
@@ -901,7 +901,7 @@ static void rtl8180_eeprom_register_write(struct eeprom_93cx6 *eeprom)
901 udelay(10); 901 udelay(10);
902} 902}
903 903
904static int __devinit rtl8180_probe(struct pci_dev *pdev, 904static int rtl8180_probe(struct pci_dev *pdev,
905 const struct pci_device_id *id) 905 const struct pci_device_id *id)
906{ 906{
907 struct ieee80211_hw *dev; 907 struct ieee80211_hw *dev;
@@ -1131,7 +1131,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1131 return err; 1131 return err;
1132} 1132}
1133 1133
1134static void __devexit rtl8180_remove(struct pci_dev *pdev) 1134static void rtl8180_remove(struct pci_dev *pdev)
1135{ 1135{
1136 struct ieee80211_hw *dev = pci_get_drvdata(pdev); 1136 struct ieee80211_hw *dev = pci_get_drvdata(pdev);
1137 struct rtl8180_priv *priv; 1137 struct rtl8180_priv *priv;
@@ -1170,7 +1170,7 @@ static struct pci_driver rtl8180_driver = {
1170 .name = KBUILD_MODNAME, 1170 .name = KBUILD_MODNAME,
1171 .id_table = rtl8180_table, 1171 .id_table = rtl8180_table,
1172 .probe = rtl8180_probe, 1172 .probe = rtl8180_probe,
1173 .remove = __devexit_p(rtl8180_remove), 1173 .remove = rtl8180_remove,
1174#ifdef CONFIG_PM 1174#ifdef CONFIG_PM
1175 .suspend = rtl8180_suspend, 1175 .suspend = rtl8180_suspend,
1176 .resume = rtl8180_resume, 1176 .resume = rtl8180_resume,
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 7811b6315973..4574bd213705 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -381,7 +381,7 @@ static void rtl8187_rx_cb(struct urb *urb)
381 rx_status.rate_idx = rate; 381 rx_status.rate_idx = rate;
382 rx_status.freq = dev->conf.channel->center_freq; 382 rx_status.freq = dev->conf.channel->center_freq;
383 rx_status.band = dev->conf.channel->band; 383 rx_status.band = dev->conf.channel->band;
384 rx_status.flag |= RX_FLAG_MACTIME_MPDU; 384 rx_status.flag |= RX_FLAG_MACTIME_START;
385 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) 385 if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR)
386 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; 386 rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
387 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 387 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
@@ -1411,7 +1411,7 @@ static void rtl8187_eeprom_register_write(struct eeprom_93cx6 *eeprom)
1411 udelay(10); 1411 udelay(10);
1412} 1412}
1413 1413
1414static int __devinit rtl8187_probe(struct usb_interface *intf, 1414static int rtl8187_probe(struct usb_interface *intf,
1415 const struct usb_device_id *id) 1415 const struct usb_device_id *id)
1416{ 1416{
1417 struct usb_device *udev = interface_to_usbdev(intf); 1417 struct usb_device *udev = interface_to_usbdev(intf);
@@ -1639,7 +1639,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1639 return err; 1639 return err;
1640} 1640}
1641 1641
1642static void __devexit rtl8187_disconnect(struct usb_interface *intf) 1642static void rtl8187_disconnect(struct usb_interface *intf)
1643{ 1643{
1644 struct ieee80211_hw *dev = usb_get_intfdata(intf); 1644 struct ieee80211_hw *dev = usb_get_intfdata(intf);
1645 struct rtl8187_priv *priv; 1645 struct rtl8187_priv *priv;
@@ -1664,7 +1664,7 @@ static struct usb_driver rtl8187_driver = {
1664 .name = KBUILD_MODNAME, 1664 .name = KBUILD_MODNAME,
1665 .id_table = rtl8187_table, 1665 .id_table = rtl8187_table,
1666 .probe = rtl8187_probe, 1666 .probe = rtl8187_probe,
1667 .disconnect = __devexit_p(rtl8187_disconnect), 1667 .disconnect = rtl8187_disconnect,
1668 .disable_hub_initiated_lpm = 1, 1668 .disable_hub_initiated_lpm = 1,
1669}; 1669};
1670 1670
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 6b28e92d1d21..21b1bbb93a7e 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -32,6 +32,17 @@ config RTL8192DE
32 32
33 If you choose to build it as a module, it will be called rtl8192de 33 If you choose to build it as a module, it will be called rtl8192de
34 34
35config RTL8723AE
36 tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
37 depends on MAC80211 && PCI && EXPERIMENTAL
38 select FW_LOADER
39 select RTLWIFI
40 ---help---
41 This is the driver for Realtek RTL8723AE 802.11n PCIe
42 wireless network adapters.
43
44 If you choose to build it as a module, it will be called rtl8723ae
45
35config RTL8192CU 46config RTL8192CU
36 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" 47 tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
37 depends on MAC80211 && USB 48 depends on MAC80211 && USB
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index 97935c565bab..3b1cbac741e3 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -7,7 +7,8 @@ rtlwifi-objs := \
7 efuse.o \ 7 efuse.o \
8 ps.o \ 8 ps.o \
9 rc.o \ 9 rc.o \
10 regd.o 10 regd.o \
11 stats.o
11 12
12rtl8192c_common-objs += \ 13rtl8192c_common-objs += \
13 14
@@ -24,5 +25,6 @@ obj-$(CONFIG_RTL8192CE) += rtl8192ce/
24obj-$(CONFIG_RTL8192CU) += rtl8192cu/ 25obj-$(CONFIG_RTL8192CU) += rtl8192cu/
25obj-$(CONFIG_RTL8192SE) += rtl8192se/ 26obj-$(CONFIG_RTL8192SE) += rtl8192se/
26obj-$(CONFIG_RTL8192DE) += rtl8192de/ 27obj-$(CONFIG_RTL8192DE) += rtl8192de/
28obj-$(CONFIG_RTL8723AE) += rtl8723ae/
27 29
28ccflags-y += -D__CHECK_ENDIAN__ 30ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 59381fe8ed06..4494d130b37c 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -826,6 +826,30 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
826} 826}
827EXPORT_SYMBOL(rtlwifi_rate_mapping); 827EXPORT_SYMBOL(rtlwifi_rate_mapping);
828 828
829bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
830{
831 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
832 struct rtl_priv *rtlpriv = rtl_priv(hw);
833 __le16 fc = rtl_get_fc(skb);
834
835 if (rtlpriv->dm.supp_phymode_switch &&
836 mac->link_state < MAC80211_LINKED &&
837 (ieee80211_is_auth(fc) || ieee80211_is_probe_req(fc))) {
838 if (rtlpriv->cfg->ops->check_switch_to_dmdp)
839 rtlpriv->cfg->ops->check_switch_to_dmdp(hw);
840 }
841 if (ieee80211_is_auth(fc)) {
842 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
843 rtl_ips_nic_on(hw);
844
845 mac->link_state = MAC80211_LINKING;
846 /* Dual mac */
847 rtlpriv->phy.need_iqk = true;
848 }
849
850 return true;
851}
852
829void rtl_get_tcb_desc(struct ieee80211_hw *hw, 853void rtl_get_tcb_desc(struct ieee80211_hw *hw,
830 struct ieee80211_tx_info *info, 854 struct ieee80211_tx_info *info,
831 struct ieee80211_sta *sta, 855 struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index f35af0fdaaf0..5a8c80e259f7 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -142,4 +142,6 @@ u8 rtl_tid_to_ac(u8 tid);
142extern struct attribute_group rtl_attribute_group; 142extern struct attribute_group rtl_attribute_group;
143int rtlwifi_rate_mapping(struct ieee80211_hw *hw, 143int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
144 bool isht, u8 desc_rate, bool first_ampdu); 144 bool isht, u8 desc_rate, bool first_ampdu);
145bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
146
145#endif 147#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5b4b4d4eaf9e..0e510f73041a 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -52,11 +52,8 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
52 u32 target_content = 0; 52 u32 target_content = 0;
53 u8 entry_i; 53 u8 entry_i;
54 54
55 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 55 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "key_cont_128: %6phC\n",
56 "key_cont_128:\n %x:%x:%x:%x:%x:%x\n", 56 key_cont_128);
57 key_cont_128[0], key_cont_128[1],
58 key_cont_128[2], key_cont_128[3],
59 key_cont_128[4], key_cont_128[5]);
60 57
61 for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) { 58 for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
62 target_command = entry_i + CAM_CONTENT_COUNT * entry_no; 59 target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
@@ -340,7 +337,7 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
340 if (((bitmap & BIT(0)) == BIT(0)) && 337 if (((bitmap & BIT(0)) == BIT(0)) &&
341 (memcmp(addr, sta_addr, ETH_ALEN) == 0)) { 338 (memcmp(addr, sta_addr, ETH_ALEN) == 0)) {
342 /* Remove from HW Security CAM */ 339 /* Remove from HW Security CAM */
343 memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN); 340 eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
344 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i); 341 rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
345 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, 342 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
346 "del CAM entry %d\n", i); 343 "del CAM entry %d\n", i);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index a7c0e52869ba..be33aa14c8af 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -962,7 +962,6 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
962 int err = 0; 962 int err = 0;
963 u8 mac_addr[ETH_ALEN]; 963 u8 mac_addr[ETH_ALEN];
964 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 964 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
965 u8 zero_addr[ETH_ALEN] = { 0 };
966 965
967 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { 966 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
968 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, 967 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -1057,7 +1056,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1057 memcpy(rtlpriv->sec.key_buf[key_idx], 1056 memcpy(rtlpriv->sec.key_buf[key_idx],
1058 key->key, key->keylen); 1057 key->key, key->keylen);
1059 rtlpriv->sec.key_len[key_idx] = key->keylen; 1058 rtlpriv->sec.key_len[key_idx] = key->keylen;
1060 memcpy(mac_addr, zero_addr, ETH_ALEN); 1059 eth_zero_addr(mac_addr);
1061 } else if (group_key) { /* group key */ 1060 } else if (group_key) { /* group key */
1062 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, 1061 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
1063 "set group key\n"); 1062 "set group key\n");
@@ -1108,7 +1107,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1108 } 1107 }
1109 memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen); 1108 memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
1110 rtlpriv->sec.key_len[key_idx] = 0; 1109 rtlpriv->sec.key_len[key_idx] = 0;
1111 memcpy(mac_addr, zero_addr, ETH_ALEN); 1110 eth_zero_addr(mac_addr);
1112 /* 1111 /*
1113 *mac80211 will delete entrys one by one, 1112 *mac80211 will delete entrys one by one,
1114 *so don't use rtl_cam_reset_all_entry 1113 *so don't use rtl_cam_reset_all_entry
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
index 07493d2957f2..fd3269f47685 100644
--- a/drivers/net/wireless/rtlwifi/debug.h
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -106,6 +106,8 @@
106#define COMP_REGD BIT(27) 106#define COMP_REGD BIT(27)
107#define COMP_CHAN BIT(28) 107#define COMP_CHAN BIT(28)
108#define COMP_USB BIT(29) 108#define COMP_USB BIT(29)
109#define COMP_EASY_CONCURRENT COMP_USB /* reuse of this bit is OK */
110#define COMP_BT_COEXIST BIT(30)
109 111
110/*-------------------------------------------------------------- 112/*--------------------------------------------------------------
111 Define the rt_print components 113 Define the rt_print components
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index abc306b502ac..3deacafdcd5e 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1309,6 +1309,7 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1309 struct rtl_priv *rtlpriv = rtl_priv(hw); 1309 struct rtl_priv *rtlpriv = rtl_priv(hw);
1310 struct rtl_sta_info *sta_entry = NULL; 1310 struct rtl_sta_info *sta_entry = NULL;
1311 u8 tid = rtl_get_tid(skb); 1311 u8 tid = rtl_get_tid(skb);
1312 __le16 fc = rtl_get_fc(skb);
1312 1313
1313 if (!sta) 1314 if (!sta)
1314 return false; 1315 return false;
@@ -1316,6 +1317,12 @@ static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1316 1317
1317 if (!rtlpriv->rtlhal.earlymode_enable) 1318 if (!rtlpriv->rtlhal.earlymode_enable)
1318 return false; 1319 return false;
1320 if (ieee80211_is_nullfunc(fc))
1321 return false;
1322 if (ieee80211_is_qos_nullfunc(fc))
1323 return false;
1324 if (ieee80211_is_pspoll(fc))
1325 return false;
1319 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL) 1326 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1320 return false; 1327 return false;
1321 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE) 1328 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
@@ -1357,10 +1364,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
1357 u8 own; 1364 u8 own;
1358 u8 temp_one = 1; 1365 u8 temp_one = 1;
1359 1366
1360 if (ieee80211_is_auth(fc)) { 1367 if (ieee80211_is_mgmt(fc))
1361 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n"); 1368 rtl_tx_mgmt_proc(hw, skb);
1362 rtl_ips_nic_on(hw);
1363 }
1364 1369
1365 if (rtlpriv->psc.sw_ps_enabled) { 1370 if (rtlpriv->psc.sw_ps_enabled) {
1366 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) && 1371 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
@@ -1628,7 +1633,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1628 "8192 PCI-E is found - vid/did=%x/%x\n", 1633 "8192 PCI-E is found - vid/did=%x/%x\n",
1629 venderid, deviceid); 1634 venderid, deviceid);
1630 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E; 1635 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1631 break; 1636 return false;
1632 case RTL_PCI_REVISION_ID_8192SE: 1637 case RTL_PCI_REVISION_ID_8192SE:
1633 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 1638 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1634 "8192SE is found - vid/did=%x/%x\n", 1639 "8192SE is found - vid/did=%x/%x\n",
@@ -1643,6 +1648,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1643 break; 1648 break;
1644 1649
1645 } 1650 }
1651 } else if (deviceid == RTL_PCI_8723AE_DID) {
1652 rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
1653 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1654 "8723AE PCI-E is found - "
1655 "vid/did=%x/%x\n", venderid, deviceid);
1646 } else if (deviceid == RTL_PCI_8192CET_DID || 1656 } else if (deviceid == RTL_PCI_8192CET_DID ||
1647 deviceid == RTL_PCI_8192CE_DID || 1657 deviceid == RTL_PCI_8192CE_DID ||
1648 deviceid == RTL_PCI_8191CE_DID || 1658 deviceid == RTL_PCI_8191CE_DID ||
@@ -1746,7 +1756,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1746 return true; 1756 return true;
1747} 1757}
1748 1758
1749int __devinit rtl_pci_probe(struct pci_dev *pdev, 1759int rtl_pci_probe(struct pci_dev *pdev,
1750 const struct pci_device_id *id) 1760 const struct pci_device_id *id)
1751{ 1761{
1752 struct ieee80211_hw *hw = NULL; 1762 struct ieee80211_hw *hw = NULL;
@@ -1972,6 +1982,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
1972} 1982}
1973EXPORT_SYMBOL(rtl_pci_disconnect); 1983EXPORT_SYMBOL(rtl_pci_disconnect);
1974 1984
1985#ifdef CONFIG_PM_SLEEP
1975/*************************************** 1986/***************************************
1976kernel pci power state define: 1987kernel pci power state define:
1977PCI_D0 ((pci_power_t __force) 0) 1988PCI_D0 ((pci_power_t __force) 0)
@@ -2011,6 +2022,7 @@ int rtl_pci_resume(struct device *dev)
2011 return 0; 2022 return 0;
2012} 2023}
2013EXPORT_SYMBOL(rtl_pci_resume); 2024EXPORT_SYMBOL(rtl_pci_resume);
2025#endif /* CONFIG_PM_SLEEP */
2014 2026
2015struct rtl_intf_ops rtl_pci_ops = { 2027struct rtl_intf_ops rtl_pci_ops = {
2016 .read_efuse_byte = read_efuse_byte, 2028 .read_efuse_byte = read_efuse_byte,
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
index 241448fc9ed5..65b08f50022e 100644
--- a/drivers/net/wireless/rtlwifi/pci.h
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -79,6 +79,7 @@
79#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */ 79#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
80#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */ 80#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
81#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */ 81#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
82#define RTL_PCI_8723AE_DID 0x8723 /*8723AE */
82#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */ 83#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
83#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */ 84#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
84#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */ 85#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
@@ -152,6 +153,7 @@ struct rtl8192_rx_ring {
152 153
153struct rtl_pci { 154struct rtl_pci {
154 struct pci_dev *pdev; 155 struct pci_dev *pdev;
156 bool irq_enabled;
155 157
156 bool driver_is_goingto_unload; 158 bool driver_is_goingto_unload;
157 bool up_first_time; 159 bool up_first_time;
@@ -234,11 +236,13 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
234 236
235extern struct rtl_intf_ops rtl_pci_ops; 237extern struct rtl_intf_ops rtl_pci_ops;
236 238
237int __devinit rtl_pci_probe(struct pci_dev *pdev, 239int rtl_pci_probe(struct pci_dev *pdev,
238 const struct pci_device_id *id); 240 const struct pci_device_id *id);
239void rtl_pci_disconnect(struct pci_dev *pdev); 241void rtl_pci_disconnect(struct pci_dev *pdev);
242#ifdef CONFIG_PM_SLEEP
240int rtl_pci_suspend(struct device *dev); 243int rtl_pci_suspend(struct device *dev);
241int rtl_pci_resume(struct device *dev); 244int rtl_pci_resume(struct device *dev);
245#endif /* CONFIG_PM_SLEEP */
242static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr) 246static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
243{ 247{
244 return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); 248 return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr);
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index d5cbf01da8ac..c1e065f136ba 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -55,7 +55,8 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
55 * 1M we will not use FW rate but user rate. 55 * 1M we will not use FW rate but user rate.
56 */ 56 */
57 if (rtlmac->opmode == NL80211_IFTYPE_AP || 57 if (rtlmac->opmode == NL80211_IFTYPE_AP ||
58 rtlmac->opmode == NL80211_IFTYPE_ADHOC) { 58 rtlmac->opmode == NL80211_IFTYPE_ADHOC ||
59 rtlmac->opmode == NL80211_IFTYPE_MESH_POINT) {
59 if (sta) { 60 if (sta) {
60 sta_entry = (struct rtl_sta_info *) sta->drv_priv; 61 sta_entry = (struct rtl_sta_info *) sta->drv_priv;
61 wireless_mode = sta_entry->wireless_mode; 62 wireless_mode = sta_entry->wireless_mode;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1ca4e25c143b..1cdf5a271c9f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -43,8 +43,8 @@
43#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \ 43#define GET_UNDECORATED_AVERAGE_RSSI(_priv) \
44 ((RTLPRIV(_priv))->mac80211.opmode == \ 44 ((RTLPRIV(_priv))->mac80211.opmode == \
45 NL80211_IFTYPE_ADHOC) ? \ 45 NL80211_IFTYPE_ADHOC) ? \
46 ((RTLPRIV(_priv))->dm.entry_min_undecoratedsmoothed_pwdb) : \ 46 ((RTLPRIV(_priv))->dm.entry_min_undec_sm_pwdb) : \
47 ((RTLPRIV(_priv))->dm.undecorated_smoothed_pwdb) 47 ((RTLPRIV(_priv))->dm.undec_sm_pwdb)
48 48
49static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { 49static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
50 0x7f8001fe, 50 0x7f8001fe,
@@ -167,18 +167,18 @@ static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
168 dm_digtable->cur_igvalue = 0x20; 168 dm_digtable->cur_igvalue = 0x20;
169 dm_digtable->pre_igvalue = 0x0; 169 dm_digtable->pre_igvalue = 0x0;
170 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT; 170 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT; 171 dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 172 dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
177 dm_digtable->rx_gain_range_max = DM_DIG_MAX; 177 dm_digtable->rx_gain_range_max = DM_DIG_MAX;
178 dm_digtable->rx_gain_range_min = DM_DIG_MIN; 178 dm_digtable->rx_gain_range_min = DM_DIG_MIN;
179 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; 179 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
180 dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX; 180 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
181 dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN; 181 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
182 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX; 182 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
183 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; 183 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
184} 184}
@@ -189,22 +189,21 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
189 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 189 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
190 long rssi_val_min = 0; 190 long rssi_val_min = 0;
191 191
192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) && 192 if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
193 (dm_digtable->cursta_connectstate == DIG_STA_CONNECT)) { 193 (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) 194 if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
195 rssi_val_min = 195 rssi_val_min =
196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > 196 (rtlpriv->dm.entry_min_undec_sm_pwdb >
197 rtlpriv->dm.undecorated_smoothed_pwdb) ? 197 rtlpriv->dm.undec_sm_pwdb) ?
198 rtlpriv->dm.undecorated_smoothed_pwdb : 198 rtlpriv->dm.undec_sm_pwdb :
199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 199 rtlpriv->dm.entry_min_undec_sm_pwdb;
200 else 200 else
201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 201 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
202 } else if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT || 202 } else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
203 dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT) { 203 dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 204 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
205 } else if (dm_digtable->curmultista_connectstate == 205 } else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
206 DIG_MULTISTA_CONNECT) { 206 rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
207 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
208 } 207 }
209 208
210 return (u8) rssi_val_min; 209 return (u8) rssi_val_min;
@@ -286,37 +285,33 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
286static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) 285static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
287{ 286{
288 struct rtl_priv *rtlpriv = rtl_priv(hw); 287 struct rtl_priv *rtlpriv = rtl_priv(hw);
289 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 288 struct dig_t *digtable = &rtlpriv->dm_digtable;
290 289
291 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) { 290 if (rtlpriv->falsealm_cnt.cnt_all > digtable->fa_highthresh) {
292 if ((dm_digtable->backoff_val - 2) < 291 if ((digtable->back_val - 2) < digtable->back_range_min)
293 dm_digtable->backoff_val_range_min) 292 digtable->back_val = digtable->back_range_min;
294 dm_digtable->backoff_val =
295 dm_digtable->backoff_val_range_min;
296 else 293 else
297 dm_digtable->backoff_val -= 2; 294 digtable->back_val -= 2;
298 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) { 295 } else if (rtlpriv->falsealm_cnt.cnt_all < digtable->fa_lowthresh) {
299 if ((dm_digtable->backoff_val + 2) > 296 if ((digtable->back_val + 2) > digtable->back_range_max)
300 dm_digtable->backoff_val_range_max) 297 digtable->back_val = digtable->back_range_max;
301 dm_digtable->backoff_val =
302 dm_digtable->backoff_val_range_max;
303 else 298 else
304 dm_digtable->backoff_val += 2; 299 digtable->back_val += 2;
305 } 300 }
306 301
307 if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) > 302 if ((digtable->rssi_val_min + 10 - digtable->back_val) >
308 dm_digtable->rx_gain_range_max) 303 digtable->rx_gain_range_max)
309 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max; 304 digtable->cur_igvalue = digtable->rx_gain_range_max;
310 else if ((dm_digtable->rssi_val_min + 10 - 305 else if ((digtable->rssi_val_min + 10 -
311 dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min) 306 digtable->back_val) < digtable->rx_gain_range_min)
312 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min; 307 digtable->cur_igvalue = digtable->rx_gain_range_min;
313 else 308 else
314 dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 - 309 digtable->cur_igvalue = digtable->rssi_val_min + 10 -
315 dm_digtable->backoff_val; 310 digtable->back_val;
316 311
317 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 312 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
318 "rssi_val_min = %x backoff_val %x\n", 313 "rssi_val_min = %x back_val %x\n",
319 dm_digtable->rssi_val_min, dm_digtable->backoff_val); 314 digtable->rssi_val_min, digtable->back_val);
320 315
321 rtl92c_dm_write_dig(hw); 316 rtl92c_dm_write_dig(hw);
322} 317}
@@ -327,14 +322,14 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
327 struct rtl_priv *rtlpriv = rtl_priv(hw); 322 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 323 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
329 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 324 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
330 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 325 long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
331 bool multi_sta = false; 326 bool multi_sta = false;
332 327
333 if (mac->opmode == NL80211_IFTYPE_ADHOC) 328 if (mac->opmode == NL80211_IFTYPE_ADHOC)
334 multi_sta = true; 329 multi_sta = true;
335 330
336 if (!multi_sta || 331 if (!multi_sta ||
337 dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) { 332 dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
338 initialized = false; 333 initialized = false;
339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 334 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
340 return; 335 return;
@@ -345,7 +340,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
345 rtl92c_dm_write_dig(hw); 340 rtl92c_dm_write_dig(hw);
346 } 341 }
347 342
348 if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) { 343 if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
349 if ((rssi_strength < dm_digtable->rssi_lowthresh) && 344 if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
350 (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { 345 (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
351 346
@@ -367,8 +362,8 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
367 } 362 }
368 363
369 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 364 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
370 "curmultista_connectstate = %x dig_ext_port_stage %x\n", 365 "curmultista_cstate = %x dig_ext_port_stage %x\n",
371 dm_digtable->curmultista_connectstate, 366 dm_digtable->curmultista_cstate,
372 dm_digtable->dig_ext_port_stage); 367 dm_digtable->dig_ext_port_stage);
373} 368}
374 369
@@ -378,15 +373,14 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 373 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
379 374
380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 375 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
381 "presta_connectstate = %x, cursta_connectstate = %x\n", 376 "presta_cstate = %x, cursta_cstate = %x\n",
382 dm_digtable->presta_connectstate, 377 dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
383 dm_digtable->cursta_connectstate);
384 378
385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectstate 379 if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
386 || dm_digtable->cursta_connectstate == DIG_STA_BEFORE_CONNECT 380 dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
387 || dm_digtable->cursta_connectstate == DIG_STA_CONNECT) { 381 dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
388 382
389 if (dm_digtable->cursta_connectstate != DIG_STA_DISCONNECT) { 383 if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
390 dm_digtable->rssi_val_min = 384 dm_digtable->rssi_val_min =
391 rtl92c_dm_initial_gain_min_pwdb(hw); 385 rtl92c_dm_initial_gain_min_pwdb(hw);
392 rtl92c_dm_ctrl_initgain_by_rssi(hw); 386 rtl92c_dm_ctrl_initgain_by_rssi(hw);
@@ -394,7 +388,7 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
394 } else { 388 } else {
395 dm_digtable->rssi_val_min = 0; 389 dm_digtable->rssi_val_min = 0;
396 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 390 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
397 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; 391 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
398 dm_digtable->cur_igvalue = 0x20; 392 dm_digtable->cur_igvalue = 0x20;
399 dm_digtable->pre_igvalue = 0; 393 dm_digtable->pre_igvalue = 0;
400 rtl92c_dm_write_dig(hw); 394 rtl92c_dm_write_dig(hw);
@@ -407,7 +401,7 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 401 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 402 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
409 403
410 if (dm_digtable->cursta_connectstate == DIG_STA_CONNECT) { 404 if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); 405 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
412 406
413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 407 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
@@ -484,15 +478,15 @@ static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
484 return; 478 return;
485 479
486 if (mac->link_state >= MAC80211_LINKED) 480 if (mac->link_state >= MAC80211_LINKED)
487 dm_digtable->cursta_connectstate = DIG_STA_CONNECT; 481 dm_digtable->cursta_cstate = DIG_STA_CONNECT;
488 else 482 else
489 dm_digtable->cursta_connectstate = DIG_STA_DISCONNECT; 483 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
490 484
491 rtl92c_dm_initial_gain_sta(hw); 485 rtl92c_dm_initial_gain_sta(hw);
492 rtl92c_dm_initial_gain_multi_sta(hw); 486 rtl92c_dm_initial_gain_multi_sta(hw);
493 rtl92c_dm_cck_packet_detection_thresh(hw); 487 rtl92c_dm_cck_packet_detection_thresh(hw);
494 488
495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectstate; 489 dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
496 490
497} 491}
498 492
@@ -526,9 +520,9 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
526 struct dig_t *dm_digtable = &rtlpriv->dm_digtable; 520 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
527 521
528 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 522 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
529 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 523 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
530 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue, 524 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
531 dm_digtable->backoff_val); 525 dm_digtable->back_val);
532 526
533 dm_digtable->cur_igvalue += 2; 527 dm_digtable->cur_igvalue += 2;
534 if (dm_digtable->cur_igvalue > 0x3f) 528 if (dm_digtable->cur_igvalue > 0x3f)
@@ -555,20 +549,18 @@ static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
555 return; 549 return;
556 550
557 if (tmpentry_max_pwdb != 0) { 551 if (tmpentry_max_pwdb != 0) {
558 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 552 rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
559 tmpentry_max_pwdb;
560 } else { 553 } else {
561 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0; 554 rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
562 } 555 }
563 556
564 if (tmpentry_min_pwdb != 0xff) { 557 if (tmpentry_min_pwdb != 0xff) {
565 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 558 rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
566 tmpentry_min_pwdb;
567 } else { 559 } else {
568 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0; 560 rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
569 } 561 }
570 562
571 h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); 563 h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
572 h2c_parameter[0] = 0; 564 h2c_parameter[0] = 0;
573 565
574 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); 566 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
@@ -1160,7 +1152,7 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1160 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1152 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1161 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1153 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1162 struct rate_adaptive *p_ra = &(rtlpriv->ra); 1154 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1163 u32 low_rssithresh_for_ra, high_rssithresh_for_ra; 1155 u32 low_rssi_thresh, high_rssi_thresh;
1164 struct ieee80211_sta *sta = NULL; 1156 struct ieee80211_sta *sta = NULL;
1165 1157
1166 if (is_hal_stop(rtlhal)) { 1158 if (is_hal_stop(rtlhal)) {
@@ -1179,35 +1171,33 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1179 mac->opmode == NL80211_IFTYPE_STATION) { 1171 mac->opmode == NL80211_IFTYPE_STATION) {
1180 switch (p_ra->pre_ratr_state) { 1172 switch (p_ra->pre_ratr_state) {
1181 case DM_RATR_STA_HIGH: 1173 case DM_RATR_STA_HIGH:
1182 high_rssithresh_for_ra = 50; 1174 high_rssi_thresh = 50;
1183 low_rssithresh_for_ra = 20; 1175 low_rssi_thresh = 20;
1184 break; 1176 break;
1185 case DM_RATR_STA_MIDDLE: 1177 case DM_RATR_STA_MIDDLE:
1186 high_rssithresh_for_ra = 55; 1178 high_rssi_thresh = 55;
1187 low_rssithresh_for_ra = 20; 1179 low_rssi_thresh = 20;
1188 break; 1180 break;
1189 case DM_RATR_STA_LOW: 1181 case DM_RATR_STA_LOW:
1190 high_rssithresh_for_ra = 50; 1182 high_rssi_thresh = 50;
1191 low_rssithresh_for_ra = 25; 1183 low_rssi_thresh = 25;
1192 break; 1184 break;
1193 default: 1185 default:
1194 high_rssithresh_for_ra = 50; 1186 high_rssi_thresh = 50;
1195 low_rssithresh_for_ra = 20; 1187 low_rssi_thresh = 20;
1196 break; 1188 break;
1197 } 1189 }
1198 1190
1199 if (rtlpriv->dm.undecorated_smoothed_pwdb > 1191 if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh)
1200 (long)high_rssithresh_for_ra)
1201 p_ra->ratr_state = DM_RATR_STA_HIGH; 1192 p_ra->ratr_state = DM_RATR_STA_HIGH;
1202 else if (rtlpriv->dm.undecorated_smoothed_pwdb > 1193 else if (rtlpriv->dm.undec_sm_pwdb > (long)low_rssi_thresh)
1203 (long)low_rssithresh_for_ra)
1204 p_ra->ratr_state = DM_RATR_STA_MIDDLE; 1194 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1205 else 1195 else
1206 p_ra->ratr_state = DM_RATR_STA_LOW; 1196 p_ra->ratr_state = DM_RATR_STA_LOW;
1207 1197
1208 if (p_ra->pre_ratr_state != p_ra->ratr_state) { 1198 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1209 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n", 1199 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, "RSSI = %ld\n",
1210 rtlpriv->dm.undecorated_smoothed_pwdb); 1200 rtlpriv->dm.undec_sm_pwdb);
1211 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, 1201 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1212 "RSSI_LEVEL = %d\n", p_ra->ratr_state); 1202 "RSSI_LEVEL = %d\n", p_ra->ratr_state);
1213 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, 1203 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
@@ -1315,7 +1305,7 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1315 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1305 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1316 1306
1317 if (((mac->link_state == MAC80211_NOLINK)) && 1307 if (((mac->link_state == MAC80211_NOLINK)) &&
1318 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 1308 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
1319 dm_pstable->rssi_val_min = 0; 1309 dm_pstable->rssi_val_min = 0;
1320 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n"); 1310 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
1321 } 1311 }
@@ -1323,20 +1313,19 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1323 if (mac->link_state == MAC80211_LINKED) { 1313 if (mac->link_state == MAC80211_LINKED) {
1324 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 1314 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1325 dm_pstable->rssi_val_min = 1315 dm_pstable->rssi_val_min =
1326 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1316 rtlpriv->dm.entry_min_undec_sm_pwdb;
1327 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1317 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1328 "AP Client PWDB = 0x%lx\n", 1318 "AP Client PWDB = 0x%lx\n",
1329 dm_pstable->rssi_val_min); 1319 dm_pstable->rssi_val_min);
1330 } else { 1320 } else {
1331 dm_pstable->rssi_val_min = 1321 dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
1332 rtlpriv->dm.undecorated_smoothed_pwdb;
1333 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1322 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1334 "STA Default Port PWDB = 0x%lx\n", 1323 "STA Default Port PWDB = 0x%lx\n",
1335 dm_pstable->rssi_val_min); 1324 dm_pstable->rssi_val_min);
1336 } 1325 }
1337 } else { 1326 } else {
1338 dm_pstable->rssi_val_min = 1327 dm_pstable->rssi_val_min =
1339 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1328 rtlpriv->dm.entry_min_undec_sm_pwdb;
1340 1329
1341 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1330 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1342 "AP Ext Port PWDB = 0x%lx\n", 1331 "AP Ext Port PWDB = 0x%lx\n",
@@ -1368,7 +1357,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
1368 struct rtl_priv *rtlpriv = rtl_priv(hw); 1357 struct rtl_priv *rtlpriv = rtl_priv(hw);
1369 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1358 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1370 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1359 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1371 long undecorated_smoothed_pwdb; 1360 long undec_sm_pwdb;
1372 1361
1373 if (!rtlpriv->dm.dynamic_txpower_enable) 1362 if (!rtlpriv->dm.dynamic_txpower_enable)
1374 return; 1363 return;
@@ -1379,7 +1368,7 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
1379 } 1368 }
1380 1369
1381 if ((mac->link_state < MAC80211_LINKED) && 1370 if ((mac->link_state < MAC80211_LINKED) &&
1382 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 1371 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
1383 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 1372 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
1384 "Not connected to any\n"); 1373 "Not connected to any\n");
1385 1374
@@ -1391,41 +1380,35 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
1391 1380
1392 if (mac->link_state >= MAC80211_LINKED) { 1381 if (mac->link_state >= MAC80211_LINKED) {
1393 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 1382 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1394 undecorated_smoothed_pwdb = 1383 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
1395 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1396 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1384 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1397 "AP Client PWDB = 0x%lx\n", 1385 "AP Client PWDB = 0x%lx\n",
1398 undecorated_smoothed_pwdb); 1386 undec_sm_pwdb);
1399 } else { 1387 } else {
1400 undecorated_smoothed_pwdb = 1388 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
1401 rtlpriv->dm.undecorated_smoothed_pwdb;
1402 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1389 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1403 "STA Default Port PWDB = 0x%lx\n", 1390 "STA Default Port PWDB = 0x%lx\n",
1404 undecorated_smoothed_pwdb); 1391 undec_sm_pwdb);
1405 } 1392 }
1406 } else { 1393 } else {
1407 undecorated_smoothed_pwdb = 1394 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
1408 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1409 1395
1410 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1396 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1411 "AP Ext Port PWDB = 0x%lx\n", 1397 "AP Ext Port PWDB = 0x%lx\n",
1412 undecorated_smoothed_pwdb); 1398 undec_sm_pwdb);
1413 } 1399 }
1414 1400
1415 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { 1401 if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
1416 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 1402 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
1417 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1403 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1418 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); 1404 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
1419 } else if ((undecorated_smoothed_pwdb < 1405 } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
1420 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && 1406 (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
1421 (undecorated_smoothed_pwdb >=
1422 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
1423 1407
1424 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 1408 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
1425 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1409 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1426 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"); 1410 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
1427 } else if (undecorated_smoothed_pwdb < 1411 } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
1428 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
1429 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; 1412 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
1430 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 1413 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
1431 "TXHIGHPWRLEVEL_NORMAL\n"); 1414 "TXHIGHPWRLEVEL_NORMAL\n");
@@ -1473,48 +1456,46 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
1473{ 1456{
1474 struct rtl_priv *rtlpriv = rtl_priv(hw); 1457 struct rtl_priv *rtlpriv = rtl_priv(hw);
1475 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); 1458 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1476 long undecorated_smoothed_pwdb; 1459 long undec_sm_pwdb;
1477 u8 curr_bt_rssi_state = 0x00; 1460 u8 curr_bt_rssi_state = 0x00;
1478 1461
1479 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { 1462 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1480 undecorated_smoothed_pwdb = 1463 undec_sm_pwdb = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
1481 GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
1482 } else { 1464 } else {
1483 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0) 1465 if (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)
1484 undecorated_smoothed_pwdb = 100; 1466 undec_sm_pwdb = 100;
1485 else 1467 else
1486 undecorated_smoothed_pwdb = 1468 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
1487 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1488 } 1469 }
1489 1470
1490 /* Check RSSI to determine HighPower/NormalPower state for 1471 /* Check RSSI to determine HighPower/NormalPower state for
1491 * BT coexistence. */ 1472 * BT coexistence. */
1492 if (undecorated_smoothed_pwdb >= 67) 1473 if (undec_sm_pwdb >= 67)
1493 curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER); 1474 curr_bt_rssi_state &= (~BT_RSSI_STATE_NORMAL_POWER);
1494 else if (undecorated_smoothed_pwdb < 62) 1475 else if (undec_sm_pwdb < 62)
1495 curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER; 1476 curr_bt_rssi_state |= BT_RSSI_STATE_NORMAL_POWER;
1496 1477
1497 /* Check RSSI to determine AMPDU setting for BT coexistence. */ 1478 /* Check RSSI to determine AMPDU setting for BT coexistence. */
1498 if (undecorated_smoothed_pwdb >= 40) 1479 if (undec_sm_pwdb >= 40)
1499 curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF); 1480 curr_bt_rssi_state &= (~BT_RSSI_STATE_AMDPU_OFF);
1500 else if (undecorated_smoothed_pwdb <= 32) 1481 else if (undec_sm_pwdb <= 32)
1501 curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF; 1482 curr_bt_rssi_state |= BT_RSSI_STATE_AMDPU_OFF;
1502 1483
1503 /* Marked RSSI state. It will be used to determine BT coexistence 1484 /* Marked RSSI state. It will be used to determine BT coexistence
1504 * setting later. */ 1485 * setting later. */
1505 if (undecorated_smoothed_pwdb < 35) 1486 if (undec_sm_pwdb < 35)
1506 curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW; 1487 curr_bt_rssi_state |= BT_RSSI_STATE_SPECIAL_LOW;
1507 else 1488 else
1508 curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW); 1489 curr_bt_rssi_state &= (~BT_RSSI_STATE_SPECIAL_LOW);
1509 1490
1510 /* Set Tx Power according to BT status. */ 1491 /* Set Tx Power according to BT status. */
1511 if (undecorated_smoothed_pwdb >= 30) 1492 if (undec_sm_pwdb >= 30)
1512 curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW; 1493 curr_bt_rssi_state |= BT_RSSI_STATE_TXPOWER_LOW;
1513 else if (undecorated_smoothed_pwdb < 25) 1494 else if (undec_sm_pwdb < 25)
1514 curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW); 1495 curr_bt_rssi_state &= (~BT_RSSI_STATE_TXPOWER_LOW);
1515 1496
1516 /* Check BT state related to BT_Idle in B/G mode. */ 1497 /* Check BT state related to BT_Idle in B/G mode. */
1517 if (undecorated_smoothed_pwdb < 15) 1498 if (undec_sm_pwdb < 15)
1518 curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW; 1499 curr_bt_rssi_state |= BT_RSSI_STATE_BG_EDCA_LOW;
1519 else 1500 else
1520 curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW); 1501 curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index cdcad7d9f15e..1d5d3604e3e0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -34,9 +34,6 @@
34#include "dm_common.h" 34#include "dm_common.h"
35#include "phy_common.h" 35#include "phy_common.h"
36 36
37/* Define macro to shorten lines */
38#define MCS_TXPWR mcs_txpwrlevel_origoffset
39
40u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) 37u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
41{ 38{
42 struct rtl_priv *rtlpriv = rtl_priv(hw); 39 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -138,13 +135,13 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
138 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, 135 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
139 BIT(8)); 136 BIT(8));
140 if (rfpi_enable) 137 if (rfpi_enable)
141 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, 138 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
142 BLSSIREADBACKDATA); 139 BLSSIREADBACKDATA);
143 else 140 else
144 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, 141 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
145 BLSSIREADBACKDATA); 142 BLSSIREADBACKDATA);
146 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n", 143 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
147 rfpath, pphyreg->rflssi_readback, retvalue); 144 rfpath, pphyreg->rf_rb, retvalue);
148 return retvalue; 145 return retvalue;
149} 146}
150EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read); 147EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -290,11 +287,11 @@ void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
290 else 287 else
291 return; 288 return;
292 289
293 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index] = data; 290 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
294 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 291 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
295 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n", 292 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
296 rtlphy->pwrgroup_cnt, index, 293 rtlphy->pwrgroup_cnt, index,
297 rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][index]); 294 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
298 295
299 if (index == 13) 296 if (index == 13)
300 rtlphy->pwrgroup_cnt++; 297 rtlphy->pwrgroup_cnt++;
@@ -374,14 +371,10 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
374 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; 371 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
375 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; 372 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
376 373
377 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control = 374 rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
378 RFPGA0_XAB_SWITCHCONTROL; 375 rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
379 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control = 376 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
380 RFPGA0_XAB_SWITCHCONTROL; 377 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
381 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
382 RFPGA0_XCD_SWITCHCONTROL;
383 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
384 RFPGA0_XCD_SWITCHCONTROL;
385 378
386 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; 379 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
387 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; 380 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
@@ -393,47 +386,33 @@ void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
393 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; 386 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
394 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; 387 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
395 388
396 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance = 389 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
397 ROFDM0_XARXIQIMBALANCE; 390 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
398 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance = 391 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
399 ROFDM0_XBRXIQIMBALANCE; 392 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
400 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
401 ROFDM0_XCRXIQIMBANLANCE;
402 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
403 ROFDM0_XDRXIQIMBALANCE;
404 393
405 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; 394 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
406 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; 395 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
407 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; 396 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
408 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; 397 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
409 398
410 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance = 399 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
411 ROFDM0_XATXIQIMBALANCE; 400 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
412 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance = 401 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
413 ROFDM0_XBTXIQIMBALANCE; 402 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
414 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
415 ROFDM0_XCTXIQIMBALANCE;
416 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
417 ROFDM0_XDTXIQIMBALANCE;
418 403
419 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; 404 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
420 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; 405 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
421 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; 406 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
422 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; 407 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
423 408
424 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = 409 rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
425 RFPGA0_XA_LSSIREADBACK; 410 rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
426 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = 411 rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
427 RFPGA0_XB_LSSIREADBACK; 412 rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
428 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
429 RFPGA0_XC_LSSIREADBACK;
430 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
431 RFPGA0_XD_LSSIREADBACK;
432 413
433 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = 414 rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
434 TRANSCEIVEA_HSPI_READBACK; 415 rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
435 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
436 TRANSCEIVEB_HSPI_READBACK;
437 416
438} 417}
439EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition); 418EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition);
@@ -724,6 +703,26 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
724} 703}
725EXPORT_SYMBOL(rtl92c_phy_sw_chnl); 704EXPORT_SYMBOL(rtl92c_phy_sw_chnl);
726 705
706static void _rtl92c_phy_sw_rf_setting(struct ieee80211_hw *hw, u8 channel)
707{
708 struct rtl_priv *rtlpriv = rtl_priv(hw);
709 struct rtl_phy *rtlphy = &(rtlpriv->phy);
710 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
711
712 if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
713 if (channel == 6 && rtlphy->current_chan_bw ==
714 HT_CHANNEL_WIDTH_20)
715 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
716 0x00255);
717 else{
718 u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A,
719 RF_RX_G1, RFREG_OFFSET_MASK);
720 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
721 backupRF0x1A);
722 }
723 }
724}
725
727static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, 726static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
728 u32 cmdtableidx, u32 cmdtablesz, 727 u32 cmdtableidx, u32 cmdtablesz,
729 enum swchnlcmd_id cmdid, 728 enum swchnlcmd_id cmdid,
@@ -837,6 +836,7 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
837 currentcmd->para1, 836 currentcmd->para1,
838 RFREG_OFFSET_MASK, 837 RFREG_OFFSET_MASK,
839 rtlphy->rfreg_chnlval[rfpath]); 838 rtlphy->rfreg_chnlval[rfpath]);
839 _rtl92c_phy_sw_rf_setting(hw, channel);
840 } 840 }
841 break; 841 break;
842 default: 842 default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 2925094b2d91..3cfa1bb0f476 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -116,6 +116,9 @@
116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) 116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
117 117
118#define CHIP_VER_B BIT(4) 118#define CHIP_VER_B BIT(4)
119#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
120#define CHIP_BONDING_92C_1T2R 0x1
121#define RF_TYPE_1T2R BIT(1)
119#define CHIP_92C_BITMASK BIT(0) 122#define CHIP_92C_BITMASK BIT(0)
120#define CHIP_UNKNOWN BIT(7) 123#define CHIP_UNKNOWN BIT(7)
121#define CHIP_92C_1T2R 0x03 124#define CHIP_92C_1T2R 0x03
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
index 27b3af880d96..74f9c083b80d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -41,7 +41,7 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
41 struct rtl_priv *rtlpriv = rtl_priv(hw); 41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy); 42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
43 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 43 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
44 long undecorated_smoothed_pwdb; 44 long undec_sm_pwdb;
45 45
46 if (!rtlpriv->dm.dynamic_txpower_enable) 46 if (!rtlpriv->dm.dynamic_txpower_enable)
47 return; 47 return;
@@ -52,7 +52,7 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
52 } 52 }
53 53
54 if ((mac->link_state < MAC80211_LINKED) && 54 if ((mac->link_state < MAC80211_LINKED) &&
55 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 55 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
56 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 56 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
57 "Not connected to any\n"); 57 "Not connected to any\n");
58 58
@@ -64,41 +64,35 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
64 64
65 if (mac->link_state >= MAC80211_LINKED) { 65 if (mac->link_state >= MAC80211_LINKED) {
66 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 66 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
67 undecorated_smoothed_pwdb = 67 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
68 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
69 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 68 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
70 "AP Client PWDB = 0x%lx\n", 69 "AP Client PWDB = 0x%lx\n",
71 undecorated_smoothed_pwdb); 70 undec_sm_pwdb);
72 } else { 71 } else {
73 undecorated_smoothed_pwdb = 72 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
74 rtlpriv->dm.undecorated_smoothed_pwdb;
75 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 73 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
76 "STA Default Port PWDB = 0x%lx\n", 74 "STA Default Port PWDB = 0x%lx\n",
77 undecorated_smoothed_pwdb); 75 undec_sm_pwdb);
78 } 76 }
79 } else { 77 } else {
80 undecorated_smoothed_pwdb = 78 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
81 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
82 79
83 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 80 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
84 "AP Ext Port PWDB = 0x%lx\n", 81 "AP Ext Port PWDB = 0x%lx\n",
85 undecorated_smoothed_pwdb); 82 undec_sm_pwdb);
86 } 83 }
87 84
88 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { 85 if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
89 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 86 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
90 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 87 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
91 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); 88 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
92 } else if ((undecorated_smoothed_pwdb < 89 } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
93 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && 90 (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
94 (undecorated_smoothed_pwdb >=
95 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
96 91
97 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 92 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
98 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 93 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
99 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"); 94 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
100 } else if (undecorated_smoothed_pwdb < 95 } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
101 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
102 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; 96 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
103 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 97 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
104 "TXHIGHPWRLEVEL_NORMAL\n"); 98 "TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 86d73b32d995..d1f34f6ffbdf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -896,7 +896,6 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
896 struct rtl_phy *rtlphy = &(rtlpriv->phy); 896 struct rtl_phy *rtlphy = &(rtlpriv->phy);
897 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 897 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
898 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 898 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
899 static bool iqk_initialized; /* initialized to false */
900 bool rtstatus = true; 899 bool rtstatus = true;
901 bool is92c; 900 bool is92c;
902 int err; 901 int err;
@@ -921,9 +920,28 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
921 920
922 rtlhal->last_hmeboxnum = 0; 921 rtlhal->last_hmeboxnum = 0;
923 rtl92c_phy_mac_config(hw); 922 rtl92c_phy_mac_config(hw);
923 /* because last function modify RCR, so we update
924 * rcr var here, or TP will unstable for receive_config
925 * is wrong, RX RCR_ACRC32 will cause TP unstabel & Rx
926 * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252*/
927 rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
928 rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
929 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
924 rtl92c_phy_bb_config(hw); 930 rtl92c_phy_bb_config(hw);
925 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; 931 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
926 rtl92c_phy_rf_config(hw); 932 rtl92c_phy_rf_config(hw);
933 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
934 !IS_92C_SERIAL(rtlhal->version)) {
935 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
936 rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
937 } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
938 rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
939 rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
940 rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
941 rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
942 rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
943 rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
944 }
927 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, 945 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
928 RF_CHNLBW, RFREG_OFFSET_MASK); 946 RF_CHNLBW, RFREG_OFFSET_MASK);
929 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1, 947 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
@@ -945,11 +963,11 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
945 963
946 if (ppsc->rfpwr_state == ERFON) { 964 if (ppsc->rfpwr_state == ERFON) {
947 rtl92c_phy_set_rfpath_switch(hw, 1); 965 rtl92c_phy_set_rfpath_switch(hw, 1);
948 if (iqk_initialized) { 966 if (rtlphy->iqk_initialized) {
949 rtl92c_phy_iq_calibrate(hw, true); 967 rtl92c_phy_iq_calibrate(hw, true);
950 } else { 968 } else {
951 rtl92c_phy_iq_calibrate(hw, false); 969 rtl92c_phy_iq_calibrate(hw, false);
952 iqk_initialized = true; 970 rtlphy->iqk_initialized = true;
953 } 971 }
954 972
955 rtl92c_dm_check_txpower_tracking(hw); 973 rtl92c_dm_check_txpower_tracking(hw);
@@ -1004,6 +1022,13 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
1004 ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | 1022 ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) |
1005 CHIP_VENDOR_UMC)); 1023 CHIP_VENDOR_UMC));
1006 } 1024 }
1025 if (IS_92C_SERIAL(version)) {
1026 value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
1027 version = (enum version_8192c)(version |
1028 ((CHIP_BONDING_IDENTIFIER(value32)
1029 == CHIP_BONDING_92C_1T2R) ?
1030 RF_TYPE_1T2R : 0));
1031 }
1007 } 1032 }
1008 1033
1009 switch (version) { 1034 switch (version) {
@@ -1019,12 +1044,30 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
1019 case VERSION_A_CHIP_88C: 1044 case VERSION_A_CHIP_88C:
1020 versionid = "A_CHIP_88C"; 1045 versionid = "A_CHIP_88C";
1021 break; 1046 break;
1047 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
1048 versionid = "A_CUT_92C_1T2R";
1049 break;
1050 case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
1051 versionid = "A_CUT_92C";
1052 break;
1053 case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
1054 versionid = "A_CUT_88C";
1055 break;
1056 case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
1057 versionid = "B_CUT_92C_1T2R";
1058 break;
1059 case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
1060 versionid = "B_CUT_92C";
1061 break;
1062 case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
1063 versionid = "B_CUT_88C";
1064 break;
1022 default: 1065 default:
1023 versionid = "Unknown. Bug?"; 1066 versionid = "Unknown. Bug?";
1024 break; 1067 break;
1025 } 1068 }
1026 1069
1027 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 1070 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
1028 "Chip Version ID: %s\n", versionid); 1071 "Chip Version ID: %s\n", versionid);
1029 1072
1030 switch (version & 0x3) { 1073 switch (version & 0x3) {
@@ -1197,6 +1240,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1197{ 1240{
1198 struct rtl_priv *rtlpriv = rtl_priv(hw); 1241 struct rtl_priv *rtlpriv = rtl_priv(hw);
1199 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); 1242 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1243 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1200 u8 u1b_tmp; 1244 u8 u1b_tmp;
1201 u32 u4b_tmp; 1245 u32 u4b_tmp;
1202 1246
@@ -1225,7 +1269,8 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1225 rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790); 1269 rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
1226 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080); 1270 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
1227 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80); 1271 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
1228 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); 1272 if (!IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
1273 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
1229 if (rtlpcipriv->bt_coexist.bt_coexistence) { 1274 if (rtlpcipriv->bt_coexist.bt_coexistence) {
1230 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL); 1275 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
1231 u4b_tmp |= 0x03824800; 1276 u4b_tmp |= 0x03824800;
@@ -1254,6 +1299,9 @@ void rtl92ce_card_disable(struct ieee80211_hw *hw)
1254 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); 1299 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1255 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); 1300 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1256 _rtl92ce_poweroff_adapter(hw); 1301 _rtl92ce_poweroff_adapter(hw);
1302
1303 /* after power off we should do iqk again */
1304 rtlpriv->phy.iqk_initialized = false;
1257} 1305}
1258 1306
1259void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, 1307void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
@@ -1355,9 +1403,9 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1355 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i]; 1403 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
1356 else 1404 else
1357 tempval = EEPROM_DEFAULT_HT40_2SDIFF; 1405 tempval = EEPROM_DEFAULT_HT40_2SDIFF;
1358 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] = 1406 rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
1359 (tempval & 0xf); 1407 (tempval & 0xf);
1360 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] = 1408 rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
1361 ((tempval & 0xf0) >> 4); 1409 ((tempval & 0xf0) >> 4);
1362 } 1410 }
1363 1411
@@ -1381,7 +1429,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1381 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n", 1429 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
1382 rf_path, i, 1430 rf_path, i,
1383 rtlefuse-> 1431 rtlefuse->
1384 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]); 1432 eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
1385 1433
1386 for (rf_path = 0; rf_path < 2; rf_path++) { 1434 for (rf_path = 0; rf_path < 2; rf_path++) {
1387 for (i = 0; i < 14; i++) { 1435 for (i = 0; i < 14; i++) {
@@ -1396,14 +1444,14 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1396 if ((rtlefuse-> 1444 if ((rtlefuse->
1397 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] - 1445 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
1398 rtlefuse-> 1446 rtlefuse->
1399 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index]) 1447 eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
1400 > 0) { 1448 > 0) {
1401 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 1449 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
1402 rtlefuse-> 1450 rtlefuse->
1403 eeprom_chnlarea_txpwr_ht40_1s[rf_path] 1451 eeprom_chnlarea_txpwr_ht40_1s[rf_path]
1404 [index] - 1452 [index] -
1405 rtlefuse-> 1453 rtlefuse->
1406 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path] 1454 eprom_chnl_txpwr_ht40_2sdf[rf_path]
1407 [index]; 1455 [index];
1408 } else { 1456 } else {
1409 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0; 1457 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -1912,16 +1960,16 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
1912 ratr_bitmap &= 0x0f0ff0ff; 1960 ratr_bitmap &= 0x0f0ff0ff;
1913 break; 1961 break;
1914 } 1962 }
1963 sta_entry->ratr_index = ratr_index;
1964
1915 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1965 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1916 "ratr_bitmap :%x\n", ratr_bitmap); 1966 "ratr_bitmap :%x\n", ratr_bitmap);
1917 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | 1967 *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
1918 (ratr_index << 28); 1968 (ratr_index << 28);
1919 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 1969 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
1920 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 1970 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1921 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", 1971 "Rate_index:%x, ratr_val:%x, %5phC\n",
1922 ratr_index, ratr_bitmap, 1972 ratr_index, ratr_bitmap, rate_mask);
1923 rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
1924 rate_mask[4]);
1925 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 1973 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
1926 1974
1927 if (macid != 0) 1975 if (macid != 0)
@@ -2176,7 +2224,7 @@ static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
2176 2224
2177 if (rtlpcipriv->bt_coexist.reg_bt_iso == 2) 2225 if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
2178 rtlpcipriv->bt_coexist.bt_ant_isolation = 2226 rtlpcipriv->bt_coexist.bt_ant_isolation =
2179 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation; 2227 rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
2180 else 2228 else
2181 rtlpcipriv->bt_coexist.bt_ant_isolation = 2229 rtlpcipriv->bt_coexist.bt_ant_isolation =
2182 rtlpcipriv->bt_coexist.reg_bt_iso; 2230 rtlpcipriv->bt_coexist.reg_bt_iso;
@@ -2207,23 +2255,22 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2207 bool auto_load_fail, u8 *hwinfo) 2255 bool auto_load_fail, u8 *hwinfo)
2208{ 2256{
2209 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); 2257 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
2210 u8 value; 2258 u8 val;
2211 2259
2212 if (!auto_load_fail) { 2260 if (!auto_load_fail) {
2213 rtlpcipriv->bt_coexist.eeprom_bt_coexist = 2261 rtlpcipriv->bt_coexist.eeprom_bt_coexist =
2214 ((hwinfo[RF_OPTION1] & 0xe0) >> 5); 2262 ((hwinfo[RF_OPTION1] & 0xe0) >> 5);
2215 value = hwinfo[RF_OPTION4]; 2263 val = hwinfo[RF_OPTION4];
2216 rtlpcipriv->bt_coexist.eeprom_bt_type = ((value & 0xe) >> 1); 2264 rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
2217 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1); 2265 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
2218 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation = 2266 rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
2219 ((value & 0x10) >> 4);
2220 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = 2267 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
2221 ((value & 0x20) >> 5); 2268 ((val & 0x20) >> 5);
2222 } else { 2269 } else {
2223 rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0; 2270 rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
2224 rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE; 2271 rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
2225 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2; 2272 rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
2226 rtlpcipriv->bt_coexist.eeprom_bt_ant_isolation = 0; 2273 rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
2227 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED; 2274 rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
2228 } 2275 }
2229 2276
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
index 88deae67cc14..73262ca3864b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -82,6 +82,8 @@ bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
82 82
83 if (is92c) 83 if (is92c)
84 rtl_write_byte(rtlpriv, 0x14, 0x71); 84 rtl_write_byte(rtlpriv, 0x14, 0x71);
85 else
86 rtl_write_byte(rtlpriv, 0x04CA, 0x0A);
85 return rtstatus; 87 return rtstatus;
86} 88}
87 89
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
index 54c7614958a8..a9c406f33d0a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -97,15 +97,12 @@ void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
97 } 97 }
98 98
99 if (rtlefuse->eeprom_regulatory == 0) { 99 if (rtlefuse->eeprom_regulatory == 0) {
100 tmpval = 100 tmpval = (rtlphy->mcs_offset[0][6]) +
101 (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + 101 (rtlphy->mcs_offset[0][7] << 8);
102 (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
103 8);
104 tx_agc[RF90_PATH_A] += tmpval; 102 tx_agc[RF90_PATH_A] += tmpval;
105 103
106 tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + 104 tmpval = (rtlphy->mcs_offset[0][14]) +
107 (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 105 (rtlphy->mcs_offset[0][15] << 24);
108 24);
109 tx_agc[RF90_PATH_B] += tmpval; 106 tx_agc[RF90_PATH_B] += tmpval;
110 } 107 }
111 } 108 }
@@ -209,8 +206,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
209 case 0: 206 case 0:
210 chnlgroup = 0; 207 chnlgroup = 0;
211 208
212 writeVal = 209 writeVal = rtlphy->mcs_offset[chnlgroup][index +
213 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
214 (rf ? 8 : 0)] 210 (rf ? 8 : 0)]
215 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); 211 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
216 212
@@ -240,8 +236,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
240 chnlgroup++; 236 chnlgroup++;
241 } 237 }
242 238
243 writeVal = 239 writeVal = rtlphy->mcs_offset[chnlgroup]
244 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
245 [index + (rf ? 8 : 0)] + ((index < 2) ? 240 [index + (rf ? 8 : 0)] + ((index < 2) ?
246 powerBase0[rf] : 241 powerBase0[rf] :
247 powerBase1[rf]); 242 powerBase1[rf]);
@@ -276,8 +271,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
276 1]); 271 1]);
277 } 272 }
278 for (i = 0; i < 4; i++) { 273 for (i = 0; i < 4; i++) {
279 pwr_diff_limit[i] = 274 pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
280 (u8) ((rtlphy->mcs_txpwrlevel_origoffset
281 [chnlgroup][index + 275 [chnlgroup][index +
282 (rf ? 8 : 0)] & (0x7f << (i * 8))) >> 276 (rf ? 8 : 0)] & (0x7f << (i * 8))) >>
283 (i * 8)); 277 (i * 8));
@@ -317,8 +311,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
317 break; 311 break;
318 default: 312 default:
319 chnlgroup = 0; 313 chnlgroup = 0;
320 writeVal = 314 writeVal = rtlphy->mcs_offset[chnlgroup]
321 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
322 [index + (rf ? 8 : 0)] 315 [index + (rf ? 8 : 0)]
323 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); 316 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
324 317
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index ea2e1bd847c8..49f663bd93ff 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -162,12 +162,10 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
162 162
163 /* request fw */ 163 /* request fw */
164 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && 164 if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
165 !IS_92C_SERIAL(rtlhal->version)) { 165 !IS_92C_SERIAL(rtlhal->version))
166 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; 166 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin";
167 } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { 167 else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
168 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; 168 rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin";
169 pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n");
170 }
171 169
172 rtlpriv->max_fw_size = 0x4000; 170 rtlpriv->max_fw_size = 0x4000;
173 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); 171 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
@@ -374,14 +372,7 @@ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
374MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); 372MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
375MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 373MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
376 374
377static const struct dev_pm_ops rtlwifi_pm_ops = { 375static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
378 .suspend = rtl_pci_suspend,
379 .resume = rtl_pci_resume,
380 .freeze = rtl_pci_suspend,
381 .thaw = rtl_pci_resume,
382 .poweroff = rtl_pci_suspend,
383 .restore = rtl_pci_resume,
384};
385 376
386static struct pci_driver rtl92ce_driver = { 377static struct pci_driver rtl92ce_driver = {
387 .name = KBUILD_MODNAME, 378 .name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 390d6d4fcaa0..173424756149 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -127,11 +127,11 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
127{ 127{
128 struct rtl_priv *rtlpriv = rtl_priv(hw); 128 struct rtl_priv *rtlpriv = rtl_priv(hw);
129 struct phy_sts_cck_8192s_t *cck_buf; 129 struct phy_sts_cck_8192s_t *cck_buf;
130 struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
130 s8 rx_pwr_all = 0, rx_pwr[4]; 131 s8 rx_pwr_all = 0, rx_pwr[4];
131 u8 evm, pwdb_all, rf_rx_num = 0; 132 u8 evm, pwdb_all, rf_rx_num = 0;
132 u8 i, max_spatial_stream; 133 u8 i, max_spatial_stream;
133 u32 rssi, total_rssi = 0; 134 u32 rssi, total_rssi = 0;
134 bool in_powersavemode = false;
135 bool is_cck_rate; 135 bool is_cck_rate;
136 136
137 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); 137 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
@@ -140,14 +140,14 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
140 pstats->is_cck = is_cck_rate; 140 pstats->is_cck = is_cck_rate;
141 pstats->packet_beacon = packet_beacon; 141 pstats->packet_beacon = packet_beacon;
142 pstats->is_cck = is_cck_rate; 142 pstats->is_cck = is_cck_rate;
143 pstats->rx_mimo_signalquality[0] = -1; 143 pstats->rx_mimo_sig_qual[0] = -1;
144 pstats->rx_mimo_signalquality[1] = -1; 144 pstats->rx_mimo_sig_qual[1] = -1;
145 145
146 if (is_cck_rate) { 146 if (is_cck_rate) {
147 u8 report, cck_highpwr; 147 u8 report, cck_highpwr;
148 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; 148 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
149 149
150 if (!in_powersavemode) 150 if (ppsc->rfpwr_state == ERFON)
151 cck_highpwr = (u8) rtl_get_bbreg(hw, 151 cck_highpwr = (u8) rtl_get_bbreg(hw,
152 RFPGA0_XA_HSSIPARAMETER2, 152 RFPGA0_XA_HSSIPARAMETER2,
153 BIT(9)); 153 BIT(9));
@@ -211,8 +211,8 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
211 } 211 }
212 212
213 pstats->signalquality = sq; 213 pstats->signalquality = sq;
214 pstats->rx_mimo_signalquality[0] = sq; 214 pstats->rx_mimo_sig_qual[0] = sq;
215 pstats->rx_mimo_signalquality[1] = -1; 215 pstats->rx_mimo_sig_qual[1] = -1;
216 } 216 }
217 } else { 217 } else {
218 rtlpriv->dm.rfpath_rxenable[0] = 218 rtlpriv->dm.rfpath_rxenable[0] =
@@ -251,8 +251,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
251 if (i == 0) 251 if (i == 0)
252 pstats->signalquality = 252 pstats->signalquality =
253 (u8) (evm & 0xff); 253 (u8) (evm & 0xff);
254 pstats->rx_mimo_signalquality[i] = 254 pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
255 (u8) (evm & 0xff);
256 } 255 }
257 } 256 }
258 } 257 }
@@ -362,36 +361,31 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
362{ 361{
363 struct rtl_priv *rtlpriv = rtl_priv(hw); 362 struct rtl_priv *rtlpriv = rtl_priv(hw);
364 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 363 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
365 long undecorated_smoothed_pwdb; 364 long undec_sm_pwdb;
366 365
367 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 366 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
368 return; 367 return;
369 } else { 368 } else {
370 undecorated_smoothed_pwdb = 369 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
371 rtlpriv->dm.undecorated_smoothed_pwdb;
372 } 370 }
373 371
374 if (pstats->packet_toself || pstats->packet_beacon) { 372 if (pstats->packet_toself || pstats->packet_beacon) {
375 if (undecorated_smoothed_pwdb < 0) 373 if (undec_sm_pwdb < 0)
376 undecorated_smoothed_pwdb = pstats->rx_pwdb_all; 374 undec_sm_pwdb = pstats->rx_pwdb_all;
377 375
378 if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) { 376 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
379 undecorated_smoothed_pwdb = 377 undec_sm_pwdb = (((undec_sm_pwdb) *
380 (((undecorated_smoothed_pwdb) *
381 (RX_SMOOTH_FACTOR - 1)) + 378 (RX_SMOOTH_FACTOR - 1)) +
382 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 379 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
383 380
384 undecorated_smoothed_pwdb = undecorated_smoothed_pwdb 381 undec_sm_pwdb += 1;
385 + 1;
386 } else { 382 } else {
387 undecorated_smoothed_pwdb = 383 undec_sm_pwdb = (((undec_sm_pwdb) *
388 (((undecorated_smoothed_pwdb) *
389 (RX_SMOOTH_FACTOR - 1)) + 384 (RX_SMOOTH_FACTOR - 1)) +
390 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 385 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
391 } 386 }
392 387
393 rtlpriv->dm.undecorated_smoothed_pwdb = 388 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
394 undecorated_smoothed_pwdb;
395 _rtl92ce_update_rxsignalstatistics(hw, pstats); 389 _rtl92ce_update_rxsignalstatistics(hw, pstats);
396 } 390 }
397} 391}
@@ -438,15 +432,14 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
438 for (n_spatialstream = 0; n_spatialstream < 2; 432 for (n_spatialstream = 0; n_spatialstream < 2;
439 n_spatialstream++) { 433 n_spatialstream++) {
440 if (pstats-> 434 if (pstats->
441 rx_mimo_signalquality[n_spatialstream] != 435 rx_mimo_sig_qual[n_spatialstream] != -1) {
442 -1) {
443 if (rtlpriv->stats. 436 if (rtlpriv->stats.
444 rx_evm_percentage[n_spatialstream] 437 rx_evm_percentage[n_spatialstream]
445 == 0) { 438 == 0) {
446 rtlpriv->stats. 439 rtlpriv->stats.
447 rx_evm_percentage 440 rx_evm_percentage
448 [n_spatialstream] = 441 [n_spatialstream] =
449 pstats->rx_mimo_signalquality 442 pstats->rx_mimo_sig_qual
450 [n_spatialstream]; 443 [n_spatialstream];
451 } 444 }
452 445
@@ -456,8 +449,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
456 stats.rx_evm_percentage 449 stats.rx_evm_percentage
457 [n_spatialstream] * 450 [n_spatialstream] *
458 (RX_SMOOTH_FACTOR - 1)) + 451 (RX_SMOOTH_FACTOR - 1)) +
459 (pstats-> 452 (pstats->rx_mimo_sig_qual
460 rx_mimo_signalquality
461 [n_spatialstream] * 1)) / 453 [n_spatialstream] * 1)) /
462 (RX_SMOOTH_FACTOR); 454 (RX_SMOOTH_FACTOR);
463 } 455 }
@@ -567,7 +559,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
567 if (GET_RX_DESC_RXHT(pdesc)) 559 if (GET_RX_DESC_RXHT(pdesc))
568 rx_status->flag |= RX_FLAG_HT; 560 rx_status->flag |= RX_FLAG_HT;
569 561
570 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 562 rx_status->flag |= RX_FLAG_MACTIME_START;
571 563
572 if (stats->decrypted) 564 if (stats->decrypted)
573 rx_status->flag |= RX_FLAG_DECRYPTED; 565 rx_status->flag |= RX_FLAG_DECRYPTED;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
index 6fd39eaf361e..16a0b9e59acf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c
@@ -39,7 +39,7 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
39 struct rtl_priv *rtlpriv = rtl_priv(hw); 39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_phy *rtlphy = &(rtlpriv->phy); 40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
41 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 41 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
42 long undecorated_smoothed_pwdb; 42 long undec_sm_pwdb;
43 43
44 if (!rtlpriv->dm.dynamic_txpower_enable) 44 if (!rtlpriv->dm.dynamic_txpower_enable)
45 return; 45 return;
@@ -50,7 +50,7 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
50 } 50 }
51 51
52 if ((mac->link_state < MAC80211_LINKED) && 52 if ((mac->link_state < MAC80211_LINKED) &&
53 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 53 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
54 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 54 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
55 "Not connected to any\n"); 55 "Not connected to any\n");
56 56
@@ -62,41 +62,35 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
62 62
63 if (mac->link_state >= MAC80211_LINKED) { 63 if (mac->link_state >= MAC80211_LINKED) {
64 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 64 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
65 undecorated_smoothed_pwdb = 65 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
66 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
67 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 66 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
68 "AP Client PWDB = 0x%lx\n", 67 "AP Client PWDB = 0x%lx\n",
69 undecorated_smoothed_pwdb); 68 undec_sm_pwdb);
70 } else { 69 } else {
71 undecorated_smoothed_pwdb = 70 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
72 rtlpriv->dm.undecorated_smoothed_pwdb;
73 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 71 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
74 "STA Default Port PWDB = 0x%lx\n", 72 "STA Default Port PWDB = 0x%lx\n",
75 undecorated_smoothed_pwdb); 73 undec_sm_pwdb);
76 } 74 }
77 } else { 75 } else {
78 undecorated_smoothed_pwdb = 76 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
79 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
80 77
81 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 78 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
82 "AP Ext Port PWDB = 0x%lx\n", 79 "AP Ext Port PWDB = 0x%lx\n",
83 undecorated_smoothed_pwdb); 80 undec_sm_pwdb);
84 } 81 }
85 82
86 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { 83 if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
87 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 84 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
88 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 85 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
89 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); 86 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
90 } else if ((undecorated_smoothed_pwdb < 87 } else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
91 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && 88 (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
92 (undecorated_smoothed_pwdb >=
93 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
94 89
95 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; 90 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
96 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 91 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
97 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"); 92 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
98 } else if (undecorated_smoothed_pwdb < 93 } else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
99 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
100 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; 94 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
101 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 95 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
102 "TXHIGHPWRLEVEL_NORMAL\n"); 96 "TXHIGHPWRLEVEL_NORMAL\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 4bbb711a36c5..b1ccff474c79 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -152,9 +152,9 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
152 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i]; 152 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
153 else 153 else
154 tempval = EEPROM_DEFAULT_HT40_2SDIFF; 154 tempval = EEPROM_DEFAULT_HT40_2SDIFF;
155 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] = 155 rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
156 (tempval & 0xf); 156 (tempval & 0xf);
157 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] = 157 rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
158 ((tempval & 0xf0) >> 4); 158 ((tempval & 0xf0) >> 4);
159 } 159 }
160 for (rf_path = 0; rf_path < 2; rf_path++) 160 for (rf_path = 0; rf_path < 2; rf_path++)
@@ -177,7 +177,7 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
177 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n", 177 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
178 rf_path, i, 178 rf_path, i,
179 rtlefuse-> 179 rtlefuse->
180 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i]); 180 eprom_chnl_txpwr_ht40_2sdf[rf_path][i]);
181 for (rf_path = 0; rf_path < 2; rf_path++) { 181 for (rf_path = 0; rf_path < 2; rf_path++) {
182 for (i = 0; i < 14; i++) { 182 for (i = 0; i < 14; i++) {
183 index = _rtl92c_get_chnl_group((u8) i); 183 index = _rtl92c_get_chnl_group((u8) i);
@@ -189,13 +189,13 @@ static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
189 if ((rtlefuse-> 189 if ((rtlefuse->
190 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] - 190 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
191 rtlefuse-> 191 rtlefuse->
192 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index]) 192 eprom_chnl_txpwr_ht40_2sdf[rf_path][index])
193 > 0) { 193 > 0) {
194 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 194 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
195 rtlefuse-> 195 rtlefuse->
196 eeprom_chnlarea_txpwr_ht40_1s[rf_path] 196 eeprom_chnlarea_txpwr_ht40_1s[rf_path]
197 [index] - rtlefuse-> 197 [index] - rtlefuse->
198 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path] 198 eprom_chnl_txpwr_ht40_2sdf[rf_path]
199 [index]; 199 [index];
200 } else { 200 } else {
201 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0; 201 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
@@ -2169,10 +2169,8 @@ void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
2169 ratr_index << 28); 2169 ratr_index << 28);
2170 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; 2170 rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
2171 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, 2171 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
2172 "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n", 2172 "Rate_index:%x, ratr_val:%x, %5phC\n",
2173 ratr_index, ratr_bitmap, 2173 ratr_index, ratr_bitmap, rate_mask);
2174 rate_mask[0], rate_mask[1], rate_mask[2], rate_mask[3],
2175 rate_mask[4]);
2176 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); 2174 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
2177} 2175}
2178 2176
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 7e91c76582ec..32ff959a0251 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -46,7 +46,7 @@
46 46
47#define LINK_Q ui_link_quality 47#define LINK_Q ui_link_quality
48#define RX_EVM rx_evm_percentage 48#define RX_EVM rx_evm_percentage
49#define RX_SIGQ rx_mimo_signalquality 49#define RX_SIGQ rx_mimo_sig_qual
50 50
51 51
52void rtl92c_read_chip_version(struct ieee80211_hw *hw) 52void rtl92c_read_chip_version(struct ieee80211_hw *hw)
@@ -982,32 +982,27 @@ static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
982{ 982{
983 struct rtl_priv *rtlpriv = rtl_priv(hw); 983 struct rtl_priv *rtlpriv = rtl_priv(hw);
984 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 984 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
985 long undecorated_smoothed_pwdb = 0; 985 long undec_sm_pwdb = 0;
986 986
987 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 987 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
988 return; 988 return;
989 } else { 989 } else {
990 undecorated_smoothed_pwdb = 990 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
991 rtlpriv->dm.undecorated_smoothed_pwdb;
992 } 991 }
993 if (pstats->packet_toself || pstats->packet_beacon) { 992 if (pstats->packet_toself || pstats->packet_beacon) {
994 if (undecorated_smoothed_pwdb < 0) 993 if (undec_sm_pwdb < 0)
995 undecorated_smoothed_pwdb = pstats->rx_pwdb_all; 994 undec_sm_pwdb = pstats->rx_pwdb_all;
996 if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) { 995 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
997 undecorated_smoothed_pwdb = 996 undec_sm_pwdb = (((undec_sm_pwdb) *
998 (((undecorated_smoothed_pwdb) *
999 (RX_SMOOTH_FACTOR - 1)) + 997 (RX_SMOOTH_FACTOR - 1)) +
1000 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 998 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1001 undecorated_smoothed_pwdb = undecorated_smoothed_pwdb 999 undec_sm_pwdb += 1;
1002 + 1;
1003 } else { 1000 } else {
1004 undecorated_smoothed_pwdb = 1001 undec_sm_pwdb = (((undec_sm_pwdb) *
1005 (((undecorated_smoothed_pwdb) *
1006 (RX_SMOOTH_FACTOR - 1)) + 1002 (RX_SMOOTH_FACTOR - 1)) +
1007 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 1003 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
1008 } 1004 }
1009 rtlpriv->dm.undecorated_smoothed_pwdb = 1005 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
1010 undecorated_smoothed_pwdb;
1011 _rtl92c_update_rxsignalstatistics(hw, pstats); 1006 _rtl92c_update_rxsignalstatistics(hw, pstats);
1012 } 1007 }
1013} 1008}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 506b9a078ed1..953f1a0f8532 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -115,15 +115,11 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
115 (ppowerlevel[idx1] << 24); 115 (ppowerlevel[idx1] << 24);
116 } 116 }
117 if (rtlefuse->eeprom_regulatory == 0) { 117 if (rtlefuse->eeprom_regulatory == 0) {
118 tmpval = (rtlphy->mcs_txpwrlevel_origoffset 118 tmpval = (rtlphy->mcs_offset[0][6]) +
119 [0][6]) + 119 (rtlphy->mcs_offset[0][7] << 8);
120 (rtlphy->mcs_txpwrlevel_origoffset
121 [0][7] << 8);
122 tx_agc[RF90_PATH_A] += tmpval; 120 tx_agc[RF90_PATH_A] += tmpval;
123 tmpval = (rtlphy->mcs_txpwrlevel_origoffset 121 tmpval = (rtlphy->mcs_offset[0][14]) +
124 [0][14]) + 122 (rtlphy->mcs_offset[0][15] << 24);
125 (rtlphy->mcs_txpwrlevel_origoffset
126 [0][15] << 24);
127 tx_agc[RF90_PATH_B] += tmpval; 123 tx_agc[RF90_PATH_B] += tmpval;
128 } 124 }
129 } 125 }
@@ -215,7 +211,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
215 switch (rtlefuse->eeprom_regulatory) { 211 switch (rtlefuse->eeprom_regulatory) {
216 case 0: 212 case 0:
217 chnlgroup = 0; 213 chnlgroup = 0;
218 writeVal = rtlphy->mcs_txpwrlevel_origoffset 214 writeVal = rtlphy->mcs_offset
219 [chnlgroup][index + (rf ? 8 : 0)] 215 [chnlgroup][index + (rf ? 8 : 0)]
220 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); 216 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
221 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 217 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
@@ -238,8 +234,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
238 else 234 else
239 chnlgroup += 4; 235 chnlgroup += 4;
240 } 236 }
241 writeVal = rtlphy->mcs_txpwrlevel_origoffset 237 writeVal = rtlphy->mcs_offset[chnlgroup][index +
242 [chnlgroup][index +
243 (rf ? 8 : 0)] + 238 (rf ? 8 : 0)] +
244 ((index < 2) ? powerBase0[rf] : 239 ((index < 2) ? powerBase0[rf] :
245 powerBase1[rf]); 240 powerBase1[rf]);
@@ -271,8 +266,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
271 [channel - 1]); 266 [channel - 1]);
272 } 267 }
273 for (i = 0; i < 4; i++) { 268 for (i = 0; i < 4; i++) {
274 pwr_diff_limit[i] = 269 pwr_diff_limit[i] = (u8) ((rtlphy->mcs_offset
275 (u8) ((rtlphy->mcs_txpwrlevel_origoffset
276 [chnlgroup][index + (rf ? 8 : 0)] 270 [chnlgroup][index + (rf ? 8 : 0)]
277 & (0x7f << (i * 8))) >> (i * 8)); 271 & (0x7f << (i * 8))) >> (i * 8));
278 if (rtlphy->current_chan_bw == 272 if (rtlphy->current_chan_bw ==
@@ -306,7 +300,7 @@ static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
306 break; 300 break;
307 default: 301 default:
308 chnlgroup = 0; 302 chnlgroup = 0;
309 writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] 303 writeVal = rtlphy->mcs_offset[chnlgroup]
310 [index + (rf ? 8 : 0)] + ((index < 2) ? 304 [index + (rf ? 8 : 0)] + ((index < 2) ?
311 powerBase0[rf] : powerBase1[rf]); 305 powerBase0[rf] : powerBase1[rf]);
312 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 306 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 6e66f04c363f..b6222eedb835 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -334,7 +334,7 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
334 rx_status->flag |= RX_FLAG_40MHZ; 334 rx_status->flag |= RX_FLAG_40MHZ;
335 if (GET_RX_DESC_RX_HT(pdesc)) 335 if (GET_RX_DESC_RX_HT(pdesc))
336 rx_status->flag |= RX_FLAG_HT; 336 rx_status->flag |= RX_FLAG_HT;
337 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 337 rx_status->flag |= RX_FLAG_MACTIME_START;
338 if (stats->decrypted) 338 if (stats->decrypted)
339 rx_status->flag |= RX_FLAG_DECRYPTED; 339 rx_status->flag |= RX_FLAG_DECRYPTED;
340 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 340 rx_status->rate_idx = rtlwifi_rate_mapping(hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index ed868c396c25..fd8df233ff22 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -35,7 +35,7 @@
35#include "dm.h" 35#include "dm.h"
36#include "fw.h" 36#include "fw.h"
37 37
38#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb 38#define UNDEC_SM_PWDB entry_min_undec_sm_pwdb
39 39
40static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { 40static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
41 0x7f8001fe, /* 0, +6.0dB */ 41 0x7f8001fe, /* 0, +6.0dB */
@@ -164,18 +164,18 @@ static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
165 de_digtable->cur_igvalue = 0x20; 165 de_digtable->cur_igvalue = 0x20;
166 de_digtable->pre_igvalue = 0x0; 166 de_digtable->pre_igvalue = 0x0;
167 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT; 167 de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT; 168 de_digtable->presta_cstate = DIG_STA_DISCONNECT;
169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 169 de_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
174 de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER; 174 de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
175 de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER; 175 de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
176 de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT; 176 de_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
177 de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX; 177 de_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
178 de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN; 178 de_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
179 de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI; 179 de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
180 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX; 180 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
181 de_digtable->large_fa_hit = 0; 181 de_digtable->large_fa_hit = 0;
@@ -273,35 +273,34 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
273 /* Determine the minimum RSSI */ 273 /* Determine the minimum RSSI */
274 if ((mac->link_state < MAC80211_LINKED) && 274 if ((mac->link_state < MAC80211_LINKED) &&
275 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) { 275 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
276 de_digtable->min_undecorated_pwdb_for_dm = 0; 276 de_digtable->min_undec_pwdb_for_dm = 0;
277 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 277 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
278 "Not connected to any\n"); 278 "Not connected to any\n");
279 } 279 }
280 if (mac->link_state >= MAC80211_LINKED) { 280 if (mac->link_state >= MAC80211_LINKED) {
281 if (mac->opmode == NL80211_IFTYPE_AP || 281 if (mac->opmode == NL80211_IFTYPE_AP ||
282 mac->opmode == NL80211_IFTYPE_ADHOC) { 282 mac->opmode == NL80211_IFTYPE_ADHOC) {
283 de_digtable->min_undecorated_pwdb_for_dm = 283 de_digtable->min_undec_pwdb_for_dm =
284 rtlpriv->dm.UNDEC_SM_PWDB; 284 rtlpriv->dm.UNDEC_SM_PWDB;
285 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 285 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
286 "AP Client PWDB = 0x%lx\n", 286 "AP Client PWDB = 0x%lx\n",
287 rtlpriv->dm.UNDEC_SM_PWDB); 287 rtlpriv->dm.UNDEC_SM_PWDB);
288 } else { 288 } else {
289 de_digtable->min_undecorated_pwdb_for_dm = 289 de_digtable->min_undec_pwdb_for_dm =
290 rtlpriv->dm.undecorated_smoothed_pwdb; 290 rtlpriv->dm.undec_sm_pwdb;
291 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 291 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
292 "STA Default Port PWDB = 0x%x\n", 292 "STA Default Port PWDB = 0x%x\n",
293 de_digtable->min_undecorated_pwdb_for_dm); 293 de_digtable->min_undec_pwdb_for_dm);
294 } 294 }
295 } else { 295 } else {
296 de_digtable->min_undecorated_pwdb_for_dm = 296 de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
297 rtlpriv->dm.UNDEC_SM_PWDB;
298 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 297 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
299 "AP Ext Port or disconnect PWDB = 0x%x\n", 298 "AP Ext Port or disconnect PWDB = 0x%x\n",
300 de_digtable->min_undecorated_pwdb_for_dm); 299 de_digtable->min_undec_pwdb_for_dm);
301 } 300 }
302 301
303 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", 302 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
304 de_digtable->min_undecorated_pwdb_for_dm); 303 de_digtable->min_undec_pwdb_for_dm);
305} 304}
306 305
307static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) 306static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -310,16 +309,16 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
310 struct dig_t *de_digtable = &rtlpriv->dm_digtable; 309 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
311 unsigned long flag = 0; 310 unsigned long flag = 0;
312 311
313 if (de_digtable->cursta_connectstate == DIG_STA_CONNECT) { 312 if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 313 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25) 314 if (de_digtable->min_undec_pwdb_for_dm <= 25)
316 de_digtable->cur_cck_pd_state = 315 de_digtable->cur_cck_pd_state =
317 CCK_PD_STAGE_LOWRSSI; 316 CCK_PD_STAGE_LOWRSSI;
318 else 317 else
319 de_digtable->cur_cck_pd_state = 318 de_digtable->cur_cck_pd_state =
320 CCK_PD_STAGE_HIGHRSSI; 319 CCK_PD_STAGE_HIGHRSSI;
321 } else { 320 } else {
322 if (de_digtable->min_undecorated_pwdb_for_dm <= 20) 321 if (de_digtable->min_undec_pwdb_for_dm <= 20)
323 de_digtable->cur_cck_pd_state = 322 de_digtable->cur_cck_pd_state =
324 CCK_PD_STAGE_LOWRSSI; 323 CCK_PD_STAGE_LOWRSSI;
325 else 324 else
@@ -342,7 +341,7 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state; 341 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
343 } 342 }
344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", 343 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
345 de_digtable->cursta_connectstate == DIG_STA_CONNECT ? 344 de_digtable->cursta_cstate == DIG_STA_CONNECT ?
346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); 345 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", 346 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 347 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
@@ -358,9 +357,9 @@ void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
358 struct dig_t *de_digtable = &rtlpriv->dm_digtable; 357 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
359 358
360 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 359 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
361 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 360 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
362 de_digtable->cur_igvalue, de_digtable->pre_igvalue, 361 de_digtable->cur_igvalue, de_digtable->pre_igvalue,
363 de_digtable->backoff_val); 362 de_digtable->back_val);
364 if (de_digtable->dig_enable_flag == false) { 363 if (de_digtable->dig_enable_flag == false) {
365 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); 364 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
366 de_digtable->pre_igvalue = 0x17; 365 de_digtable->pre_igvalue = 0x17;
@@ -382,13 +381,13 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
382 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) && 381 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
383 (rtlpriv->mac80211.vendor == PEER_CISCO)) { 382 (rtlpriv->mac80211.vendor == PEER_CISCO)) {
384 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); 383 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
385 if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50 384 if (de_digtable->last_min_undec_pwdb_for_dm >= 50
386 && de_digtable->min_undecorated_pwdb_for_dm < 50) { 385 && de_digtable->min_undec_pwdb_for_dm < 50) {
387 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); 386 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
388 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 387 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
389 "Early Mode Off\n"); 388 "Early Mode Off\n");
390 } else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 && 389 } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
391 de_digtable->min_undecorated_pwdb_for_dm > 55) { 390 de_digtable->min_undec_pwdb_for_dm > 55) {
392 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); 391 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
393 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 392 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
394 "Early Mode On\n"); 393 "Early Mode On\n");
@@ -409,8 +408,8 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
409 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); 408 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
410 if (rtlpriv->rtlhal.earlymode_enable) { 409 if (rtlpriv->rtlhal.earlymode_enable) {
411 rtl92d_early_mode_enabled(rtlpriv); 410 rtl92d_early_mode_enabled(rtlpriv);
412 de_digtable->last_min_undecorated_pwdb_for_dm = 411 de_digtable->last_min_undec_pwdb_for_dm =
413 de_digtable->min_undecorated_pwdb_for_dm; 412 de_digtable->min_undec_pwdb_for_dm;
414 } 413 }
415 if (!rtlpriv->dm.dm_initialgain_enable) 414 if (!rtlpriv->dm.dm_initialgain_enable)
416 return; 415 return;
@@ -428,9 +427,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); 427 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
429 /* Decide the current status and if modify initial gain or not */ 428 /* Decide the current status and if modify initial gain or not */
430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) 429 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
431 de_digtable->cursta_connectstate = DIG_STA_CONNECT; 430 de_digtable->cursta_cstate = DIG_STA_CONNECT;
432 else 431 else
433 de_digtable->cursta_connectstate = DIG_STA_DISCONNECT; 432 de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
434 433
435 /* adjust initial gain according to false alarm counter */ 434 /* adjust initial gain according to false alarm counter */
436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) 435 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -522,7 +521,7 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
522 struct rtl_phy *rtlphy = &(rtlpriv->phy); 521 struct rtl_phy *rtlphy = &(rtlpriv->phy);
523 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 522 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
524 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 523 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
525 long undecorated_smoothed_pwdb; 524 long undec_sm_pwdb;
526 525
527 if ((!rtlpriv->dm.dynamic_txpower_enable) 526 if ((!rtlpriv->dm.dynamic_txpower_enable)
528 || rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { 527 || rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
@@ -539,62 +538,62 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
539 } 538 }
540 if (mac->link_state >= MAC80211_LINKED) { 539 if (mac->link_state >= MAC80211_LINKED) {
541 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 540 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
542 undecorated_smoothed_pwdb = 541 undec_sm_pwdb =
543 rtlpriv->dm.UNDEC_SM_PWDB; 542 rtlpriv->dm.UNDEC_SM_PWDB;
544 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 543 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
545 "IBSS Client PWDB = 0x%lx\n", 544 "IBSS Client PWDB = 0x%lx\n",
546 undecorated_smoothed_pwdb); 545 undec_sm_pwdb);
547 } else { 546 } else {
548 undecorated_smoothed_pwdb = 547 undec_sm_pwdb =
549 rtlpriv->dm.undecorated_smoothed_pwdb; 548 rtlpriv->dm.undec_sm_pwdb;
550 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 549 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
551 "STA Default Port PWDB = 0x%lx\n", 550 "STA Default Port PWDB = 0x%lx\n",
552 undecorated_smoothed_pwdb); 551 undec_sm_pwdb);
553 } 552 }
554 } else { 553 } else {
555 undecorated_smoothed_pwdb = 554 undec_sm_pwdb =
556 rtlpriv->dm.UNDEC_SM_PWDB; 555 rtlpriv->dm.UNDEC_SM_PWDB;
557 556
558 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 557 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
559 "AP Ext Port PWDB = 0x%lx\n", 558 "AP Ext Port PWDB = 0x%lx\n",
560 undecorated_smoothed_pwdb); 559 undec_sm_pwdb);
561 } 560 }
562 if (rtlhal->current_bandtype == BAND_ON_5G) { 561 if (rtlhal->current_bandtype == BAND_ON_5G) {
563 if (undecorated_smoothed_pwdb >= 0x33) { 562 if (undec_sm_pwdb >= 0x33) {
564 rtlpriv->dm.dynamic_txhighpower_lvl = 563 rtlpriv->dm.dynamic_txhighpower_lvl =
565 TXHIGHPWRLEVEL_LEVEL2; 564 TXHIGHPWRLEVEL_LEVEL2;
566 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD, 565 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
567 "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n"); 566 "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
568 } else if ((undecorated_smoothed_pwdb < 0x33) 567 } else if ((undec_sm_pwdb < 0x33)
569 && (undecorated_smoothed_pwdb >= 0x2b)) { 568 && (undec_sm_pwdb >= 0x2b)) {
570 rtlpriv->dm.dynamic_txhighpower_lvl = 569 rtlpriv->dm.dynamic_txhighpower_lvl =
571 TXHIGHPWRLEVEL_LEVEL1; 570 TXHIGHPWRLEVEL_LEVEL1;
572 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD, 571 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
573 "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n"); 572 "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
574 } else if (undecorated_smoothed_pwdb < 0x2b) { 573 } else if (undec_sm_pwdb < 0x2b) {
575 rtlpriv->dm.dynamic_txhighpower_lvl = 574 rtlpriv->dm.dynamic_txhighpower_lvl =
576 TXHIGHPWRLEVEL_NORMAL; 575 TXHIGHPWRLEVEL_NORMAL;
577 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD, 576 RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
578 "5G:TxHighPwrLevel_Normal\n"); 577 "5G:TxHighPwrLevel_Normal\n");
579 } 578 }
580 } else { 579 } else {
581 if (undecorated_smoothed_pwdb >= 580 if (undec_sm_pwdb >=
582 TX_POWER_NEAR_FIELD_THRESH_LVL2) { 581 TX_POWER_NEAR_FIELD_THRESH_LVL2) {
583 rtlpriv->dm.dynamic_txhighpower_lvl = 582 rtlpriv->dm.dynamic_txhighpower_lvl =
584 TXHIGHPWRLEVEL_LEVEL2; 583 TXHIGHPWRLEVEL_LEVEL2;
585 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 584 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
586 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"); 585 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
587 } else 586 } else
588 if ((undecorated_smoothed_pwdb < 587 if ((undec_sm_pwdb <
589 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) 588 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
590 && (undecorated_smoothed_pwdb >= 589 && (undec_sm_pwdb >=
591 TX_POWER_NEAR_FIELD_THRESH_LVL1)) { 590 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
592 591
593 rtlpriv->dm.dynamic_txhighpower_lvl = 592 rtlpriv->dm.dynamic_txhighpower_lvl =
594 TXHIGHPWRLEVEL_LEVEL1; 593 TXHIGHPWRLEVEL_LEVEL1;
595 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 594 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
596 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"); 595 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
597 } else if (undecorated_smoothed_pwdb < 596 } else if (undec_sm_pwdb <
598 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { 597 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
599 rtlpriv->dm.dynamic_txhighpower_lvl = 598 rtlpriv->dm.dynamic_txhighpower_lvl =
600 TXHIGHPWRLEVEL_NORMAL; 599 TXHIGHPWRLEVEL_NORMAL;
@@ -620,7 +619,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
620 return; 619 return;
621 /* Indicate Rx signal strength to FW. */ 620 /* Indicate Rx signal strength to FW. */
622 if (rtlpriv->dm.useramask) { 621 if (rtlpriv->dm.useramask) {
623 u32 temp = rtlpriv->dm.undecorated_smoothed_pwdb; 622 u32 temp = rtlpriv->dm.undec_sm_pwdb;
624 623
625 temp <<= 16; 624 temp <<= 16;
626 temp |= 0x100; 625 temp |= 0x100;
@@ -629,7 +628,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
629 rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp)); 628 rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *) (&temp));
630 } else { 629 } else {
631 rtl_write_byte(rtlpriv, 0x4fe, 630 rtl_write_byte(rtlpriv, 0x4fe,
632 (u8) rtlpriv->dm.undecorated_smoothed_pwdb); 631 (u8) rtlpriv->dm.undec_sm_pwdb);
633 } 632 }
634} 633}
635 634
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index db0086062d05..33041bd4da81 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -298,13 +298,13 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
298 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, 298 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
299 BIT(8)); 299 BIT(8));
300 if (rfpi_enable) 300 if (rfpi_enable)
301 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, 301 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
302 BLSSIREADBACKDATA); 302 BLSSIREADBACKDATA);
303 else 303 else
304 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, 304 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
305 BLSSIREADBACKDATA); 305 BLSSIREADBACKDATA);
306 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n", 306 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
307 rfpath, pphyreg->rflssi_readback, retvalue); 307 rfpath, pphyreg->rf_rb, retvalue);
308 return retvalue; 308 return retvalue;
309} 309}
310 310
@@ -478,14 +478,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
478 478
479 /* RF switch Control */ 479 /* RF switch Control */
480 /* TR/Ant switch control */ 480 /* TR/Ant switch control */
481 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control = 481 rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
482 RFPGA0_XAB_SWITCHCONTROL; 482 rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
483 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control = 483 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
484 RFPGA0_XAB_SWITCHCONTROL; 484 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
485 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
486 RFPGA0_XCD_SWITCHCONTROL;
487 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
488 RFPGA0_XCD_SWITCHCONTROL;
489 485
490 /* AGC control 1 */ 486 /* AGC control 1 */
491 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; 487 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -500,14 +496,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
500 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; 496 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
501 497
502 /* RX AFE control 1 */ 498 /* RX AFE control 1 */
503 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance = 499 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
504 ROFDM0_XARXIQIMBALANCE; 500 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
505 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance = 501 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
506 ROFDM0_XBRXIQIMBALANCE; 502 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
507 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
508 ROFDM0_XCRXIQIMBALANCE;
509 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
510 ROFDM0_XDRXIQIMBALANCE;
511 503
512 /*RX AFE control 1 */ 504 /*RX AFE control 1 */
513 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; 505 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -516,14 +508,10 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
516 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; 508 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
517 509
518 /* Tx AFE control 1 */ 510 /* Tx AFE control 1 */
519 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance = 511 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATxIQIMBALANCE;
520 ROFDM0_XATxIQIMBALANCE; 512 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTxIQIMBALANCE;
521 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance = 513 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTxIQIMBALANCE;
522 ROFDM0_XBTxIQIMBALANCE; 514 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTxIQIMBALANCE;
523 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
524 ROFDM0_XCTxIQIMBALANCE;
525 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
526 ROFDM0_XDTxIQIMBALANCE;
527 515
528 /* Tx AFE control 2 */ 516 /* Tx AFE control 2 */
529 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE; 517 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATxAFE;
@@ -532,20 +520,14 @@ static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
532 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE; 520 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTxAFE;
533 521
534 /* Tranceiver LSSI Readback SI mode */ 522 /* Tranceiver LSSI Readback SI mode */
535 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = 523 rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
536 RFPGA0_XA_LSSIREADBACK; 524 rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
537 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = 525 rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
538 RFPGA0_XB_LSSIREADBACK; 526 rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
539 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
540 RFPGA0_XC_LSSIREADBACK;
541 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
542 RFPGA0_XD_LSSIREADBACK;
543 527
544 /* Tranceiver LSSI Readback PI mode */ 528 /* Tranceiver LSSI Readback PI mode */
545 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = 529 rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
546 TRANSCEIVERA_HSPI_READBACK; 530 rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
547 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
548 TRANSCEIVERB_HSPI_READBACK;
549} 531}
550 532
551static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, 533static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -702,12 +684,11 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
702 else 684 else
703 return; 685 return;
704 686
705 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data; 687 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
706 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, 688 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
707 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n", 689 "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%ulx\n",
708 rtlphy->pwrgroup_cnt, index, 690 rtlphy->pwrgroup_cnt, index,
709 rtlphy->mcs_txpwrlevel_origoffset 691 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
710 [rtlphy->pwrgroup_cnt][index]);
711 if (index == 13) 692 if (index == 13)
712 rtlphy->pwrgroup_cnt++; 693 rtlphy->pwrgroup_cnt++;
713} 694}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
index 3066a7fb0b57..20144e0b4142 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/rf.c
@@ -106,11 +106,11 @@ void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
106 (ppowerlevel[idx1] << 24); 106 (ppowerlevel[idx1] << 24);
107 } 107 }
108 if (rtlefuse->eeprom_regulatory == 0) { 108 if (rtlefuse->eeprom_regulatory == 0) {
109 tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][6]) + 109 tmpval = (rtlphy->mcs_offset[0][6]) +
110 (rtlphy->mcs_txpwrlevel_origoffset[0][7] << 8); 110 (rtlphy->mcs_offset[0][7] << 8);
111 tx_agc[RF90_PATH_A] += tmpval; 111 tx_agc[RF90_PATH_A] += tmpval;
112 tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) + 112 tmpval = (rtlphy->mcs_offset[0][14]) +
113 (rtlphy->mcs_txpwrlevel_origoffset[0][15] << 24); 113 (rtlphy->mcs_offset[0][15] << 24);
114 tx_agc[RF90_PATH_B] += tmpval; 114 tx_agc[RF90_PATH_B] += tmpval;
115 } 115 }
116 } 116 }
@@ -227,7 +227,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
227 switch (rtlefuse->eeprom_regulatory) { 227 switch (rtlefuse->eeprom_regulatory) {
228 case 0: 228 case 0:
229 chnlgroup = 0; 229 chnlgroup = 0;
230 writeval = rtlphy->mcs_txpwrlevel_origoffset 230 writeval = rtlphy->mcs_offset
231 [chnlgroup][index + 231 [chnlgroup][index +
232 (rf ? 8 : 0)] + ((index < 2) ? 232 (rf ? 8 : 0)] + ((index < 2) ?
233 powerbase0[rf] : 233 powerbase0[rf] :
@@ -247,7 +247,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
247 chnlgroup++; 247 chnlgroup++;
248 else 248 else
249 chnlgroup += 4; 249 chnlgroup += 4;
250 writeval = rtlphy->mcs_txpwrlevel_origoffset 250 writeval = rtlphy->mcs_offset
251 [chnlgroup][index + 251 [chnlgroup][index +
252 (rf ? 8 : 0)] + ((index < 2) ? 252 (rf ? 8 : 0)] + ((index < 2) ?
253 powerbase0[rf] : 253 powerbase0[rf] :
@@ -280,8 +280,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
280 [channel - 1]); 280 [channel - 1]);
281 } 281 }
282 for (i = 0; i < 4; i++) { 282 for (i = 0; i < 4; i++) {
283 pwr_diff_limit[i] = 283 pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset
284 (u8)((rtlphy->mcs_txpwrlevel_origoffset
285 [chnlgroup][index + (rf ? 8 : 0)] & 284 [chnlgroup][index + (rf ? 8 : 0)] &
286 (0x7f << (i * 8))) >> (i * 8)); 285 (0x7f << (i * 8))) >> (i * 8));
287 if (rtlphy->current_chan_bw == 286 if (rtlphy->current_chan_bw ==
@@ -316,8 +315,7 @@ static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
316 break; 315 break;
317 default: 316 default:
318 chnlgroup = 0; 317 chnlgroup = 0;
319 writeval = rtlphy->mcs_txpwrlevel_origoffset 318 writeval = rtlphy->mcs_offset[chnlgroup][index +
320 [chnlgroup][index +
321 (rf ? 8 : 0)] + ((index < 2) ? 319 (rf ? 8 : 0)] + ((index < 2) ?
322 powerbase0[rf] : powerbase1[rf]); 320 powerbase0[rf] : powerbase1[rf]);
323 RTPRINT(rtlpriv, FPHY, PHY_TXPWR, 321 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 480862c07f92..03c6d18b2e07 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -352,7 +352,7 @@ static struct rtl_hal_cfg rtl92de_hal_cfg = {
352 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 352 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
353}; 353};
354 354
355static struct pci_device_id rtl92de_pci_ids[] __devinitdata = { 355static struct pci_device_id rtl92de_pci_ids[] = {
356 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)}, 356 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)},
357 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)}, 357 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)},
358 {}, 358 {},
@@ -378,14 +378,7 @@ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
378MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); 378MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
379MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 379MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
380 380
381static const struct dev_pm_ops rtlwifi_pm_ops = { 381static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
382 .suspend = rtl_pci_suspend,
383 .resume = rtl_pci_resume,
384 .freeze = rtl_pci_suspend,
385 .thaw = rtl_pci_resume,
386 .poweroff = rtl_pci_suspend,
387 .restore = rtl_pci_resume,
388};
389 382
390static struct pci_driver rtl92de_driver = { 383static struct pci_driver rtl92de_driver = {
391 .name = KBUILD_MODNAME, 384 .name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 4686f340b9d6..f9f3861046c1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -132,8 +132,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
132 pstats->packet_toself = packet_toself; 132 pstats->packet_toself = packet_toself;
133 pstats->packet_beacon = packet_beacon; 133 pstats->packet_beacon = packet_beacon;
134 pstats->is_cck = is_cck_rate; 134 pstats->is_cck = is_cck_rate;
135 pstats->rx_mimo_signalquality[0] = -1; 135 pstats->rx_mimo_sig_qual[0] = -1;
136 pstats->rx_mimo_signalquality[1] = -1; 136 pstats->rx_mimo_sig_qual[1] = -1;
137 137
138 if (is_cck_rate) { 138 if (is_cck_rate) {
139 u8 report, cck_highpwr; 139 u8 report, cck_highpwr;
@@ -212,8 +212,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
212 sq = ((64 - sq) * 100) / 44; 212 sq = ((64 - sq) * 100) / 44;
213 } 213 }
214 pstats->signalquality = sq; 214 pstats->signalquality = sq;
215 pstats->rx_mimo_signalquality[0] = sq; 215 pstats->rx_mimo_sig_qual[0] = sq;
216 pstats->rx_mimo_signalquality[1] = -1; 216 pstats->rx_mimo_sig_qual[1] = -1;
217 } 217 }
218 } else { 218 } else {
219 rtlpriv->dm.rfpath_rxenable[0] = true; 219 rtlpriv->dm.rfpath_rxenable[0] = true;
@@ -246,7 +246,7 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
246 if (i == 0) 246 if (i == 0)
247 pstats->signalquality = 247 pstats->signalquality =
248 (u8)(evm & 0xff); 248 (u8)(evm & 0xff);
249 pstats->rx_mimo_signalquality[i] = 249 pstats->rx_mimo_sig_qual[i] =
250 (u8)(evm & 0xff); 250 (u8)(evm & 0xff);
251 } 251 }
252 } 252 }
@@ -345,33 +345,28 @@ static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
345{ 345{
346 struct rtl_priv *rtlpriv = rtl_priv(hw); 346 struct rtl_priv *rtlpriv = rtl_priv(hw);
347 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 347 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
348 long undecorated_smoothed_pwdb; 348 long undec_sm_pwdb;
349 349
350 if (mac->opmode == NL80211_IFTYPE_ADHOC || 350 if (mac->opmode == NL80211_IFTYPE_ADHOC ||
351 mac->opmode == NL80211_IFTYPE_AP) 351 mac->opmode == NL80211_IFTYPE_AP)
352 return; 352 return;
353 else 353 else
354 undecorated_smoothed_pwdb = 354 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
355 rtlpriv->dm.undecorated_smoothed_pwdb;
356 355
357 if (pstats->packet_toself || pstats->packet_beacon) { 356 if (pstats->packet_toself || pstats->packet_beacon) {
358 if (undecorated_smoothed_pwdb < 0) 357 if (undec_sm_pwdb < 0)
359 undecorated_smoothed_pwdb = pstats->rx_pwdb_all; 358 undec_sm_pwdb = pstats->rx_pwdb_all;
360 if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) { 359 if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
361 undecorated_smoothed_pwdb = 360 undec_sm_pwdb = (((undec_sm_pwdb) *
362 (((undecorated_smoothed_pwdb) *
363 (RX_SMOOTH_FACTOR - 1)) + 361 (RX_SMOOTH_FACTOR - 1)) +
364 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 362 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
365 undecorated_smoothed_pwdb = 363 undec_sm_pwdb = undec_sm_pwdb + 1;
366 undecorated_smoothed_pwdb + 1;
367 } else { 364 } else {
368 undecorated_smoothed_pwdb = 365 undec_sm_pwdb = (((undec_sm_pwdb) *
369 (((undecorated_smoothed_pwdb) *
370 (RX_SMOOTH_FACTOR - 1)) + 366 (RX_SMOOTH_FACTOR - 1)) +
371 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); 367 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
372 } 368 }
373 rtlpriv->dm.undecorated_smoothed_pwdb = 369 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
374 undecorated_smoothed_pwdb;
375 _rtl92de_update_rxsignalstatistics(hw, pstats); 370 _rtl92de_update_rxsignalstatistics(hw, pstats);
376 } 371 }
377} 372}
@@ -383,15 +378,15 @@ static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
383 int stream; 378 int stream;
384 379
385 for (stream = 0; stream < 2; stream++) { 380 for (stream = 0; stream < 2; stream++) {
386 if (pstats->rx_mimo_signalquality[stream] != -1) { 381 if (pstats->rx_mimo_sig_qual[stream] != -1) {
387 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) { 382 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
388 rtlpriv->stats.rx_evm_percentage[stream] = 383 rtlpriv->stats.rx_evm_percentage[stream] =
389 pstats->rx_mimo_signalquality[stream]; 384 pstats->rx_mimo_sig_qual[stream];
390 } 385 }
391 rtlpriv->stats.rx_evm_percentage[stream] = 386 rtlpriv->stats.rx_evm_percentage[stream] =
392 ((rtlpriv->stats.rx_evm_percentage[stream] 387 ((rtlpriv->stats.rx_evm_percentage[stream]
393 * (RX_SMOOTH_FACTOR - 1)) + 388 * (RX_SMOOTH_FACTOR - 1)) +
394 (pstats->rx_mimo_signalquality[stream] * 1)) / 389 (pstats->rx_mimo_sig_qual[stream] * 1)) /
395 (RX_SMOOTH_FACTOR); 390 (RX_SMOOTH_FACTOR);
396 } 391 }
397 } 392 }
@@ -514,7 +509,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
514 rx_status->flag |= RX_FLAG_40MHZ; 509 rx_status->flag |= RX_FLAG_40MHZ;
515 if (GET_RX_DESC_RXHT(pdesc)) 510 if (GET_RX_DESC_RXHT(pdesc))
516 rx_status->flag |= RX_FLAG_HT; 511 rx_status->flag |= RX_FLAG_HT;
517 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 512 rx_status->flag |= RX_FLAG_MACTIME_START;
518 if (stats->decrypted) 513 if (stats->decrypted)
519 rx_status->flag |= RX_FLAG_DECRYPTED; 514 rx_status->flag |= RX_FLAG_DECRYPTED;
520 rx_status->rate_idx = rtlwifi_rate_mapping(hw, 515 rx_status->rate_idx = rtlwifi_rate_mapping(hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 20afec62ce05..2d255e02d795 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -522,8 +522,7 @@ enum fwcmd_iotype {
522 FW_CMD_IQK_ENABLE = 30, 522 FW_CMD_IQK_ENABLE = 30,
523}; 523};
524 524
525/* 525/* Driver info contain PHY status
526 * Driver info contain PHY status
527 * and other variabel size info 526 * and other variabel size info
528 * PHY Status content as below 527 * PHY Status content as below
529 */ 528 */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 465f58157101..e551fe5f9ccd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -267,13 +267,12 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
267 break; 267 break;
268 } 268 }
269 269
270 if (rtlpriv->dm.undecorated_smoothed_pwdb > 270 if (rtlpriv->dm.undec_sm_pwdb > (long)high_rssi_thresh) {
271 (long)high_rssi_thresh) {
272 ra->ratr_state = DM_RATR_STA_HIGH; 271 ra->ratr_state = DM_RATR_STA_HIGH;
273 } else if (rtlpriv->dm.undecorated_smoothed_pwdb > 272 } else if (rtlpriv->dm.undec_sm_pwdb >
274 (long)middle_rssi_thresh) { 273 (long)middle_rssi_thresh) {
275 ra->ratr_state = DM_RATR_STA_LOW; 274 ra->ratr_state = DM_RATR_STA_LOW;
276 } else if (rtlpriv->dm.undecorated_smoothed_pwdb > 275 } else if (rtlpriv->dm.undec_sm_pwdb >
277 (long)low_rssi_thresh) { 276 (long)low_rssi_thresh) {
278 ra->ratr_state = DM_RATR_STA_LOW; 277 ra->ratr_state = DM_RATR_STA_LOW;
279 } else { 278 } else {
@@ -283,8 +282,7 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
283 if (ra->pre_ratr_state != ra->ratr_state) { 282 if (ra->pre_ratr_state != ra->ratr_state) {
284 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, 283 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
285 "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n", 284 "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
286 rtlpriv->dm.undecorated_smoothed_pwdb, 285 rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
287 ra->ratr_state,
288 ra->pre_ratr_state, ra->ratr_state); 286 ra->pre_ratr_state, ra->ratr_state);
289 287
290 rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 288 rtlpriv->cfg->ops->update_rate_tbl(hw, sta,
@@ -316,7 +314,7 @@ static void _rtl92s_dm_switch_baseband_mrc(struct ieee80211_hw *hw)
316 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc)); 314 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MRC, (u8 *)(&current_mrc));
317 315
318 if (mac->link_state >= MAC80211_LINKED) { 316 if (mac->link_state >= MAC80211_LINKED) {
319 if (rtlpriv->dm.undecorated_smoothed_pwdb > tmpentry_maxpwdb) { 317 if (rtlpriv->dm.undec_sm_pwdb > tmpentry_maxpwdb) {
320 rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A]; 318 rssi_a = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_A];
321 rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B]; 319 rssi_b = rtlpriv->stats.rx_rssi_percentage[RF90_PATH_B];
322 } 320 }
@@ -424,18 +422,18 @@ static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
424 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 422 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
425 423
426 if (falsealm_cnt->cnt_all > digtable->fa_highthresh) { 424 if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
427 if ((digtable->backoff_val - 6) < 425 if ((digtable->back_val - 6) <
428 digtable->backoffval_range_min) 426 digtable->backoffval_range_min)
429 digtable->backoff_val = digtable->backoffval_range_min; 427 digtable->back_val = digtable->backoffval_range_min;
430 else 428 else
431 digtable->backoff_val -= 6; 429 digtable->back_val -= 6;
432 } else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) { 430 } else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
433 if ((digtable->backoff_val + 6) > 431 if ((digtable->back_val + 6) >
434 digtable->backoffval_range_max) 432 digtable->backoffval_range_max)
435 digtable->backoff_val = 433 digtable->back_val =
436 digtable->backoffval_range_max; 434 digtable->backoffval_range_max;
437 else 435 else
438 digtable->backoff_val += 6; 436 digtable->back_val += 6;
439 } 437 }
440} 438}
441 439
@@ -447,28 +445,28 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
447 static u8 initialized, force_write; 445 static u8 initialized, force_write;
448 u8 initial_gain = 0; 446 u8 initial_gain = 0;
449 447
450 if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) || 448 if ((digtable->pre_sta_cstate == digtable->cur_sta_cstate) ||
451 (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { 449 (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT)) {
452 if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { 450 if (digtable->cur_sta_cstate == DIG_STA_BEFORE_CONNECT) {
453 if (rtlpriv->psc.rfpwr_state != ERFON) 451 if (rtlpriv->psc.rfpwr_state != ERFON)
454 return; 452 return;
455 453
456 if (digtable->backoff_enable_flag) 454 if (digtable->backoff_enable_flag)
457 rtl92s_backoff_enable_flag(hw); 455 rtl92s_backoff_enable_flag(hw);
458 else 456 else
459 digtable->backoff_val = DM_DIG_BACKOFF; 457 digtable->back_val = DM_DIG_BACKOFF;
460 458
461 if ((digtable->rssi_val + 10 - digtable->backoff_val) > 459 if ((digtable->rssi_val + 10 - digtable->back_val) >
462 digtable->rx_gain_range_max) 460 digtable->rx_gain_range_max)
463 digtable->cur_igvalue = 461 digtable->cur_igvalue =
464 digtable->rx_gain_range_max; 462 digtable->rx_gain_range_max;
465 else if ((digtable->rssi_val + 10 - digtable->backoff_val) 463 else if ((digtable->rssi_val + 10 - digtable->back_val)
466 < digtable->rx_gain_range_min) 464 < digtable->rx_gain_range_min)
467 digtable->cur_igvalue = 465 digtable->cur_igvalue =
468 digtable->rx_gain_range_min; 466 digtable->rx_gain_range_min;
469 else 467 else
470 digtable->cur_igvalue = digtable->rssi_val + 10 - 468 digtable->cur_igvalue = digtable->rssi_val + 10
471 digtable->backoff_val; 469 - digtable->back_val;
472 470
473 if (falsealm_cnt->cnt_all > 10000) 471 if (falsealm_cnt->cnt_all > 10000)
474 digtable->cur_igvalue = 472 digtable->cur_igvalue =
@@ -490,7 +488,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
490 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 488 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
491 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); 489 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
492 490
493 digtable->backoff_val = DM_DIG_BACKOFF; 491 digtable->back_val = DM_DIG_BACKOFF;
494 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0]; 492 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
495 digtable->pre_igvalue = 0; 493 digtable->pre_igvalue = 0;
496 return; 494 return;
@@ -520,7 +518,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
520static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) 518static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
521{ 519{
522 struct rtl_priv *rtlpriv = rtl_priv(hw); 520 struct rtl_priv *rtlpriv = rtl_priv(hw);
523 struct dig_t *digtable = &rtlpriv->dm_digtable; 521 struct dig_t *dig = &rtlpriv->dm_digtable;
524 522
525 if (rtlpriv->mac80211.act_scanning) 523 if (rtlpriv->mac80211.act_scanning)
526 return; 524 return;
@@ -528,17 +526,17 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
528 /* Decide the current status and if modify initial gain or not */ 526 /* Decide the current status and if modify initial gain or not */
529 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED || 527 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
530 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) 528 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
531 digtable->cur_sta_connectstate = DIG_STA_CONNECT; 529 dig->cur_sta_cstate = DIG_STA_CONNECT;
532 else 530 else
533 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT; 531 dig->cur_sta_cstate = DIG_STA_DISCONNECT;
534 532
535 digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; 533 dig->rssi_val = rtlpriv->dm.undec_sm_pwdb;
536 534
537 /* Change dig mode to rssi */ 535 /* Change dig mode to rssi */
538 if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) { 536 if (dig->cur_sta_cstate != DIG_STA_DISCONNECT) {
539 if (digtable->dig_twoport_algorithm == 537 if (dig->dig_twoport_algorithm ==
540 DIG_TWO_PORT_ALGO_FALSE_ALARM) { 538 DIG_TWO_PORT_ALGO_FALSE_ALARM) {
541 digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; 539 dig->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
542 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS); 540 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS);
543 } 541 }
544 } 542 }
@@ -546,7 +544,7 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
546 _rtl92s_dm_false_alarm_counter_statistics(hw); 544 _rtl92s_dm_false_alarm_counter_statistics(hw);
547 _rtl92s_dm_initial_gain_sta_beforeconnect(hw); 545 _rtl92s_dm_initial_gain_sta_beforeconnect(hw);
548 546
549 digtable->pre_sta_connectstate = digtable->cur_sta_connectstate; 547 dig->pre_sta_cstate = dig->cur_sta_cstate;
550} 548}
551 549
552static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) 550static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
@@ -573,7 +571,7 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
573 struct rtl_priv *rtlpriv = rtl_priv(hw); 571 struct rtl_priv *rtlpriv = rtl_priv(hw);
574 struct rtl_phy *rtlphy = &(rtlpriv->phy); 572 struct rtl_phy *rtlphy = &(rtlpriv->phy);
575 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 573 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
576 long undecorated_smoothed_pwdb; 574 long undec_sm_pwdb;
577 long txpwr_threshold_lv1, txpwr_threshold_lv2; 575 long txpwr_threshold_lv1, txpwr_threshold_lv2;
578 576
579 /* 2T2R TP issue */ 577 /* 2T2R TP issue */
@@ -587,7 +585,7 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
587 } 585 }
588 586
589 if ((mac->link_state < MAC80211_LINKED) && 587 if ((mac->link_state < MAC80211_LINKED) &&
590 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 588 (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
591 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, 589 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
592 "Not connected to any\n"); 590 "Not connected to any\n");
593 591
@@ -599,25 +597,22 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
599 597
600 if (mac->link_state >= MAC80211_LINKED) { 598 if (mac->link_state >= MAC80211_LINKED) {
601 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 599 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
602 undecorated_smoothed_pwdb = 600 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
603 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
604 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 601 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
605 "AP Client PWDB = 0x%lx\n", 602 "AP Client PWDB = 0x%lx\n",
606 undecorated_smoothed_pwdb); 603 undec_sm_pwdb);
607 } else { 604 } else {
608 undecorated_smoothed_pwdb = 605 undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
609 rtlpriv->dm.undecorated_smoothed_pwdb;
610 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 606 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
611 "STA Default Port PWDB = 0x%lx\n", 607 "STA Default Port PWDB = 0x%lx\n",
612 undecorated_smoothed_pwdb); 608 undec_sm_pwdb);
613 } 609 }
614 } else { 610 } else {
615 undecorated_smoothed_pwdb = 611 undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
616 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
617 612
618 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 613 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
619 "AP Ext Port PWDB = 0x%lx\n", 614 "AP Ext Port PWDB = 0x%lx\n",
620 undecorated_smoothed_pwdb); 615 undec_sm_pwdb);
621 } 616 }
622 617
623 txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2; 618 txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
@@ -625,12 +620,12 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
625 620
626 if (rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1) 621 if (rtl_get_bbreg(hw, 0xc90, MASKBYTE0) == 1)
627 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; 622 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
628 else if (undecorated_smoothed_pwdb >= txpwr_threshold_lv2) 623 else if (undec_sm_pwdb >= txpwr_threshold_lv2)
629 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2; 624 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL2;
630 else if ((undecorated_smoothed_pwdb < (txpwr_threshold_lv2 - 3)) && 625 else if ((undec_sm_pwdb < (txpwr_threshold_lv2 - 3)) &&
631 (undecorated_smoothed_pwdb >= txpwr_threshold_lv1)) 626 (undec_sm_pwdb >= txpwr_threshold_lv1))
632 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1; 627 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL1;
633 else if (undecorated_smoothed_pwdb < (txpwr_threshold_lv1 - 3)) 628 else if (undec_sm_pwdb < (txpwr_threshold_lv1 - 3))
634 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL; 629 rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
635 630
636 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) 631 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl))
@@ -665,10 +660,10 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
665 digtable->dig_state = DM_STA_DIG_MAX; 660 digtable->dig_state = DM_STA_DIG_MAX;
666 digtable->dig_highpwrstate = DM_STA_DIG_MAX; 661 digtable->dig_highpwrstate = DM_STA_DIG_MAX;
667 662
668 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT; 663 digtable->cur_sta_cstate = DIG_STA_DISCONNECT;
669 digtable->pre_sta_connectstate = DIG_STA_DISCONNECT; 664 digtable->pre_sta_cstate = DIG_STA_DISCONNECT;
670 digtable->cur_ap_connectstate = DIG_AP_DISCONNECT; 665 digtable->cur_ap_cstate = DIG_AP_DISCONNECT;
671 digtable->pre_ap_connectstate = DIG_AP_DISCONNECT; 666 digtable->pre_ap_cstate = DIG_AP_DISCONNECT;
672 667
673 digtable->rssi_lowthresh = DM_DIG_THRESH_LOW; 668 digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
674 digtable->rssi_highthresh = DM_DIG_THRESH_HIGH; 669 digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
@@ -681,7 +676,7 @@ static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
681 676
682 /* for dig debug rssi value */ 677 /* for dig debug rssi value */
683 digtable->rssi_val = 50; 678 digtable->rssi_val = 50;
684 digtable->backoff_val = DM_DIG_BACKOFF; 679 digtable->back_val = DM_DIG_BACKOFF;
685 digtable->rx_gain_range_max = DM_DIG_MAX; 680 digtable->rx_gain_range_max = DM_DIG_MAX;
686 681
687 digtable->rx_gain_range_min = DM_DIG_MIN; 682 digtable->rx_gain_range_min = DM_DIG_MIN;
@@ -709,7 +704,7 @@ void rtl92s_dm_init(struct ieee80211_hw *hw)
709 struct rtl_priv *rtlpriv = rtl_priv(hw); 704 struct rtl_priv *rtlpriv = rtl_priv(hw);
710 705
711 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; 706 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
712 rtlpriv->dm.undecorated_smoothed_pwdb = -1; 707 rtlpriv->dm.undec_sm_pwdb = -1;
713 708
714 _rtl92s_dm_init_dynamic_txpower(hw); 709 _rtl92s_dm_init_dynamic_txpower(hw);
715 rtl92s_dm_init_edca_turbo(hw); 710 rtl92s_dm_init_edca_turbo(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 4542e6952b97..28526a7361f5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1089,8 +1089,9 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
1089 return err; 1089 return err;
1090} 1090}
1091 1091
1092void rtl92se_set_mac_addr(struct rtl_io *io, const u8 * addr) 1092void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr)
1093{ 1093{
1094 /* This is a stub. */
1094} 1095}
1095 1096
1096void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1097void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
@@ -1697,7 +1698,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1697 hwinfo[EEPROM_TXPOWERBASE + 6 + rf_path * 3 + i]; 1698 hwinfo[EEPROM_TXPOWERBASE + 6 + rf_path * 3 + i];
1698 1699
1699 /* Read OFDM RF A & B Tx power for 2T */ 1700 /* Read OFDM RF A & B Tx power for 2T */
1700 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][i] 1701 rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path][i]
1701 = hwinfo[EEPROM_TXPOWERBASE + 12 + 1702 = hwinfo[EEPROM_TXPOWERBASE + 12 +
1702 rf_path * 3 + i]; 1703 rf_path * 3 + i];
1703 } 1704 }
@@ -1722,7 +1723,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1722 RTPRINT(rtlpriv, FINIT, INIT_EEPROM, 1723 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1723 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n", 1724 "RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
1724 rf_path, i, 1725 rf_path, i,
1725 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif 1726 rtlefuse->eprom_chnl_txpwr_ht40_2sdf
1726 [rf_path][i]); 1727 [rf_path][i]);
1727 1728
1728 for (rf_path = 0; rf_path < 2; rf_path++) { 1729 for (rf_path = 0; rf_path < 2; rf_path++) {
@@ -1748,7 +1749,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
1748 rtlefuse->eeprom_chnlarea_txpwr_ht40_1s 1749 rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
1749 [rf_path][index]; 1750 [rf_path][index];
1750 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 1751 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
1751 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif 1752 rtlefuse->eprom_chnl_txpwr_ht40_2sdf
1752 [rf_path][index]; 1753 [rf_path][index];
1753 } 1754 }
1754 1755
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
index 1886c2644a26..a8e068c76e47 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.h
@@ -54,7 +54,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw);
54int rtl92se_set_network_type(struct ieee80211_hw *hw, 54int rtl92se_set_network_type(struct ieee80211_hw *hw,
55 enum nl80211_iftype type); 55 enum nl80211_iftype type);
56void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); 56void rtl92se_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
57void rtl92se_set_mac_addr(struct rtl_io *io, const u8 * addr); 57void rtl92se_set_mac_addr(struct rtl_io *io, const u8 *addr);
58void rtl92se_set_qos(struct ieee80211_hw *hw, int aci); 58void rtl92se_set_qos(struct ieee80211_hw *hw, int aci);
59void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw); 59void rtl92se_set_beacon_related_registers(struct ieee80211_hw *hw);
60void rtl92se_set_beacon_interval(struct ieee80211_hw *hw); 60void rtl92se_set_beacon_interval(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index b917a2a3caf7..67404975e00b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -139,17 +139,17 @@ static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
139 BIT(8)); 139 BIT(8));
140 140
141 if (rfpi_enable) 141 if (rfpi_enable)
142 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, 142 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
143 BLSSI_READBACK_DATA); 143 BLSSI_READBACK_DATA);
144 else 144 else
145 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, 145 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
146 BLSSI_READBACK_DATA); 146 BLSSI_READBACK_DATA);
147 147
148 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, 148 retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
149 BLSSI_READBACK_DATA); 149 BLSSI_READBACK_DATA);
150 150
151 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n", 151 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
152 rfpath, pphyreg->rflssi_readback, retvalue); 152 rfpath, pphyreg->rf_rb, retvalue);
153 153
154 return retvalue; 154 return retvalue;
155 155
@@ -696,7 +696,7 @@ static void _rtl92s_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
696 else 696 else
697 return; 697 return;
698 698
699 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][index] = data; 699 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
700 if (index == 5) 700 if (index == 5)
701 rtlphy->pwrgroup_cnt++; 701 rtlphy->pwrgroup_cnt++;
702} 702}
@@ -765,14 +765,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
765 rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para2 = RFPGA0_XD_HSSIPARAMETER2; 765 rtlphy->phyreg_def[RF90_PATH_D].rfhssi_para2 = RFPGA0_XD_HSSIPARAMETER2;
766 766
767 /* RF switch Control */ 767 /* RF switch Control */
768 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control = 768 rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
769 RFPGA0_XAB_SWITCHCONTROL; 769 rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
770 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control = 770 rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
771 RFPGA0_XAB_SWITCHCONTROL; 771 rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
772 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
773 RFPGA0_XCD_SWITCHCONTROL;
774 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
775 RFPGA0_XCD_SWITCHCONTROL;
776 772
777 /* AGC control 1 */ 773 /* AGC control 1 */
778 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; 774 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
@@ -787,14 +783,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
787 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; 783 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
788 784
789 /* RX AFE control 1 */ 785 /* RX AFE control 1 */
790 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance = 786 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
791 ROFDM0_XARXIQIMBALANCE; 787 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
792 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance = 788 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
793 ROFDM0_XBRXIQIMBALANCE; 789 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
794 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
795 ROFDM0_XCRXIQIMBALANCE;
796 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
797 ROFDM0_XDRXIQIMBALANCE;
798 790
799 /* RX AFE control 1 */ 791 /* RX AFE control 1 */
800 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; 792 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
@@ -803,14 +795,10 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
803 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; 795 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
804 796
805 /* Tx AFE control 1 */ 797 /* Tx AFE control 1 */
806 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance = 798 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
807 ROFDM0_XATXIQIMBALANCE; 799 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
808 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance = 800 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
809 ROFDM0_XBTXIQIMBALANCE; 801 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
810 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
811 ROFDM0_XCTXIQIMBALANCE;
812 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
813 ROFDM0_XDTXIQIMBALANCE;
814 802
815 /* Tx AFE control 2 */ 803 /* Tx AFE control 2 */
816 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; 804 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
@@ -819,20 +807,14 @@ static void _rtl92s_phy_init_register_definition(struct ieee80211_hw *hw)
819 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; 807 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
820 808
821 /* Tranceiver LSSI Readback */ 809 /* Tranceiver LSSI Readback */
822 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = 810 rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
823 RFPGA0_XA_LSSIREADBACK; 811 rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
824 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = 812 rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
825 RFPGA0_XB_LSSIREADBACK; 813 rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
826 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
827 RFPGA0_XC_LSSIREADBACK;
828 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
829 RFPGA0_XD_LSSIREADBACK;
830 814
831 /* Tranceiver LSSI Readback PI mode */ 815 /* Tranceiver LSSI Readback PI mode */
832 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = 816 rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
833 TRANSCEIVERA_HSPI_READBACK; 817 rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
834 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
835 TRANSCEIVERB_HSPI_READBACK;
836} 818}
837 819
838 820
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
index 08c2f5625129..5061f1db3f02 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/rf.c
@@ -192,8 +192,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
192 * defined by Realtek for large power */ 192 * defined by Realtek for large power */
193 chnlgroup = 0; 193 chnlgroup = 0;
194 194
195 writeval = rtlphy->mcs_txpwrlevel_origoffset 195 writeval = rtlphy->mcs_offset[chnlgroup][index] +
196 [chnlgroup][index] +
197 ((index < 2) ? pwrbase0 : pwrbase1); 196 ((index < 2) ? pwrbase0 : pwrbase1);
198 197
199 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 198 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
@@ -223,8 +222,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
223 chnlgroup++; 222 chnlgroup++;
224 } 223 }
225 224
226 writeval = rtlphy->mcs_txpwrlevel_origoffset 225 writeval = rtlphy->mcs_offset[chnlgroup][index]
227 [chnlgroup][index]
228 + ((index < 2) ? 226 + ((index < 2) ?
229 pwrbase0 : pwrbase1); 227 pwrbase0 : pwrbase1);
230 228
@@ -257,8 +255,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
257 } 255 }
258 256
259 for (i = 0; i < 4; i++) { 257 for (i = 0; i < 4; i++) {
260 pwrdiff_limit[i] = 258 pwrdiff_limit[i] = (u8)((rtlphy->mcs_offset
261 (u8)((rtlphy->mcs_txpwrlevel_origoffset
262 [chnlgroup][index] & (0x7f << (i * 8))) 259 [chnlgroup][index] & (0x7f << (i * 8)))
263 >> (i * 8)); 260 >> (i * 8));
264 261
@@ -296,7 +293,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
296 break; 293 break;
297 default: 294 default:
298 chnlgroup = 0; 295 chnlgroup = 0;
299 writeval = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index] + 296 writeval = rtlphy->mcs_offset[chnlgroup][index] +
300 ((index < 2) ? pwrbase0 : pwrbase1); 297 ((index < 2) ? pwrbase0 : pwrbase1);
301 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, 298 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
302 "RTK better performance, writeval = 0x%x\n", writeval); 299 "RTK better performance, writeval = 0x%x\n", writeval);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index ad4b4803482d..cecc377e9e61 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -50,8 +50,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
50 /*close ASPM for AMD defaultly */ 50 /*close ASPM for AMD defaultly */
51 rtlpci->const_amdpci_aspm = 0; 51 rtlpci->const_amdpci_aspm = 0;
52 52
53 /* 53 /* ASPM PS mode.
54 * ASPM PS mode.
55 * 0 - Disable ASPM, 54 * 0 - Disable ASPM,
56 * 1 - Enable ASPM without Clock Req, 55 * 1 - Enable ASPM without Clock Req,
57 * 2 - Enable ASPM with Clock Req, 56 * 2 - Enable ASPM with Clock Req,
@@ -67,8 +66,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
67 /*Setting for PCI-E bridge */ 66 /*Setting for PCI-E bridge */
68 rtlpci->const_hostpci_aspm_setting = 0x02; 67 rtlpci->const_hostpci_aspm_setting = 0x02;
69 68
70 /* 69 /* In Hw/Sw Radio Off situation.
71 * In Hw/Sw Radio Off situation.
72 * 0 - Default, 70 * 0 - Default,
73 * 1 - From ASPM setting without low Mac Pwr, 71 * 1 - From ASPM setting without low Mac Pwr,
74 * 2 - From ASPM setting with low Mac Pwr, 72 * 2 - From ASPM setting with low Mac Pwr,
@@ -77,8 +75,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
77 */ 75 */
78 rtlpci->const_hwsw_rfoff_d3 = 2; 76 rtlpci->const_hwsw_rfoff_d3 = 2;
79 77
80 /* 78 /* This setting works for those device with
81 * This setting works for those device with
82 * backdoor ASPM setting such as EPHY setting. 79 * backdoor ASPM setting such as EPHY setting.
83 * 0 - Not support ASPM, 80 * 0 - Not support ASPM,
84 * 1 - Support ASPM, 81 * 1 - Support ASPM,
@@ -403,7 +400,7 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
403 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15, 400 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
404}; 401};
405 402
406static struct pci_device_id rtl92se_pci_ids[] __devinitdata = { 403static struct pci_device_id rtl92se_pci_ids[] = {
407 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)}, 404 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)},
408 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)}, 405 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)},
409 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)}, 406 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)},
@@ -432,14 +429,7 @@ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
432MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); 429MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
433MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); 430MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
434 431
435static const struct dev_pm_ops rtlwifi_pm_ops = { 432static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
436 .suspend = rtl_pci_suspend,
437 .resume = rtl_pci_resume,
438 .freeze = rtl_pci_suspend,
439 .thaw = rtl_pci_resume,
440 .poweroff = rtl_pci_suspend,
441 .restore = rtl_pci_resume,
442};
443 433
444static struct pci_driver rtl92se_driver = { 434static struct pci_driver rtl92se_driver = {
445 .name = KBUILD_MODNAME, 435 .name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index e3cf4c02122a..0e9f6ebf078a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -129,8 +129,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
129 pstats->packet_matchbssid = packet_match_bssid; 129 pstats->packet_matchbssid = packet_match_bssid;
130 pstats->packet_toself = packet_toself; 130 pstats->packet_toself = packet_toself;
131 pstats->packet_beacon = packet_beacon; 131 pstats->packet_beacon = packet_beacon;
132 pstats->rx_mimo_signalquality[0] = -1; 132 pstats->rx_mimo_sig_qual[0] = -1;
133 pstats->rx_mimo_signalquality[1] = -1; 133 pstats->rx_mimo_sig_qual[1] = -1;
134 134
135 if (is_cck) { 135 if (is_cck) {
136 u8 report, cck_highpwr; 136 u8 report, cck_highpwr;
@@ -216,8 +216,8 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
216 } 216 }
217 217
218 pstats->signalquality = sq; 218 pstats->signalquality = sq;
219 pstats->rx_mimo_signalquality[0] = sq; 219 pstats->rx_mimo_sig_qual[0] = sq;
220 pstats->rx_mimo_signalquality[1] = -1; 220 pstats->rx_mimo_sig_qual[1] = -1;
221 } 221 }
222 } else { 222 } else {
223 rtlpriv->dm.rfpath_rxenable[0] = 223 rtlpriv->dm.rfpath_rxenable[0] =
@@ -256,8 +256,7 @@ static void _rtl92se_query_rxphystatus(struct ieee80211_hw *hw,
256 if (i == 0) 256 if (i == 0)
257 pstats->signalquality = (u8)(evm & 257 pstats->signalquality = (u8)(evm &
258 0xff); 258 0xff);
259 pstats->rx_mimo_signalquality[i] = 259 pstats->rx_mimo_sig_qual[i] = (u8) (evm & 0xff);
260 (u8) (evm & 0xff);
261 } 260 }
262 } 261 }
263 } 262 }
@@ -366,7 +365,7 @@ static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
366 return; 365 return;
367 } else { 366 } else {
368 undec_sm_pwdb = 367 undec_sm_pwdb =
369 rtlpriv->dm.undecorated_smoothed_pwdb; 368 rtlpriv->dm.undec_sm_pwdb;
370 } 369 }
371 370
372 if (pstats->packet_toself || pstats->packet_beacon) { 371 if (pstats->packet_toself || pstats->packet_beacon) {
@@ -386,7 +385,7 @@ static void _rtl92se_process_pwdb(struct ieee80211_hw *hw,
386 (RX_SMOOTH_FACTOR); 385 (RX_SMOOTH_FACTOR);
387 } 386 }
388 387
389 rtlpriv->dm.undecorated_smoothed_pwdb = undec_sm_pwdb; 388 rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
390 _rtl92se_update_rxsignalstatistics(hw, pstats); 389 _rtl92se_update_rxsignalstatistics(hw, pstats);
391 } 390 }
392} 391}
@@ -398,16 +397,16 @@ static void rtl_92s_process_streams(struct ieee80211_hw *hw,
398 u32 stream; 397 u32 stream;
399 398
400 for (stream = 0; stream < 2; stream++) { 399 for (stream = 0; stream < 2; stream++) {
401 if (pstats->rx_mimo_signalquality[stream] != -1) { 400 if (pstats->rx_mimo_sig_qual[stream] != -1) {
402 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) { 401 if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
403 rtlpriv->stats.rx_evm_percentage[stream] = 402 rtlpriv->stats.rx_evm_percentage[stream] =
404 pstats->rx_mimo_signalquality[stream]; 403 pstats->rx_mimo_sig_qual[stream];
405 } 404 }
406 405
407 rtlpriv->stats.rx_evm_percentage[stream] = 406 rtlpriv->stats.rx_evm_percentage[stream] =
408 ((rtlpriv->stats.rx_evm_percentage[stream] * 407 ((rtlpriv->stats.rx_evm_percentage[stream] *
409 (RX_SMOOTH_FACTOR - 1)) + 408 (RX_SMOOTH_FACTOR - 1)) +
410 (pstats->rx_mimo_signalquality[stream] * 409 (pstats->rx_mimo_sig_qual[stream] *
411 1)) / (RX_SMOOTH_FACTOR); 410 1)) / (RX_SMOOTH_FACTOR);
412 } 411 }
413 } 412 }
@@ -554,7 +553,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
554 if (stats->is_ht) 553 if (stats->is_ht)
555 rx_status->flag |= RX_FLAG_HT; 554 rx_status->flag |= RX_FLAG_HT;
556 555
557 rx_status->flag |= RX_FLAG_MACTIME_MPDU; 556 rx_status->flag |= RX_FLAG_MACTIME_START;
558 557
559 /* hw will set stats->decrypted true, if it finds the 558 /* hw will set stats->decrypted true, if it finds the
560 * frame is open data frame or mgmt frame, 559 * frame is open data frame or mgmt frame,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
new file mode 100644
index 000000000000..4ed731f09b1f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/Makefile
@@ -0,0 +1,22 @@
1obj-m := rtl8723ae.o
2
3
4rtl8723ae-objs := \
5 dm.o \
6 fw.o \
7 hal_btc.o \
8 hal_bt_coexist.o\
9 hw.o \
10 led.o \
11 phy.o \
12 pwrseq.o \
13 pwrseqcmd.o \
14 rf.o \
15 sw.o \
16 table.o \
17 trx.o \
18
19
20obj-$(CONFIG_RTL8723AE) += rtl8723ae.o
21
22ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
new file mode 100644
index 000000000000..417afeed36af
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/btc.h
@@ -0,0 +1,41 @@
1/******************************************************************************
2 **
3 ** Copyright(c) 2009-2012 Realtek Corporation.
4 **
5 ** This program is free software; you can redistribute it and/or modify it
6 ** under the terms of version 2 of the GNU General Public License as
7 ** published by the Free Software Foundation.
8 **
9 ** This program is distributed in the hope that it will be useful, but WITHOUT
10 ** ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 ** FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 ** more details.
13 **
14 ** You should have received a copy of the GNU General Public License along with
15 ** this program; if not, write to the Free Software Foundation, Inc.,
16 ** 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 **
18 ** The full GNU General Public License is included in this distribution in the
19 ** file called LICENSE.
20 **
21 ** Contact Information:
22 ** wlanfae <wlanfae@realtek.com>
23 ** Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 ** Hsinchu 300, Taiwan.
25 ** Larry Finger <Larry.Finger@lwfinger.net>
26 **
27 *****************************************************************************
28 */
29
30#ifndef __RTL8723E_BTC_H__
31#define __RTL8723E_BTC_H__
32
33#include "../wifi.h"
34#include "hal_bt_coexist.h"
35
36struct bt_coexist_c2h_info {
37 u8 no_parse_c2h;
38 u8 has_c2h;
39};
40
41#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/def.h b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
new file mode 100644
index 000000000000..8c110356dff9
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/def.h
@@ -0,0 +1,163 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 ****************************************************************************
29 */
30
31#ifndef __RTL8723E_DEF_H__
32#define __RTL8723E_DEF_H__
33
34#define HAL_PRIME_CHNL_OFFSET_LOWER 1
35
36#define RX_MPDU_QUEUE 0
37
38#define CHIP_8723 BIT(0)
39#define NORMAL_CHIP BIT(3)
40#define RF_TYPE_1T2R BIT(4)
41#define RF_TYPE_2T2R BIT(5)
42#define CHIP_VENDOR_UMC BIT(7)
43#define B_CUT_VERSION BIT(12)
44#define C_CUT_VERSION BIT(13)
45#define D_CUT_VERSION ((BIT(12)|BIT(13)))
46#define E_CUT_VERSION BIT(14)
47#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
48
49enum version_8723e {
50 VERSION_TEST_UMC_CHIP_8723 = 0x0081,
51 VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT = 0x0089,
52 VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT = 0x1089,
53};
54
55/* MASK */
56#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
57#define CHIP_TYPE_MASK BIT(3)
58#define RF_TYPE_MASK (BIT(4)|BIT(5)|BIT(6))
59#define MANUFACTUER_MASK BIT(7)
60#define ROM_VERSION_MASK (BIT(11)|BIT(10)|BIT(9)|BIT(8))
61#define CUT_VERSION_MASK (BIT(15)|BIT(14)|BIT(13)|BIT(12))
62
63/* Get element */
64#define GET_CVID_IC_TYPE(version) ((version) & IC_TYPE_MASK)
65#define GET_CVID_MANUFACTUER(version) ((version) & MANUFACTUER_MASK)
66#define GET_CVID_CUT_VERSION(version) ((version) & CUT_VERSION_MASK)
67
68#define IS_81XXC(version) ((GET_CVID_IC_TYPE(version) == 0) ?\
69 true : false)
70#define IS_8723_SERIES(version) \
71 ((GET_CVID_IC_TYPE(version) == CHIP_8723) ? true : false)
72#define IS_CHIP_VENDOR_UMC(version) \
73 ((GET_CVID_MANUFACTUER(version)) ? true : false)
74
75#define IS_VENDOR_UMC_A_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) ? \
76 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
77#define IS_VENDOR_8723_A_CUT(version) ((IS_8723_SERIES(version)) ? \
78 ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
79#define IS_81xxC_VENDOR_UMC_B_CUT(version) ((IS_CHIP_VENDOR_UMC(version)) \
80 ? ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? \
81 true : false) : false)
82
83enum rf_optype {
84 RF_OP_BY_SW_3WIRE = 0,
85 RF_OP_BY_FW,
86 RF_OP_MAX
87};
88
89enum rf_power_state {
90 RF_ON,
91 RF_OFF,
92 RF_SLEEP,
93 RF_SHUT_DOWN,
94};
95
96enum power_save_mode {
97 POWER_SAVE_MODE_ACTIVE,
98 POWER_SAVE_MODE_SAVE,
99};
100
101enum power_polocy_config {
102 POWERCFG_MAX_POWER_SAVINGS,
103 POWERCFG_GLOBAL_POWER_SAVINGS,
104 POWERCFG_LOCAL_POWER_SAVINGS,
105 POWERCFG_LENOVO,
106};
107
108enum interface_select_pci {
109 INTF_SEL1_MINICARD = 0,
110 INTF_SEL0_PCIE = 1,
111 INTF_SEL2_RSV = 2,
112 INTF_SEL3_RSV = 3,
113};
114
115enum hal_fw_c2h_cmd_id {
116 HAL_FW_C2H_CMD_Read_MACREG = 0,
117 HAL_FW_C2H_CMD_Read_BBREG = 1,
118 HAL_FW_C2H_CMD_Read_RFREG = 2,
119 HAL_FW_C2H_CMD_Read_EEPROM = 3,
120 HAL_FW_C2H_CMD_Read_EFUSE = 4,
121 HAL_FW_C2H_CMD_Read_CAM = 5,
122 HAL_FW_C2H_CMD_Get_BasicRate = 6,
123 HAL_FW_C2H_CMD_Get_DataRate = 7,
124 HAL_FW_C2H_CMD_Survey = 8,
125 HAL_FW_C2H_CMD_SurveyDone = 9,
126 HAL_FW_C2H_CMD_JoinBss = 10,
127 HAL_FW_C2H_CMD_AddSTA = 11,
128 HAL_FW_C2H_CMD_DelSTA = 12,
129 HAL_FW_C2H_CMD_AtimDone = 13,
130 HAL_FW_C2H_CMD_TX_Report = 14,
131 HAL_FW_C2H_CMD_CCX_Report = 15,
132 HAL_FW_C2H_CMD_DTM_Report = 16,
133 HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
134 HAL_FW_C2H_CMD_C2HLBK = 18,
135 HAL_FW_C2H_CMD_C2HDBG = 19,
136 HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
137 HAL_FW_C2H_CMD_MAX
138};
139
140enum rtl_desc_qsel {
141 QSLT_BK = 0x2,
142 QSLT_BE = 0x0,
143 QSLT_VI = 0x5,
144 QSLT_VO = 0x7,
145 QSLT_BEACON = 0x10,
146 QSLT_HIGH = 0x11,
147 QSLT_MGNT = 0x12,
148 QSLT_CMD = 0x13,
149};
150
151struct phy_sts_cck_8723e_t {
152 u8 adc_pwdb_X[4];
153 u8 sq_rpt;
154 u8 cck_agc_rpt;
155};
156
157struct h2c_cmd_8723e {
158 u8 element_id;
159 u32 cmd_len;
160 u8 *p_cmdbuffer;
161};
162
163#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
new file mode 100644
index 000000000000..12e2a3cb0701
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.c
@@ -0,0 +1,920 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 ****************************************************************************
29 */
30
31#include "../wifi.h"
32#include "../base.h"
33#include "../pci.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "dm.h"
38#include "fw.h"
39#include "hal_btc.h"
40
/* OFDM TX gain-swing table (one 32-bit entry per power-tracking step;
 * sized by OFDM_TABLE_SIZE from dm.h). */
static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
	0x7f8001fe,
	0x788001e2,
	0x71c001c7,
	0x6b8001ae,
	0x65400195,
	0x5fc0017f,
	0x5a400169,
	0x55400155,
	0x50800142,
	0x4c000130,
	0x47c0011f,
	0x43c0010f,
	0x40000100,
	0x3c8000f2,
	0x390000e4,
	0x35c000d7,
	0x32c000cb,
	0x300000c0,
	0x2d4000b5,
	0x2ac000ab,
	0x288000a2,
	0x26000098,
	0x24000090,
	0x22000088,
	0x20000080,
	0x1e400079,
	0x1c800072,
	0x1b00006c,
	0x19800066,
	0x18000060,
	0x16c0005b,
	0x15800056,
	0x14400051,
	0x1300004c,
	0x12000048,
	0x11000044,
	0x10000040,
};
80
/* CCK TX gain-swing table for channels 1-13: one 8-byte filter-coefficient
 * row per power-tracking step (sized by CCK_TABLE_SIZE from dm.h). */
static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
	{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
	{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
	{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
	{0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
	{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
	{0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
	{0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
	{0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
	{0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
	{0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
	{0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
	{0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
	{0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
	{0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
	{0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
	{0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
	{0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
	{0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
	{0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
	{0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
	{0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
	{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
	{0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
	{0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
	{0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
	{0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
	{0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
	{0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
	{0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
	{0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
	{0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
	{0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
};
116
/* CCK TX gain-swing table for channel 14 (narrower mask: the upper four
 * coefficients are zeroed).
 * NOTE(review): row 4 has 0x17 where the channel 1-13 table has 0x27 —
 * this breaks the otherwise monotonic column pattern and may be a typo
 * inherited from vendor code; verify before changing. */
static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
	{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
	{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
	{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
	{0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
	{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
	{0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
	{0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
	{0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
	{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
	{0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
	{0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
	{0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
	{0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
	{0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
	{0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
	{0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
	{0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
	{0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
	{0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
	{0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
	{0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
	{0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
	{0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
	{0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
};
152
153static void rtl8723ae_dm_diginit(struct ieee80211_hw *hw)
154{
155 struct rtl_priv *rtlpriv = rtl_priv(hw);
156 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
157
158 dm_digtable->dig_enable_flag = true;
159 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
160 dm_digtable->cur_igvalue = 0x20;
161 dm_digtable->pre_igvalue = 0x0;
162 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
163 dm_digtable->presta_cstate = DIG_STA_DISCONNECT;
164 dm_digtable->curmultista_cstate = DIG_MULTISTA_DISCONNECT;
165 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
166 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
167 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
168 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
169 dm_digtable->rx_gain_range_max = DM_DIG_MAX;
170 dm_digtable->rx_gain_range_min = DM_DIG_MIN;
171 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
172 dm_digtable->back_range_max = DM_DIG_BACKOFF_MAX;
173 dm_digtable->back_range_min = DM_DIG_BACKOFF_MIN;
174 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
175 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
176}
177
178static u8 rtl_init_gain_min_pwdb(struct ieee80211_hw *hw)
179{
180 struct rtl_priv *rtlpriv = rtl_priv(hw);
181 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
182 long rssi_val_min = 0;
183
184 if ((dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) &&
185 (dm_digtable->cursta_cstate == DIG_STA_CONNECT)) {
186 if (rtlpriv->dm.entry_min_undec_sm_pwdb != 0)
187 rssi_val_min =
188 (rtlpriv->dm.entry_min_undec_sm_pwdb >
189 rtlpriv->dm.undec_sm_pwdb) ?
190 rtlpriv->dm.undec_sm_pwdb :
191 rtlpriv->dm.entry_min_undec_sm_pwdb;
192 else
193 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
194 } else if (dm_digtable->cursta_cstate == DIG_STA_CONNECT ||
195 dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT) {
196 rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
197 } else if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
198 rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;
199 }
200
201 return (u8) rssi_val_min;
202}
203
/* Read the baseband false-alarm counters (OFDM parity/rate/CRC8/MCS and
 * CCK), accumulate the totals used by the DIG algorithm, then reset and
 * re-arm the hardware counters for the next watchdog interval. */
static void rtl8723ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
{
	u32 ret_value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);

	/* OFDM counters: each register packs two 16-bit counts. */
	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
	falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
	falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
	falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);

	ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
	falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
	falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
	    falsealm_cnt->cnt_rate_illegal +
	    falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;

	/* NOTE(review): presumably BIT(14) latches the CCK false-alarm
	 * counter so the two bytes below read consistently — confirm
	 * against the 8723A baseband documentation. */
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
	falsealm_cnt->cnt_cck_fail = ret_value;

	ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
	falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
	falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
				falsealm_cnt->cnt_rate_illegal +
				falsealm_cnt->cnt_crc8_fail +
				falsealm_cnt->cnt_mcs_fail +
				falsealm_cnt->cnt_cck_fail);

	/* Pulse the OFDM counter reset bit, then clear and re-enable the
	 * CCK counter field (write 0 then 2). */
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
	rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
	rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_parity_fail = %d, cnt_rate_illegal = %d, "
		 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
		 falsealm_cnt->cnt_parity_fail,
		 falsealm_cnt->cnt_rate_illegal,
		 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
		 falsealm_cnt->cnt_ofdm_fail,
		 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
252
253static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
254{
255 struct rtl_priv *rtlpriv = rtl_priv(hw);
256 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
257 u8 value_igi = dm_digtable->cur_igvalue;
258
259 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
260 value_igi--;
261 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
262 value_igi += 0;
263 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
264 value_igi++;
265 else
266 value_igi += 2;
267
268 value_igi = clamp(value_igi, (u8)DM_DIG_FA_LOWER, (u8)DM_DIG_FA_UPPER);
269 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
270 value_igi = 0x32;
271
272 dm_digtable->cur_igvalue = value_igi;
273 rtl8723ae_dm_write_dig(hw);
274}
275
276static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
277{
278 struct rtl_priv *rtlpriv = rtl_priv(hw);
279 struct dig_t *dgtbl = &rtlpriv->dm_digtable;
280
281 if (rtlpriv->falsealm_cnt.cnt_all > dgtbl->fa_highthresh) {
282 if ((dgtbl->back_val - 2) < dgtbl->back_range_min)
283 dgtbl->back_val = dgtbl->back_range_min;
284 else
285 dgtbl->back_val -= 2;
286 } else if (rtlpriv->falsealm_cnt.cnt_all < dgtbl->fa_lowthresh) {
287 if ((dgtbl->back_val + 2) > dgtbl->back_range_max)
288 dgtbl->back_val = dgtbl->back_range_max;
289 else
290 dgtbl->back_val += 2;
291 }
292
293 if ((dgtbl->rssi_val_min + 10 - dgtbl->back_val) >
294 dgtbl->rx_gain_range_max)
295 dgtbl->cur_igvalue = dgtbl->rx_gain_range_max;
296 else if ((dgtbl->rssi_val_min + 10 -
297 dgtbl->back_val) < dgtbl->rx_gain_range_min)
298 dgtbl->cur_igvalue = dgtbl->rx_gain_range_min;
299 else
300 dgtbl->cur_igvalue = dgtbl->rssi_val_min + 10 - dgtbl->back_val;
301
302 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
303 "rssi_val_min = %x back_val %x\n",
304 dgtbl->rssi_val_min, dgtbl->back_val);
305
306 rtl8723ae_dm_write_dig(hw);
307}
308
/* Multi-STA (ad-hoc) DIG handling: steps the external-port DIG stage from
 * the minimum peer RSSI, or parks DIG at a fixed IGI of 0x20 when not in
 * ad-hoc mode or while the default port is not disconnected. */
static void rtl8723ae_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
	long rssi_strength = rtlpriv->dm.entry_min_undec_sm_pwdb;
	bool multi_sta = false;

	/* Only ad-hoc operation counts as multi-STA here. */
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		multi_sta = true;

	if ((!multi_sta) ||
	    (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT)) {
		/* Not multi-STA: clear the one-shot init flag and park the
		 * external-port stage until it becomes relevant again. */
		rtlpriv->initialized = false;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
		return;
	} else if (!rtlpriv->initialized) {
		/* First multi-STA pass: start from the default IGI. */
		rtlpriv->initialized = true;
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl8723ae_dm_write_dig(hw);
	}

	if (dm_digtable->curmultista_cstate == DIG_MULTISTA_CONNECT) {
		/* Low peer RSSI: drop to stage 1 (reset IGI when coming
		 * from stage 2); high peer RSSI: stage 2 with FA control. */
		if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
		    (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {

			if (dm_digtable->dig_ext_port_stage ==
			    DIG_EXT_PORT_STAGE_2) {
				dm_digtable->cur_igvalue = 0x20;
				rtl8723ae_dm_write_dig(hw);
			}

			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
		} else if (rssi_strength > dm_digtable->rssi_highthresh) {
			dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
			rtl92c_dm_ctrl_initgain_by_fa(hw);
		}
	} else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
		/* No multi-STA connection: back to stage 0, default IGI. */
		dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
		dm_digtable->cur_igvalue = 0x20;
		rtl8723ae_dm_write_dig(hw);
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "curmultista_cstate = %x dig_ext_port_stage %x\n",
		 dm_digtable->curmultista_cstate,
		 dm_digtable->dig_ext_port_stage);
}
358
359static void rtl8723ae_dm_initial_gain_sta(struct ieee80211_hw *hw)
360{
361 struct rtl_priv *rtlpriv = rtl_priv(hw);
362 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
363
364 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
365 "presta_cstate = %x, cursta_cstate = %x\n",
366 dm_digtable->presta_cstate,
367 dm_digtable->cursta_cstate);
368
369 if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
370 dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
371 dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
372
373 if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) {
374 dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);
375 rtl92c_dm_ctrl_initgain_by_rssi(hw);
376 }
377 } else {
378 dm_digtable->rssi_val_min = 0;
379 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
380 dm_digtable->back_val = DM_DIG_BACKOFF_DEFAULT;
381 dm_digtable->cur_igvalue = 0x20;
382 dm_digtable->pre_igvalue = 0;
383 rtl8723ae_dm_write_dig(hw);
384 }
385}
/* Adjust the CCK packet-detection threshold between a low-RSSI and a
 * high-RSSI stage (with hysteresis: 25 dB going down, 20 dB going up),
 * and program the matching CCA/system register values on a transition. */
static void rtl8723ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	if (dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
		dm_digtable->rssi_val_min = rtl_init_gain_min_pwdb(hw);

		/* Hysteresis: the switch-over point depends on the stage
		 * we are currently in. */
		if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			if (dm_digtable->rssi_val_min <= 25)
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		} else {
			if (dm_digtable->rssi_val_min <= 20)
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_LowRssi;
			else
				dm_digtable->cur_cck_pd_state =
				    CCK_PD_STAGE_HighRssi;
		}
	} else {
		dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
	}

	/* Only touch the hardware when the stage actually changes. */
	if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
		if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
			/* In the low-RSSI stage the CCA threshold also
			 * tracks the CCK false-alarm level. */
			if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
				dm_digtable->cur_cck_fa_state =
				    CCK_FA_STAGE_High;
			else
				dm_digtable->cur_cck_fa_state =
				    CCK_FA_STAGE_Low;

			if (dm_digtable->pre_cck_fa_state !=
			    dm_digtable->cur_cck_fa_state) {
				if (dm_digtable->cur_cck_fa_state ==
				    CCK_FA_STAGE_Low)
					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
						      0x83);
				else
					rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
						      0xcd);

				dm_digtable->pre_cck_fa_state =
				    dm_digtable->cur_cck_fa_state;
			}

			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);

		} else {
			/* High-RSSI (or disconnected) stage: fixed values. */
			rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
			rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);

		}
		dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
	}

	RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
		 "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);

}
450
451static void rtl8723ae_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
452{
453 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
454 struct rtl_priv *rtlpriv = rtl_priv(hw);
455 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
456
457 if (mac->act_scanning == true)
458 return;
459
460 if (mac->link_state >= MAC80211_LINKED)
461 dm_digtable->cursta_cstate = DIG_STA_CONNECT;
462 else
463 dm_digtable->cursta_cstate = DIG_STA_DISCONNECT;
464
465 rtl8723ae_dm_initial_gain_sta(hw);
466 rtl8723ae_dm_initial_gain_multi_sta(hw);
467 rtl8723ae_dm_cck_packet_detection_thresh(hw);
468
469 dm_digtable->presta_cstate = dm_digtable->cursta_cstate;
470
471}
472
473static void rtl8723ae_dm_dig(struct ieee80211_hw *hw)
474{
475 struct rtl_priv *rtlpriv = rtl_priv(hw);
476 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
477
478 if (rtlpriv->dm.dm_initialgain_enable == false)
479 return;
480 if (dm_digtable->dig_enable_flag == false)
481 return;
482
483 rtl8723ae_dm_ctrl_initgain_by_twoport(hw);
484}
485
/* Initialize dynamic TX power control: disabled by default, with both the
 * current and previous levels set to normal. */
static void rtl8723ae_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dynamic_txpower_enable = false;

	rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
	rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
}
495
/* Dynamic TX power: pick a high-power level from the current PWDB (near-
 * field peers get reduced power) and reprogram the TX power level when the
 * chosen level changes. */
static void rtl8723ae_dm_dynamic_txpower(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undec_sm_pwdb;

	if (!rtlpriv->dm.dynamic_txpower_enable)
		return;

	if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* No link and no peer entries: fall back to normal power. */
	if ((mac->link_state < MAC80211_LINKED) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Not connected\n");

		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;

		rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
		return;
	}

	/* Choose the PWDB source: peer entries in ad-hoc, the default port
	 * otherwise; peer entries again when only they are available. */
	if (mac->link_state >= MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		} else {
			undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 undec_sm_pwdb);
		}
	} else {
		undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;

		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 undec_sm_pwdb);
	}

	/* Map PWDB to a power level, with a 3/5 dB hysteresis gap between
	 * the bands so the level does not flap.
	 * NOTE(review): both the LVL2 and LVL1 bands select
	 * TXHIGHPWRLEVEL_LEVEL1 here; sibling rtlwifi drivers use LEVEL2
	 * for the upper band — confirm this is intended for the 8723AE. */
	if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
	} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
		   (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
	} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
		rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "TXHIGHPWRLEVEL_NORMAL\n");
	}

	/* Only reprogram the hardware when the level actually changed. */
	if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
			 "PHY_SetTxPowerLevel8192S() Channel = %d\n",
			 rtlphy->current_channel);
		rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
	}

	rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
}
566
567void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw)
568{
569 struct rtl_priv *rtlpriv = rtl_priv(hw);
570 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
571
572 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
573 "cur_igvalue = 0x%x, "
574 "pre_igvalue = 0x%x, back_val = %d\n",
575 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
576 dm_digtable->back_val);
577
578 if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
579 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
580 dm_digtable->cur_igvalue);
581 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
582 dm_digtable->cur_igvalue);
583
584 dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
585 }
586}
587
/* No-op placeholder: power monitoring is not implemented for this chip,
 * but the watchdog still calls it to keep the DM sequence uniform. */
static void rtl8723ae_dm_pwdmonitor(struct ieee80211_hw *hw)
{
}
591
/* Reset the EDCA-turbo bookkeeping: turbo off, no non-BE traffic seen,
 * direction state cleared. */
void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.current_turbo_edca = false;
	rtlpriv->dm.is_any_nonbepkts = false;
	rtlpriv->dm.is_cur_rdlstate = false;
}
600
601static void rtl8723ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
602{
603 struct rtl_priv *rtlpriv = rtl_priv(hw);
604 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
605 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
606
607 u64 cur_txok_cnt = 0;
608 u64 cur_rxok_cnt = 0;
609 u32 edca_be_ul = 0x5ea42b;
610 u32 edca_be_dl = 0x5ea42b;
611 bool bt_change_edca = false;
612
613 if ((mac->last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
614 (mac->last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
615 rtlpriv->dm.current_turbo_edca = false;
616 mac->last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
617 mac->last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
618 }
619
620 if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
621 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
622 bt_change_edca = true;
623 }
624
625 if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
626 edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
627 bt_change_edca = true;
628 }
629
630 if (mac->link_state != MAC80211_LINKED) {
631 rtlpriv->dm.current_turbo_edca = false;
632 return;
633 }
634
635 if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
636 if (!(edca_be_ul & 0xffff0000))
637 edca_be_ul |= 0x005e0000;
638
639 if (!(edca_be_dl & 0xffff0000))
640 edca_be_dl |= 0x005e0000;
641 }
642
643 if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) &&
644 (!rtlpriv->dm.disable_framebursting))) {
645
646 cur_txok_cnt = rtlpriv->stats.txbytesunicast -
647 mac->last_txok_cnt;
648 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast -
649 mac->last_rxok_cnt;
650
651 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
652 if (!rtlpriv->dm.is_cur_rdlstate ||
653 !rtlpriv->dm.current_turbo_edca) {
654 rtl_write_dword(rtlpriv,
655 REG_EDCA_BE_PARAM,
656 edca_be_dl);
657 rtlpriv->dm.is_cur_rdlstate = true;
658 }
659 } else {
660 if (rtlpriv->dm.is_cur_rdlstate ||
661 !rtlpriv->dm.current_turbo_edca) {
662 rtl_write_dword(rtlpriv,
663 REG_EDCA_BE_PARAM,
664 edca_be_ul);
665 rtlpriv->dm.is_cur_rdlstate = false;
666 }
667 }
668 rtlpriv->dm.current_turbo_edca = true;
669 } else {
670 if (rtlpriv->dm.current_turbo_edca) {
671 u8 tmp = AC0_BE;
672 rtlpriv->cfg->ops->set_hw_reg(hw,
673 HW_VAR_AC_PARAM,
674 (u8 *) (&tmp));
675 rtlpriv->dm.current_turbo_edca = false;
676 }
677 }
678
679 rtlpriv->dm.is_any_nonbepkts = false;
680 mac->last_txok_cnt = rtlpriv->stats.txbytesunicast;
681 mac->last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
682}
683
/* Enable TX power tracking and mark its one-time setup as pending. */
static void rtl8723ae_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.txpower_tracking = true;
	rtlpriv->dm.txpower_trackinginit = false;

	RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		 "pMgntInfo->txpower_tracking = %d\n",
		 rtlpriv->dm.txpower_tracking);
}
695
696void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
697{
698 struct rtl_priv *rtlpriv = rtl_priv(hw);
699 struct rate_adaptive *p_ra = &(rtlpriv->ra);
700
701 p_ra->ratr_state = DM_RATR_STA_INIT;
702 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
703
704 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
705 rtlpriv->dm.useramask = true;
706 else
707 rtlpriv->dm.useramask = false;
708}
709
/* Reset the dynamic BB power-saving state table to "unknown" so the first
 * watchdog pass programs the hardware unconditionally.
 * (The "cur_ccasate" field name is misspelled in the shared rtlwifi
 * struct and cannot be fixed here.) */
static void rtl8723ae_dm_init_dynamic_bpowersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm_pstable.pre_ccastate = CCA_MAX;
	rtlpriv->dm_pstable.cur_ccasate = CCA_MAX;
	rtlpriv->dm_pstable.pre_rfstate = RF_MAX;
	rtlpriv->dm_pstable.cur_rfstate = RF_MAX;
	rtlpriv->dm_pstable.rssi_val_min = 0;
}
720
/* RF power-saving state machine: with good RSSI (>= 30, leaving again
 * below 25 for hysteresis) switch several BB register fields into a
 * power-save configuration; restore the cached originals on the way back.
 * @force_in_normal: non-zero forces the RF_NORMAL state regardless of
 * RSSI. */
void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 force_in_normal)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	/* First call: cache the original register fields so they can be
	 * restored when leaving the save state. */
	if (!rtlpriv->reg_init) {
		rtlpriv->reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
					MASKDWORD) & 0x1CC000) >> 14;

		rtlpriv->reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
					MASKDWORD) & BIT(3)) >> 3;

		rtlpriv->reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
					MASKDWORD) & 0xFF000000) >> 24;

		rtlpriv->reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) &
				    0xF000) >> 12;

		rtlpriv->reg_init = true;
	}

	if (!force_in_normal) {
		if (dm_pstable->rssi_val_min != 0) {
			/* Hysteresis: enter save at >= 30, leave at <= 25. */
			if (dm_pstable->pre_rfstate == RF_NORMAL) {
				if (dm_pstable->rssi_val_min >= 30)
					dm_pstable->cur_rfstate = RF_SAVE;
				else
					dm_pstable->cur_rfstate = RF_NORMAL;
			} else {
				if (dm_pstable->rssi_val_min <= 25)
					dm_pstable->cur_rfstate = RF_NORMAL;
				else
					dm_pstable->cur_rfstate = RF_SAVE;
			}
		} else {
			/* No RSSI available yet: undefined state. */
			dm_pstable->cur_rfstate = RF_MAX;
		}
	} else {
		dm_pstable->cur_rfstate = RF_NORMAL;
	}

	/* Reprogram the hardware only on a state transition. */
	if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
		if (dm_pstable->cur_rfstate == RF_SAVE) {
			/* NOTE(review): the exact meaning of these fields is
			 * undocumented here; the 0x818 BIT(28) toggle at the
			 * end presumably latches the new configuration. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      BIT(5), 0x1);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1C0000, 0x2);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
				      0xFF000000, 0x63);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0xC000, 0x2);
			rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
		} else {
			/* Restore the fields cached on the first call. */
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      0x1CC000, rtlpriv->reg_874);
			rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
				      rtlpriv->reg_c70);
			rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
				      rtlpriv->reg_85c);
			rtl_set_bbreg(hw, 0xa74, 0xF000, rtlpriv->reg_a74);
			rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
			rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
				      BIT(5), 0x0);
		}

		dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
	}
}
793
/* Refresh the minimum RSSI used by the BB power-saving logic and run the
 * RF-saving state machine.
 * NOTE(review): every RT_TRACE below passes DBG_LOUD as the *component*
 * argument where a COMP_* mask is expected (compare the COMP_POWER /
 * COMP_DIG usage elsewhere in this file) — these traces are almost
 * certainly mis-filtered; confirm and fix the component. */
static void rtl8723ae_dm_dynamic_bpowersaving(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct ps_t *dm_pstable = &rtlpriv->dm_pstable;

	if (((mac->link_state == MAC80211_NOLINK)) &&
	    (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
		dm_pstable->rssi_val_min = 0;
		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "Not connected to any\n");
	}

	/* Same PWDB source selection as the dynamic TX power logic. */
	if (mac->link_state == MAC80211_LINKED) {
		if (mac->opmode == NL80211_IFTYPE_ADHOC) {
			dm_pstable->rssi_val_min =
			    rtlpriv->dm.entry_min_undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "AP Client PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		} else {
			dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
			RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
				 "STA Default Port PWDB = 0x%lx\n",
				 dm_pstable->rssi_val_min);
		}
	} else {
		dm_pstable->rssi_val_min = rtlpriv->dm.entry_min_undec_sm_pwdb;

		RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
			 "AP Ext Port PWDB = 0x%lx\n",
			 dm_pstable->rssi_val_min);
	}

	rtl8723ae_dm_rf_saving(hw, false);
}
830
/* One-time initialization of all dynamic mechanisms; called from the HAL
 * before the watchdog starts running. */
void rtl8723ae_dm_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
	rtl8723ae_dm_diginit(hw);
	rtl8723ae_dm_init_dynamic_txpower(hw);
	rtl8723ae_dm_init_edca_turbo(hw);
	rtl8723ae_dm_init_rate_adaptive_mask(hw);
	rtl8723ae_dm_initialize_txpower_tracking(hw);
	rtl8723ae_dm_init_dynamic_bpowersaving(hw);
}
843
/* Periodic DM watchdog: run all dynamic mechanisms, but only while the RF
 * is on, the firmware is awake (not in power-save), and no RF power-state
 * change is in progress. */
void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *) (&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *) (&fw_ps_awake));

	if ((ppsc->rfpwr_state == ERFON) &&
	    ((!fw_current_inpsmode) && fw_ps_awake) &&
	    (!ppsc->rfchange_inprogress)) {
		rtl8723ae_dm_pwdmonitor(hw);
		rtl8723ae_dm_dig(hw);
		rtl8723ae_dm_false_alarm_counter_statistics(hw);
		rtl8723ae_dm_dynamic_bpowersaving(hw);
		rtl8723ae_dm_dynamic_txpower(hw);
		/* rtl92c_dm_refresh_rate_adaptive_mask(hw); */
		rtl8723ae_dm_bt_coexist(hw);
		rtl8723ae_dm_check_edca_turbo(hw);
	}
	/* NOTE(review): writes 0xc to 0x76e every interval once BT coexist
	 * is initialized — 0x76e is also written in the coexist init path;
	 * its meaning is not documented here. */
	if (rtlpcipriv->bt_coexist.init_set)
		rtl_write_byte(rtlpriv, 0x76e, 0xc);
}
871
/* Lazy one-time BT coexistence setup: cache the original RF 0x1e/0x1f
 * register values, clear the coexist state tracking, and enable the
 * counter-statistics registers. */
static void rtl8723ae_dm_init_bt_coexist(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);

	rtlpcipriv->bt_coexist.bt_rfreg_origin_1e
	    = rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK1, 0xfffff);
	rtlpcipriv->bt_coexist.bt_rfreg_origin_1f
	    = rtl_get_rfreg(hw, (enum radio_path)0, RF_RCK2, 0xf0);

	rtlpcipriv->bt_coexist.cstate = 0;
	rtlpcipriv->bt_coexist.previous_state = 0;
	rtlpcipriv->bt_coexist.cstate_h = 0;
	rtlpcipriv->bt_coexist.previous_state_h = 0;
	rtlpcipriv->bt_coexist.lps_counter = 0;

	/* Enable counter statistics */
	rtl_write_byte(rtlpriv, 0x76e, 0x4);
	rtl_write_byte(rtlpriv, 0x778, 0x3);
	rtl_write_byte(rtlpriv, 0x40, 0x20);

	rtlpcipriv->bt_coexist.init_set = true;
}
895
896void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw)
897{
898 struct rtl_priv *rtlpriv = rtl_priv(hw);
899 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
900 u8 tmp_byte = 0;
901 if (!rtlpcipriv->bt_coexist.bt_coexistence) {
902 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
903 "[DM]{BT], BT not exist!!\n");
904 return;
905 }
906
907 if (!rtlpcipriv->bt_coexist.init_set) {
908 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
909 "[DM][BT], rtl8723ae_dm_bt_coexist()\n");
910
911 rtl8723ae_dm_init_bt_coexist(hw);
912 }
913
914 tmp_byte = rtl_read_byte(rtlpriv, 0x40);
915 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
916 "[DM][BT], 0x40 is 0x%x", tmp_byte);
917 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
918 "[DM][BT], bt_dm_coexist start");
919 rtl8723ae_dm_bt_coexist_8723(hw);
920}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
new file mode 100644
index 000000000000..39d246196247
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/dm.h
@@ -0,0 +1,149 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 ****************************************************************************
29 */
30
31#ifndef __RTL8723E_DM_H__
32#define __RTL8723E_DM_H__
33
34#define HAL_DM_HIPWR_DISABLE BIT(1)
35
36#define OFDM_TABLE_SIZE 37
37#define CCK_TABLE_SIZE 33
38
39#define DM_DIG_THRESH_HIGH 40
40#define DM_DIG_THRESH_LOW 35
41
42#define DM_FALSEALARM_THRESH_LOW 400
43#define DM_FALSEALARM_THRESH_HIGH 1000
44
45#define DM_DIG_MAX 0x3e
46#define DM_DIG_MIN 0x1e
47
48#define DM_DIG_FA_UPPER 0x32
49#define DM_DIG_FA_LOWER 0x20
50#define DM_DIG_FA_TH0 0x20
51#define DM_DIG_FA_TH1 0x100
52#define DM_DIG_FA_TH2 0x200
53
54#define DM_DIG_BACKOFF_MAX 12
55#define DM_DIG_BACKOFF_MIN -4
56#define DM_DIG_BACKOFF_DEFAULT 10
57
58#define DM_RATR_STA_INIT 0
59
60#define TXHIGHPWRLEVEL_NORMAL 0
61#define TXHIGHPWRLEVEL_LEVEL1 1
62#define TXHIGHPWRLEVEL_LEVEL2 2
63#define TXHIGHPWRLEVEL_BT1 3
64#define TXHIGHPWRLEVEL_BT2 4
65
66#define DM_TYPE_BYDRIVER 1
67
68#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
69#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
70
/* Software antenna-switch (SW antenna diversity) bookkeeping. Field
 * semantics inferred from names -- confirm against the dm.c users. */
struct swat_t {
	u8 failure_cnt;		/* consecutive failed switch attempts */
	u8 try_flag;		/* nonzero while a trial switch is in progress -- presumably */
	u8 stop_trying;		/* set to abort further trial switches */
	long pre_rssi;		/* RSSI recorded before the trial switch */
	long trying_threshold;	/* RSSI threshold gating a trial -- TODO confirm */
	u8 cur_antenna;		/* antenna currently selected */
	u8 pre_antenna;		/* antenna used before the trial */
};
80
/* Operation selector for dynamic initial gain (DIG) control. */
enum tag_dynamic_init_gain_operation_type_definition {
	DIG_TYPE_THRESH_HIGH = 0,
	DIG_TYPE_THRESH_LOW = 1,
	DIG_TYPE_BACKOFF = 2,
	DIG_TYPE_RX_GAIN_MIN = 3,
	DIG_TYPE_RX_GAIN_MAX = 4,
	DIG_TYPE_ENABLE = 5,
	DIG_TYPE_DISABLE = 6,
	DIG_OP_TYPE_MAX		/* sentinel: number of op types */
};
91
/* CCK packet-detection / false-alarm threshold stages. */
enum tag_cck_packet_detection_threshold_type_definition {
	CCK_PD_STAGE_LowRssi = 0,
	CCK_PD_STAGE_HighRssi = 1,
	CCK_FA_STAGE_Low = 2,
	CCK_FA_STAGE_High = 3,
	CCK_PD_STAGE_MAX = 4,	/* sentinel */
};
99
/* CCA with one or two RX chains. */
enum dm_1r_cca_e {
	CCA_1R = 0,
	CCA_2R = 1,
	CCA_MAX = 2,	/* sentinel */
};
105
/* RF power-saving state. */
enum dm_rf_e {
	RF_SAVE = 0,
	RF_NORMAL = 1,
	RF_MAX = 2,	/* sentinel */
};
111
/* Software antenna-switch selection (note: values start at 1). */
enum dm_sw_ant_switch_e {
	ANS_ANTENNA_B = 1,
	ANS_ANTENNA_A = 2,
	ANS_ANTENNA_MAX = 3,	/* sentinel */
};
117
/* DIG extension-port algorithm stages. */
enum dm_dig_ext_port_alg_e {
	DIG_EXT_PORT_STAGE_0 = 0,
	DIG_EXT_PORT_STAGE_1 = 1,
	DIG_EXT_PORT_STAGE_2 = 2,
	DIG_EXT_PORT_STAGE_3 = 3,
	DIG_EXT_PORT_STAGE_MAX = 4,	/* sentinel */
};
125
/* Connection state as seen by the DIG algorithm. */
enum dm_dig_connect_e {
	DIG_STA_DISCONNECT = 0,
	DIG_STA_CONNECT = 1,
	DIG_STA_BEFORE_CONNECT = 2,
	DIG_MULTISTA_DISCONNECT = 3,
	DIG_MULTISTA_CONNECT = 4,
	DIG_CONNECT_MAX		/* sentinel */
};
134
/* Smoothed (undecorated) signal power: in ad-hoc mode use the minimum
 * over known entries, otherwise the per-priv smoothed pwdb value. */
#define GET_UNDECORATED_AVERAGE_RSSI(_priv)	\
	((((struct rtl_priv *)(_priv))->mac80211.opmode == \
	  NL80211_IFTYPE_ADHOC) ? \
	 (((struct rtl_priv *)(_priv))->dm.entry_min_undec_sm_pwdb) \
	 : (((struct rtl_priv *)(_priv))->dm.undec_sm_pwdb))
140
141void rtl8723ae_dm_init(struct ieee80211_hw *hw);
142void rtl8723ae_dm_watchdog(struct ieee80211_hw *hw);
143void rtl8723ae_dm_write_dig(struct ieee80211_hw *hw);
144void rtl8723ae_dm_init_edca_turbo(struct ieee80211_hw *hw);
145void rtl8723ae_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
146void rtl8723ae_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
147void rtl8723ae_dm_bt_coexist(struct ieee80211_hw *hw);
148
149#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
new file mode 100644
index 000000000000..f55b1767ef57
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.c
@@ -0,0 +1,745 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 ****************************************************************************
29 */
30
31#include "../wifi.h"
32#include "../pci.h"
33#include "../base.h"
34#include "reg.h"
35#include "def.h"
36#include "fw.h"
37
38static void _rtl8723ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
39{
40 struct rtl_priv *rtlpriv = rtl_priv(hw);
41 u8 tmp;
42 if (enable) {
43 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
44 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
45
46 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
47 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
48
49 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
50 rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
51 } else {
52 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
53 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
54
55 rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
56 }
57}
58
59static void _rtl8723ae_fw_block_write(struct ieee80211_hw *hw,
60 const u8 *buffer, u32 size)
61{
62 struct rtl_priv *rtlpriv = rtl_priv(hw);
63 u32 blockSize = sizeof(u32);
64 u8 *bufferPtr = (u8 *) buffer;
65 u32 *pu4BytePtr = (u32 *) buffer;
66 u32 i, offset, blockCount, remainSize;
67
68 blockCount = size / blockSize;
69 remainSize = size % blockSize;
70
71 for (i = 0; i < blockCount; i++) {
72 offset = i * blockSize;
73 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
74 *(pu4BytePtr + i));
75 }
76
77 if (remainSize) {
78 offset = blockCount * blockSize;
79 bufferPtr += offset;
80 for (i = 0; i < remainSize; i++) {
81 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
82 offset + i), *(bufferPtr + i));
83 }
84 }
85}
86
87static void _rtl8723ae_fw_page_write(struct ieee80211_hw *hw,
88 u32 page, const u8 *buffer, u32 size)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 u8 value8;
92 u8 u8page = (u8) (page & 0x07);
93
94 value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
95
96 rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
97 _rtl8723ae_fw_block_write(hw, buffer, size);
98}
99
100static void _rtl8723ae_write_fw(struct ieee80211_hw *hw,
101 enum version_8723e version, u8 *buffer,
102 u32 size)
103{
104 struct rtl_priv *rtlpriv = rtl_priv(hw);
105 u8 *bufferPtr = (u8 *) buffer;
106 u32 page_nums, remain_size;
107 u32 page, offset;
108
109 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
110
111 page_nums = size / FW_8192C_PAGE_SIZE;
112 remain_size = size % FW_8192C_PAGE_SIZE;
113
114 if (page_nums > 6) {
115 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
116 "Page numbers should not be greater then 6\n");
117 }
118
119 for (page = 0; page < page_nums; page++) {
120 offset = page * FW_8192C_PAGE_SIZE;
121 _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
122 FW_8192C_PAGE_SIZE);
123 }
124
125 if (remain_size) {
126 offset = page_nums * FW_8192C_PAGE_SIZE;
127 page = page_nums;
128 _rtl8723ae_fw_page_write(hw, page, (bufferPtr + offset),
129 remain_size);
130 }
131
132 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
133}
134
135static int _rtl8723ae_fw_free_to_go(struct ieee80211_hw *hw)
136{
137 struct rtl_priv *rtlpriv = rtl_priv(hw);
138 int err = -EIO;
139 u32 counter = 0;
140 u32 value32;
141
142 do {
143 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
144 } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
145 (!(value32 & FWDL_ChkSum_rpt)));
146
147 if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
148 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
149 "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
150 value32);
151 goto exit;
152 }
153
154 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
155 "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
156
157 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
158 value32 |= MCUFWDL_RDY;
159 value32 &= ~WINTINI_RDY;
160 rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
161
162 counter = 0;
163
164 do {
165 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
166 if (value32 & WINTINI_RDY) {
167 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
168 "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
169 value32);
170 err = 0;
171 goto exit;
172 }
173
174 mdelay(FW_8192C_POLLING_DELAY);
175
176 } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
177
178 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
179 "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
180
181exit:
182 return err;
183}
184
185int rtl8723ae_download_fw(struct ieee80211_hw *hw)
186{
187 struct rtl_priv *rtlpriv = rtl_priv(hw);
188 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
189 struct rtl8723ae_firmware_header *pfwheader;
190 u8 *pfwdata;
191 u32 fwsize;
192 int err;
193 enum version_8723e version = rtlhal->version;
194
195 if (!rtlhal->pfirmware)
196 return 1;
197
198 pfwheader = (struct rtl8723ae_firmware_header *)rtlhal->pfirmware;
199 pfwdata = (u8 *) rtlhal->pfirmware;
200 fwsize = rtlhal->fwsize;
201
202 if (IS_FW_HEADER_EXIST(pfwheader)) {
203 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
204 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
205 pfwheader->version, pfwheader->signature,
206 (int)sizeof(struct rtl8723ae_firmware_header));
207
208 pfwdata = pfwdata + sizeof(struct rtl8723ae_firmware_header);
209 fwsize = fwsize - sizeof(struct rtl8723ae_firmware_header);
210 }
211
212 if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
213 rtl8723ae_firmware_selfreset(hw);
214 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
215 }
216 _rtl8723ae_enable_fw_download(hw, true);
217 _rtl8723ae_write_fw(hw, version, pfwdata, fwsize);
218 _rtl8723ae_enable_fw_download(hw, false);
219
220 err = _rtl8723ae_fw_free_to_go(hw);
221 if (err) {
222 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
223 "Firmware is not ready to run!\n");
224 } else {
225 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
226 "Firmware is ready to run!\n");
227 }
228 return 0;
229}
230
231static bool rtl8723ae_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
232{
233 struct rtl_priv *rtlpriv = rtl_priv(hw);
234 u8 val_hmetfr, val_mcutst_1;
235 bool result = false;
236
237 val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
238 val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
239
240 if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
241 result = true;
242 return result;
243}
244
/*
 * Push one H2C (host-to-card) command into the firmware mailbox.
 *
 * The chip exposes four mailbox slots (HMEBOX_0..3, each with an
 * extension register for payloads longer than 3 bytes); slots are used
 * round-robin via rtlhal->last_hmeboxnum. Callers are serialized with
 * h2c_lock plus the h2c_setinprogress flag.
 *
 * element_id:  command id (see enum rtl8192c_h2c_cmd)
 * cmd_len:     payload length in bytes; 1..5 handled
 * p_cmdbuffer: payload bytes
 */
static void _rtl8723ae_fill_h2c_command(struct ieee80211_hw *hw,
					u8 element_id, u32 cmd_len,
					u8 *p_cmdbuffer)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 boxnum;
	u16 box_reg = 0, box_extreg = 0;
	u8 u1tmp;
	bool isfw_rd = false;
	bool bwrite_sucess = false;
	u8 wait_h2c_limmit = 100;
	u8 wait_writeh2c_limmit = 100;
	u8 boxcontent[4], boxextcontent[2];
	u32 h2c_waitcounter = 0;
	unsigned long flag;
	u8 idx;

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");

	/* Claim exclusive H2C access: spin in 100 us steps (up to 1000
	 * tries) while another thread has a command in flight; the
	 * command is silently dropped on timeout. */
	while (true) {
		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
		if (rtlhal->h2c_setinprogress) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "H2C set in progress! Wait to set..element_id(%d).\n",
				 element_id);

			while (rtlhal->h2c_setinprogress) {
				spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
						       flag);
				h2c_waitcounter++;
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wait 100 us (%d times)...\n",
					 h2c_waitcounter);
				udelay(100);

				if (h2c_waitcounter > 1000)
					return;
				spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
						  flag);
			}
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
		} else {
			rtlhal->h2c_setinprogress = true;
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
			break;
		}
	}

	while (!bwrite_sucess) {
		wait_writeh2c_limmit--;
		if (wait_writeh2c_limmit == 0) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "Write H2C fail because no trigger "
				 "for FW INT!\n");
			break;
		}

		/* Map the round-robin slot index to its register pair. */
		boxnum = rtlhal->last_hmeboxnum;
		switch (boxnum) {
		case 0:
			box_reg = REG_HMEBOX_0;
			box_extreg = REG_HMEBOX_EXT_0;
			break;
		case 1:
			box_reg = REG_HMEBOX_1;
			box_extreg = REG_HMEBOX_EXT_1;
			break;
		case 2:
			box_reg = REG_HMEBOX_2;
			box_extreg = REG_HMEBOX_EXT_2;
			break;
		case 3:
			box_reg = REG_HMEBOX_3;
			box_extreg = REG_HMEBOX_EXT_3;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			break;
		}

		/* Wait (10 us steps) until firmware has consumed the
		 * previous command in this slot. */
		isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum);
		while (!isfw_rd) {

			wait_h2c_limmit--;
			if (wait_h2c_limmit == 0) {
				RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
					 "Wating too long for FW read clear HMEBox(%d)!\n",
					 boxnum);
				break;
			}

			udelay(10);

			isfw_rd = rtl8723ae_check_fw_read_last_h2c(hw, boxnum);
			u1tmp = rtl_read_byte(rtlpriv, 0x1BF);
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Wating for FW read clear HMEBox(%d)!!! "
				 "0x1BF = %2x\n", boxnum, u1tmp);
		}

		if (!isfw_rd) {
			RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
				 "Write H2C register BOX[%d] fail!!!!! "
				 "Fw do not read.\n", boxnum);
			break;
		}

		/* Byte 0 of the box carries the command id; BIT(7) marks
		 * whether the extension register is used (cmd_len > 3).
		 * The box is always written last, since writing box_reg
		 * is what triggers the firmware. */
		memset(boxcontent, 0, sizeof(boxcontent));
		memset(boxextcontent, 0, sizeof(boxextcontent));
		boxcontent[0] = element_id;
		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "Write element_id box_reg(%4x) = %2x\n",
			 box_reg, element_id);

		switch (cmd_len) {
		case 1:
			boxcontent[0] &= ~(BIT(7));
			memcpy((u8 *) (boxcontent) + 1,
			       p_cmdbuffer, 1);

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		case 2:
			boxcontent[0] &= ~(BIT(7));
			memcpy((u8 *) (boxcontent) + 1,
			       p_cmdbuffer, 2);

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		case 3:
			boxcontent[0] &= ~(BIT(7));
			memcpy((u8 *) (boxcontent) + 1,
			       p_cmdbuffer, 3);

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		case 4:
			/* First 2 payload bytes go to the extension
			 * register, the rest into the box itself. */
			boxcontent[0] |= (BIT(7));
			memcpy((u8 *) (boxextcontent),
			       p_cmdbuffer, 2);
			memcpy((u8 *) (boxcontent) + 1,
			       p_cmdbuffer + 2, 2);

			for (idx = 0; idx < 2; idx++) {
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxextcontent[idx]);
			}

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		case 5:
			boxcontent[0] |= (BIT(7));
			memcpy((u8 *) (boxextcontent),
			       p_cmdbuffer, 2);
			memcpy((u8 *) (boxcontent) + 1,
			       p_cmdbuffer + 2, 3);

			for (idx = 0; idx < 2; idx++) {
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       boxextcontent[idx]);
			}

			for (idx = 0; idx < 4; idx++) {
				rtl_write_byte(rtlpriv, box_reg + idx,
					       boxcontent[idx]);
			}
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not process\n");
			break;
		}

		bwrite_sucess = true;

		/* Advance the round-robin slot index. */
		rtlhal->last_hmeboxnum = boxnum + 1;
		if (rtlhal->last_hmeboxnum == 4)
			rtlhal->last_hmeboxnum = 0;

		RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
			 "pHalData->last_hmeboxnum = %d\n",
			 rtlhal->last_hmeboxnum);
	}

	/* Release H2C ownership. */
	spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
	rtlhal->h2c_setinprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);

	RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
449
450void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw,
451 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
452{
453 struct rtl_priv *rtlpriv = rtl_priv(hw);
454 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
455
456 if (rtlhal->fw_ready == false) {
457 RT_ASSERT(false,
458 "return H2C cmd because of Fw download fail!!!\n");
459 return;
460 }
461
462 _rtl8723ae_fill_h2c_command(hw, element_id, cmd_len, p_cmdbuffer);
463 return;
464}
465
466void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw)
467{
468 u8 u1tmp;
469 u8 delay = 100;
470 struct rtl_priv *rtlpriv = rtl_priv(hw);
471
472 rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
473 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
474
475 while (u1tmp & BIT(2)) {
476 delay--;
477 if (delay == 0)
478 break;
479 udelay(50);
480 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
481 }
482 if (delay == 0) {
483 u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
484 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, u1tmp&(~BIT(2)));
485 }
486}
487
488void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
489{
490 struct rtl_priv *rtlpriv = rtl_priv(hw);
491 u8 u1_h2c_set_pwrmode[3] = { 0 };
492 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
493
494 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
495
496 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
497 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
498 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
499 ppsc->reg_max_lps_awakeintvl);
500
501 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
502 "rtl8723ae_set_fw_rsvdpagepkt(): u1_h2c_set_pwrmode\n",
503 u1_h2c_set_pwrmode, 3);
504 rtl8723ae_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
505
506}
507
508static bool _rtl8723ae_cmd_send_packet(struct ieee80211_hw *hw,
509 struct sk_buff *skb)
510{
511 struct rtl_priv *rtlpriv = rtl_priv(hw);
512 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
513 struct rtl8192_tx_ring *ring;
514 struct rtl_tx_desc *pdesc;
515 u8 own;
516 unsigned long flags;
517 struct sk_buff *pskb = NULL;
518
519 ring = &rtlpci->tx_ring[BEACON_QUEUE];
520
521 pskb = __skb_dequeue(&ring->queue);
522 if (pskb)
523 kfree_skb(pskb);
524
525 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
526
527 pdesc = &ring->desc[0];
528 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
529
530 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
531
532 __skb_queue_tail(&ring->queue, skb);
533
534 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
535
536 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
537
538 return true;
539}
540
/* Template frames downloaded into the firmware reserved-page area.
 * Six 128-byte pages: beacon (2 pages), ps-poll, null data and probe
 * response (2 pages). Address/AID fields are patched in at runtime by
 * rtl8723ae_set_fw_rsvdpagepkt(); the SSID bytes here ("linksys_wlan")
 * are placeholder template content. */
static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
	/* page 0 beacon */
	0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
	0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 1 beacon */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 2 ps-poll */
	0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 3 null */
	0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
	0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 4 probe_resp */
	0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
	0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
	0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
	0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
	0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
	0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
	0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
	0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
	0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
	0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
	0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
	0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,

	/* page 5 probe_resp */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
650
651void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
652{
653 struct rtl_priv *rtlpriv = rtl_priv(hw);
654 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
655 struct sk_buff *skb = NULL;
656
657 u32 totalpacketlen;
658 bool rtstatus;
659 u8 u1RsvdPageLoc[3] = { 0 };
660 bool dlok = false;
661
662 u8 *beacon;
663 u8 *p_pspoll;
664 u8 *nullfunc;
665 u8 *p_probersp;
666 /*---------------------------------------------------------
667 (1) beacon
668 ---------------------------------------------------------
669 */
670 beacon = &reserved_page_packet[BEACON_PG * 128];
671 SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
672 SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
673
674 /*-------------------------------------------------------
675 (2) ps-poll
676 --------------------------------------------------------
677 */
678 p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
679 SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
680 SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
681 SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
682
683 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
684
685 /*--------------------------------------------------------
686 (3) null data
687 ---------------------------------------------------------i
688 */
689 nullfunc = &reserved_page_packet[NULL_PG * 128];
690 SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
691 SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
692 SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
693
694 SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
695
696 /*---------------------------------------------------------
697 (4) probe response
698 ----------------------------------------------------------
699 */
700 p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
701 SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
702 SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
703 SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
704
705 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
706
707 totalpacketlen = TOTAL_RESERVED_PKT_LEN;
708
709 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
710 "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
711 &reserved_page_packet[0], totalpacketlen);
712 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
713 "rtl8723ae_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
714 u1RsvdPageLoc, 3);
715
716 skb = dev_alloc_skb(totalpacketlen);
717 memcpy((u8 *) skb_put(skb, totalpacketlen),
718 &reserved_page_packet, totalpacketlen);
719
720 rtstatus = _rtl8723ae_cmd_send_packet(hw, skb);
721
722 if (rtstatus)
723 dlok = true;
724
725 if (dlok) {
726 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
727 "Set RSVD page location to Fw.\n");
728 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
729 "H2C_RSVDPAGE:\n",
730 u1RsvdPageLoc, 3);
731 rtl8723ae_fill_h2c_cmd(hw, H2C_RSVDPAGE,
732 sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
733 } else
734 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
735 "Set RSVD page location to Fw FAIL!!!!!!.\n");
736}
737
738void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
739{
740 u8 u1_joinbssrpt_parm[1] = { 0 };
741
742 SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
743
744 rtl8723ae_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
745}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
new file mode 100644
index 000000000000..89994e16dc83
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/fw.h
@@ -0,0 +1,101 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 * Larry Finger <Larry.Finger@lwfinger.net>
26 *
27 ****************************************************************************
28 */
29
30#ifndef __RTL92C__FW__H__
31#define __RTL92C__FW__H__
32
33#define FW_8192C_START_ADDRESS 0x1000
34#define FW_8192C_END_ADDRESS 0x3FFF
35#define FW_8192C_PAGE_SIZE 4096
36#define FW_8192C_POLLING_DELAY 5
37#define FW_8192C_POLLING_TIMEOUT_COUNT 1000
38
39#define BEACON_PG 0
40#define PSPOLL_PG 2
41#define NULL_PG 3
42#define PROBERSP_PG 4 /* ->5 */
43
44#define TOTAL_RESERVED_PKT_LEN 768
45
46#define IS_FW_HEADER_EXIST(_pfwhdr) \
47 ((_pfwhdr->signature&0xFF00) == 0x2300)
48
/* Layout of the optional header at the front of the rtl8723ae firmware
 * image; its presence is detected by IS_FW_HEADER_EXIST() matching
 * 0x23 in the high byte of `signature`. Field meanings beyond that are
 * inferred from names -- confirm against Realtek firmware docs. */
struct rtl8723ae_firmware_header {
	u16 signature;		/* 0x23xx for this chip family */
	u8 category;
	u8 function;
	u16 version;		/* firmware version, logged at download */
	u8 subversion;
	u8 rsvd1;
	u8 month;		/* build timestamp -- presumably */
	u8 date;
	u8 hour;
	u8 minute;
	u16 ramcodeSize;
	u16 rsvd2;
	u32 svnindex;		/* SVN revision of the build -- TODO confirm */
	u32 rsvd3;
	u32 rsvd4;
	u32 rsvd5;
};
67
/* H2C command ids written into byte 0 of the HMEBOX mailbox (note the
 * deliberate gap at value 4). */
enum rtl8192c_h2c_cmd {
	H2C_AP_OFFLOAD = 0,
	H2C_SETPWRMODE = 1,
	H2C_JOINBSSRPT = 2,
	H2C_RSVDPAGE = 3,
	H2C_RSSI_REPORT = 5,
	H2C_RA_MASK = 6,
	MAX_H2CCMD		/* sentinel */
};
77
78#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
79 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
80#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
81 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
82#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
83 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
84#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
85 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
86#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
87 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
88#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
89 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
90#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
91 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
92
93int rtl8723ae_download_fw(struct ieee80211_hw *hw);
94void rtl8723ae_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
95 u32 cmd_len, u8 *p_cmdbuffer);
96void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
97void rtl8723ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
98void rtl8723ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
99void rtl8723ae_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
100
101#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
new file mode 100644
index 000000000000..3d092e4b0b7f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -0,0 +1,542 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "hal_bt_coexist.h"
31#include "../pci.h"
32#include "dm.h"
33#include "fw.h"
34#include "phy.h"
35#include "reg.h"
36#include "hal_btc.h"
37
/* Intentional no-op on the 8723AE: the coexistence code does not act on
 * the "reject AP aggregated packet" hook; kept so callers have a stable
 * entry point. */
void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
					    bool reject)
{
}
42
43void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw)
44{
45 struct rtl_priv *rtlpriv = rtl_priv(hw);
46 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
47 struct rtl_phy *rtlphy = &(rtlpriv->phy);
48
49 if (rtlpriv->link_info.busytraffic) {
50 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_IDLE;
51
52 if (rtlpriv->link_info.tx_busy_traffic)
53 rtlpcipriv->bt_coexist.cstate |=
54 BT_COEX_STATE_WIFI_UPLINK;
55 else
56 rtlpcipriv->bt_coexist.cstate &=
57 ~BT_COEX_STATE_WIFI_UPLINK;
58
59 if (rtlpriv->link_info.rx_busy_traffic)
60 rtlpcipriv->bt_coexist.cstate |=
61 BT_COEX_STATE_WIFI_DOWNLINK;
62 else
63 rtlpcipriv->bt_coexist.cstate &=
64 ~BT_COEX_STATE_WIFI_DOWNLINK;
65 } else {
66 rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_IDLE;
67 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_UPLINK;
68 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_DOWNLINK;
69 }
70
71 if (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
72 rtlpriv->mac80211.mode == WIRELESS_MODE_B) {
73 rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_WIFI_LEGACY;
74 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT20;
75 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_HT40;
76 } else {
77 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_WIFI_LEGACY;
78 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
79 rtlpcipriv->bt_coexist.cstate |=
80 BT_COEX_STATE_WIFI_HT40;
81 rtlpcipriv->bt_coexist.cstate &=
82 ~BT_COEX_STATE_WIFI_HT20;
83 } else {
84 rtlpcipriv->bt_coexist.cstate |=
85 BT_COEX_STATE_WIFI_HT20;
86 rtlpcipriv->bt_coexist.cstate &=
87 ~BT_COEX_STATE_WIFI_HT40;
88 }
89 }
90
91 if (rtlpriv->bt_operation_on)
92 rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT30;
93 else
94 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT30;
95}
96
97u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
98 u8 level_num, u8 rssi_thresh,
99 u8 rssi_thresh1)
100
101{
102 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
103 struct rtl_priv *rtlpriv = rtl_priv(hw);
104 long smooth;
105 u8 bt_rssi_state = 0;
106
107 smooth = rtl8723ae_dm_bt_get_rx_ss(hw);
108
109 if (level_num == 2) {
110 rtlpcipriv->bt_coexist.cstate &=
111 ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
112
113 if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
114 BT_RSSI_STATE_LOW) ||
115 (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
116 BT_RSSI_STATE_STAY_LOW)) {
117 if (smooth >= (rssi_thresh +
118 BT_FW_COEX_THRESH_TOL)) {
119 bt_rssi_state = BT_RSSI_STATE_HIGH;
120 rtlpcipriv->bt_coexist.cstate |=
121 BT_COEX_STATE_WIFI_RSSI_1_HIGH;
122 rtlpcipriv->bt_coexist.cstate &=
123 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
124 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
125 "[DM][BT], RSSI_1 state switch to High\n");
126 } else {
127 bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
128 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
129 "[DM][BT], RSSI_1 state stay at Low\n");
130 }
131 } else {
132 if (smooth < rssi_thresh) {
133 bt_rssi_state = BT_RSSI_STATE_LOW;
134 rtlpcipriv->bt_coexist.cstate |=
135 BT_COEX_STATE_WIFI_RSSI_1_LOW;
136 rtlpcipriv->bt_coexist.cstate &=
137 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
138 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
139 "[DM][BT], RSSI_1 state switch to Low\n");
140 } else {
141 bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
142 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
143 "[DM][BT], RSSI_1 state stay at High\n");
144 }
145 }
146 } else if (level_num == 3) {
147 if (rssi_thresh > rssi_thresh1) {
148 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
149 "[DM][BT], RSSI_1 thresh error!!\n");
150 return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
151 }
152
153 if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
154 BT_RSSI_STATE_LOW) ||
155 (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
156 BT_RSSI_STATE_STAY_LOW)) {
157 if (smooth >=
158 (rssi_thresh+BT_FW_COEX_THRESH_TOL)) {
159 bt_rssi_state = BT_RSSI_STATE_MEDIUM;
160 rtlpcipriv->bt_coexist.cstate |=
161 BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
162 rtlpcipriv->bt_coexist.cstate &=
163 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
164 rtlpcipriv->bt_coexist.cstate &=
165 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
166 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
167 "[DM][BT], RSSI_1 state switch to Medium\n");
168 } else {
169 bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
170 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
171 "[DM][BT], RSSI_1 state stay at Low\n");
172 }
173 } else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
174 BT_RSSI_STATE_MEDIUM) ||
175 (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
176 BT_RSSI_STATE_STAY_MEDIUM)) {
177 if (smooth >= (rssi_thresh1 +
178 BT_FW_COEX_THRESH_TOL)) {
179 bt_rssi_state = BT_RSSI_STATE_HIGH;
180 rtlpcipriv->bt_coexist.cstate |=
181 BT_COEX_STATE_WIFI_RSSI_1_HIGH;
182 rtlpcipriv->bt_coexist.cstate &=
183 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
184 rtlpcipriv->bt_coexist.cstate &=
185 ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
186 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
187 "[DM][BT], RSSI_1 state switch to High\n");
188 } else if (smooth < rssi_thresh) {
189 bt_rssi_state = BT_RSSI_STATE_LOW;
190 rtlpcipriv->bt_coexist.cstate |=
191 BT_COEX_STATE_WIFI_RSSI_1_LOW;
192 rtlpcipriv->bt_coexist.cstate &=
193 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
194 rtlpcipriv->bt_coexist.cstate &=
195 ~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
196 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
197 "[DM][BT], RSSI_1 state switch to Low\n");
198 } else {
199 bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
200 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
201 "[DM][BT], RSSI_1 state stay at Medium\n");
202 }
203 } else {
204 if (smooth < rssi_thresh1) {
205 bt_rssi_state = BT_RSSI_STATE_MEDIUM;
206 rtlpcipriv->bt_coexist.cstate |=
207 BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
208 rtlpcipriv->bt_coexist.cstate &=
209 ~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
210 rtlpcipriv->bt_coexist.cstate &=
211 ~BT_COEX_STATE_WIFI_RSSI_1_LOW;
212 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
213 "[DM][BT], RSSI_1 state switch to Medium\n");
214 } else {
215 bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
216 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
217 "[DM][BT], RSSI_1 state stay at High\n");
218 }
219 }
220 }
221
222 rtlpcipriv->bt_coexist.bt_pre_rssi_state1 = bt_rssi_state;
223
224 return bt_rssi_state;
225}
226
/*
 * Advance the main BT-coexistence WiFi RSSI state machine.
 *
 * @level_num:    2 => low/high machine, 3 => low/medium/high machine
 * @rssi_thresh:  threshold (dB) for leaving the LOW state
 * @rssi_thresh1: threshold (dB) for entering HIGH (level_num == 3 only;
 *                must not be below rssi_thresh)
 *
 * Upward transitions require BT_FW_COEX_THRESH_TOL extra dB of margin
 * (hysteresis); downward transitions use the raw thresholds.  The new
 * BT_RSSI_STATE_* value is returned and cached in bt_pre_rssi_state, and
 * the BT_COEX_STATE_WIFI_RSSI_* bits of cstate are updated to match.
 * This is the sibling of rtl8723ae_dm_bt_check_coex_rssi_state1(), which
 * tracks the WIFI_RSSI_1_* bits.
 */
u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
					 u8 level_num, u8 rssi_thresh,
					 u8 rssi_thresh1)
{
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	long smooth;
	u8 bt_rssi_state = 0;

	/* Smoothed RSSI sample driving every transition below. */
	smooth = rtl8723ae_dm_bt_get_rx_ss(hw);

	if (level_num == 2) {
		/* Two-level machine never reports MEDIUM. */
		rtlpcipriv->bt_coexist.cstate &=
			~BT_COEX_STATE_WIFI_RSSI_MEDIUM;

		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
		     BT_RSSI_STATE_LOW) ||
		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
		     BT_RSSI_STATE_STAY_LOW)){
			if (smooth >=
			    (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
				bt_rssi_state = BT_RSSI_STATE_HIGH;
				rtlpcipriv->bt_coexist.cstate |=
					BT_COEX_STATE_WIFI_RSSI_HIGH;
				rtlpcipriv->bt_coexist.cstate &=
					~BT_COEX_STATE_WIFI_RSSI_LOW;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to High\n");
			} else {
				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state stay at Low\n");
			}
		} else {
			if (smooth < rssi_thresh) {
				bt_rssi_state = BT_RSSI_STATE_LOW;
				rtlpcipriv->bt_coexist.cstate |=
					BT_COEX_STATE_WIFI_RSSI_LOW;
				rtlpcipriv->bt_coexist.cstate &=
					~BT_COEX_STATE_WIFI_RSSI_HIGH;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to Low\n");
			} else {
				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state stay at High\n");
			}
		}
	} else if (level_num == 3) {
		/* Misordered thresholds: keep the previous state. */
		if (rssi_thresh > rssi_thresh1) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[DM][BT], RSSI thresh error!!\n");
			return rtlpcipriv->bt_coexist.bt_pre_rssi_state;
		}
		if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
		     BT_RSSI_STATE_LOW) ||
		    (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
		     BT_RSSI_STATE_STAY_LOW)) {
			/* LOW can only climb to MEDIUM (with hysteresis). */
			if (smooth >=
			    (rssi_thresh + BT_FW_COEX_THRESH_TOL)) {
				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
				rtlpcipriv->bt_coexist.cstate
					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to Medium\n");
			} else {
				bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state stay at Low\n");
			}
		} else if ((rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
			    BT_RSSI_STATE_MEDIUM) ||
			   (rtlpcipriv->bt_coexist.bt_pre_rssi_state ==
			    BT_RSSI_STATE_STAY_MEDIUM)) {
			/* MEDIUM may climb to HIGH or drop to LOW. */
			if (smooth >=
			    (rssi_thresh1 + BT_FW_COEX_THRESH_TOL)) {
				bt_rssi_state = BT_RSSI_STATE_HIGH;
				rtlpcipriv->bt_coexist.cstate
					|= BT_COEX_STATE_WIFI_RSSI_HIGH;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to High\n");
			} else if (smooth < rssi_thresh) {
				bt_rssi_state = BT_RSSI_STATE_LOW;
				rtlpcipriv->bt_coexist.cstate
					|= BT_COEX_STATE_WIFI_RSSI_LOW;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to Low\n");
			} else {
				bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state stay at Medium\n");
			}
		} else {
			/* HIGH (or STAY_HIGH) may only drop to MEDIUM. */
			if (smooth < rssi_thresh1) {
				bt_rssi_state = BT_RSSI_STATE_MEDIUM;
				rtlpcipriv->bt_coexist.cstate
					|= BT_COEX_STATE_WIFI_RSSI_MEDIUM;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
				rtlpcipriv->bt_coexist.cstate
					&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state switch to Medium\n");
			} else {
				bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
					 "[DM][BT], RSSI state stay at High\n");
			}
		}
	}

	rtlpcipriv->bt_coexist.bt_pre_rssi_state = bt_rssi_state;
	return bt_rssi_state;
}
353
354long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
355{
356 struct rtl_priv *rtlpriv = rtl_priv(hw);
357 long smooth = 0;
358
359 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
360 smooth = GET_UNDECORATED_AVERAGE_RSSI(rtlpriv);
361 else
362 smooth = rtlpriv->dm.entry_min_undec_sm_pwdb;
363
364 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
365 "rtl8723ae_dm_bt_get_rx_ss() = %ld\n", smooth);
366
367 return smooth;
368}
369
370void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw,
371 bool balance_on, u8 ms0, u8 ms1)
372{
373 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
374 struct rtl_priv *rtlpriv = rtl_priv(hw);
375 u8 h2c_parameter[3] = {0};
376
377 if (balance_on) {
378 h2c_parameter[2] = 1;
379 h2c_parameter[1] = ms1;
380 h2c_parameter[0] = ms0;
381 rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
382 } else {
383 h2c_parameter[2] = 0;
384 h2c_parameter[1] = 0;
385 h2c_parameter[0] = 0;
386 }
387 rtlpcipriv->bt_coexist.balance_on = balance_on;
388
389 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
390 "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
391 balance_on ? "ON" : "OFF", ms0, ms1,
392 h2c_parameter[0]<<16 | h2c_parameter[1]<<8 | h2c_parameter[2]);
393
394 rtl8723ae_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
395}
396
397
398void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
399{
400 struct rtl_priv *rtlpriv = rtl_priv(hw);
401 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
402
403 if (type == BT_AGCTABLE_OFF) {
404 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
405 "[BT]AGCTable Off!\n");
406 rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
407 rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
408 rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
409 rtl_write_dword(rtlpriv, 0xc78, 0x611f0001);
410 rtl_write_dword(rtlpriv, 0xc78, 0x60200001);
411
412 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
413 RF_RX_AGC_HP, 0xfffff, 0x32000);
414 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
415 RF_RX_AGC_HP, 0xfffff, 0x71000);
416 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
417 RF_RX_AGC_HP, 0xfffff, 0xb0000);
418 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
419 RF_RX_AGC_HP, 0xfffff, 0xfc000);
420 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
421 RF_RX_G1, 0xfffff, 0x30355);
422 } else if (type == BT_AGCTABLE_ON) {
423 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
424 "[BT]AGCTable On!\n");
425 rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
426 rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
427 rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
428 rtl_write_dword(rtlpriv, 0xc78, 0x4b1f0001);
429 rtl_write_dword(rtlpriv, 0xc78, 0x4a200001);
430
431 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
432 RF_RX_AGC_HP, 0xfffff, 0xdc000);
433 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
434 RF_RX_AGC_HP, 0xfffff, 0x90000);
435 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
436 RF_RX_AGC_HP, 0xfffff, 0x51000);
437 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
438 RF_RX_AGC_HP, 0xfffff, 0x12000);
439 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A,
440 RF_RX_G1, 0xfffff, 0x00355);
441
442 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
443 }
444}
445
446void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type)
447{
448 struct rtl_priv *rtlpriv = rtl_priv(hw);
449 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
450
451 if (type == BT_BB_BACKOFF_OFF) {
452 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
453 "[BT]BBBackOffLevel Off!\n");
454 rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
455 } else if (type == BT_BB_BACKOFF_ON) {
456 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
457 "[BT]BBBackOffLevel On!\n");
458 rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
459 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
460 }
461}
462
463void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
464{
465 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
466 struct rtl_priv *rtlpriv = rtl_priv(hw);
467 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
468 "rtl8723ae_dm_bt_fw_coex_all_off()\n");
469
470 if (rtlpcipriv->bt_coexist.fw_coexist_all_off)
471 return;
472
473 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
474 "rtl8723ae_dm_bt_fw_coex_all_off(), real Do\n");
475 rtl8723ae_dm_bt_fw_coex_all_off_8723a(hw);
476 rtlpcipriv->bt_coexist.fw_coexist_all_off = true;
477}
478
479void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
480{
481 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
482 struct rtl_priv *rtlpriv = rtl_priv(hw);
483
484 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
485 "rtl8723ae_dm_bt_sw_coex_all_off()\n");
486
487 if (rtlpcipriv->bt_coexist.sw_coexist_all_off)
488 return;
489
490 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
491 "rtl8723ae_dm_bt_sw_coex_all_off(), real Do\n");
492 rtl8723ae_dm_bt_sw_coex_all_off_8723a(hw);
493 rtlpcipriv->bt_coexist.sw_coexist_all_off = true;
494}
495
496void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
497{
498 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
499 struct rtl_priv *rtlpriv = rtl_priv(hw);
500
501 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
502 "rtl8723ae_dm_bt_hw_coex_all_off()\n");
503
504 if (rtlpcipriv->bt_coexist.hw_coexist_all_off)
505 return;
506 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
507 "rtl8723ae_dm_bt_hw_coex_all_off(), real Do\n");
508
509 rtl8723ae_dm_bt_hw_coex_all_off_8723a(hw);
510
511 rtlpcipriv->bt_coexist.hw_coexist_all_off = true;
512}
513
/* Turn every class of BT coexistence mechanism (firmware, software,
 * hardware) off; each helper skips itself if its class is already off. */
void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw)
{
	rtl8723ae_dm_bt_fw_coex_all_off(hw);
	rtl8723ae_dm_bt_sw_coex_all_off(hw);
	rtl8723ae_dm_bt_hw_coex_all_off(hw);
}
520
521bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw)
522{
523 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
524
525 if ((rtlpcipriv->bt_coexist.previous_state ==
526 rtlpcipriv->bt_coexist.cstate) &&
527 (rtlpcipriv->bt_coexist.previous_state_h ==
528 rtlpcipriv->bt_coexist.cstate_h))
529 return false;
530 else
531 return true;
532}
533
534bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw)
535{
536 struct rtl_priv *rtlpriv = rtl_priv(hw);
537
538 if (rtlpriv->link_info.tx_busy_traffic)
539 return true;
540 else
541 return false;
542}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
new file mode 100644
index 000000000000..76f4d122dbc1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -0,0 +1,160 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 * Larry Finger <Larry.Finger@lwfinger.net>
26 *
27 *****************************************************************************/
28
29#ifndef __RTL8723E_HAL_BT_COEXIST_H__
30#define __RTL8723E_HAL_BT_COEXIST_H__
31
32#include "../wifi.h"
33
34/* The reg define is for 8723 */
35#define REG_HIGH_PRIORITY_TXRX 0x770
36#define REG_LOW_PRIORITY_TXRX 0x774
37
38#define BT_FW_COEX_THRESH_TOL 6
39#define BT_FW_COEX_THRESH_20 20
40#define BT_FW_COEX_THRESH_23 23
41#define BT_FW_COEX_THRESH_25 25
42#define BT_FW_COEX_THRESH_30 30
43#define BT_FW_COEX_THRESH_35 35
44#define BT_FW_COEX_THRESH_40 40
45#define BT_FW_COEX_THRESH_45 45
46#define BT_FW_COEX_THRESH_47 47
47#define BT_FW_COEX_THRESH_50 50
48#define BT_FW_COEX_THRESH_55 55
49
50#define BT_COEX_STATE_BT30 BIT(0)
51#define BT_COEX_STATE_WIFI_HT20 BIT(1)
52#define BT_COEX_STATE_WIFI_HT40 BIT(2)
53#define BT_COEX_STATE_WIFI_LEGACY BIT(3)
54
55#define BT_COEX_STATE_WIFI_RSSI_LOW BIT(4)
56#define BT_COEX_STATE_WIFI_RSSI_MEDIUM BIT(5)
57#define BT_COEX_STATE_WIFI_RSSI_HIGH BIT(6)
58#define BT_COEX_STATE_DEC_BT_POWER BIT(7)
59
60#define BT_COEX_STATE_WIFI_IDLE BIT(8)
61#define BT_COEX_STATE_WIFI_UPLINK BIT(9)
62#define BT_COEX_STATE_WIFI_DOWNLINK BIT(10)
63
64#define BT_COEX_STATE_BT_INQ_PAGE BIT(11)
65#define BT_COEX_STATE_BT_IDLE BIT(12)
66#define BT_COEX_STATE_BT_UPLINK BIT(13)
67#define BT_COEX_STATE_BT_DOWNLINK BIT(14)
68
69#define BT_COEX_STATE_HOLD_FOR_BT_OPERATION BIT(15)
70#define BT_COEX_STATE_BT_RSSI_LOW BIT(19)
71
72#define BT_COEX_STATE_PROFILE_HID BIT(20)
73#define BT_COEX_STATE_PROFILE_A2DP BIT(21)
74#define BT_COEX_STATE_PROFILE_PAN BIT(22)
75#define BT_COEX_STATE_PROFILE_SCO BIT(23)
76
77#define BT_COEX_STATE_WIFI_RSSI_1_LOW BIT(24)
78#define BT_COEX_STATE_WIFI_RSSI_1_MEDIUM BIT(25)
79#define BT_COEX_STATE_WIFI_RSSI_1_HIGH BIT(26)
80
81#define BT_COEX_STATE_BTINFO_COMMON BIT(30)
82#define BT_COEX_STATE_BTINFO_B_HID_SCOESCO BIT(31)
83#define BT_COEX_STATE_BTINFO_B_FTP_A2DP BIT(29)
84
85#define BT_COEX_STATE_BT_CNT_LEVEL_0 BIT(0)
86#define BT_COEX_STATE_BT_CNT_LEVEL_1 BIT(1)
87#define BT_COEX_STATE_BT_CNT_LEVEL_2 BIT(2)
88#define BT_COEX_STATE_BT_CNT_LEVEL_3 BIT(3)
89
90#define BT_RSSI_STATE_HIGH 0
91#define BT_RSSI_STATE_MEDIUM 1
92#define BT_RSSI_STATE_LOW 2
93#define BT_RSSI_STATE_STAY_HIGH 3
94#define BT_RSSI_STATE_STAY_MEDIUM 4
95#define BT_RSSI_STATE_STAY_LOW 5
96
97#define BT_AGCTABLE_OFF 0
98#define BT_AGCTABLE_ON 1
99#define BT_BB_BACKOFF_OFF 0
100#define BT_BB_BACKOFF_ON 1
101#define BT_FW_NAV_OFF 0
102#define BT_FW_NAV_ON 1
103
104#define BT_COEX_MECH_NONE 0
105#define BT_COEX_MECH_SCO 1
106#define BT_COEX_MECH_HID 2
107#define BT_COEX_MECH_A2DP 3
108#define BT_COEX_MECH_PAN 4
109#define BT_COEX_MECH_HID_A2DP 5
110#define BT_COEX_MECH_HID_PAN 6
111#define BT_COEX_MECH_PAN_A2DP 7
112#define BT_COEX_MECH_HID_SCO_ESCO 8
113#define BT_COEX_MECH_FTP_A2DP 9
114#define BT_COEX_MECH_COMMON 10
115#define BT_COEX_MECH_MAX 11
116
117#define BT_DBG_PROFILE_NONE 0
118#define BT_DBG_PROFILE_SCO 1
119#define BT_DBG_PROFILE_HID 2
120#define BT_DBG_PROFILE_A2DP 3
121#define BT_DBG_PROFILE_PAN 4
122#define BT_DBG_PROFILE_HID_A2DP 5
123#define BT_DBG_PROFILE_HID_PAN 6
124#define BT_DBG_PROFILE_PAN_A2DP 7
125#define BT_DBG_PROFILE_MAX 9
126
127#define BTINFO_B_FTP BIT(7)
128#define BTINFO_B_A2DP BIT(6)
129#define BTINFO_B_HID BIT(5)
130#define BTINFO_B_SCO_BUSY BIT(4)
131#define BTINFO_B_ACL_BUSY BIT(3)
132#define BTINFO_B_INQ_PAGE BIT(2)
133#define BTINFO_B_SCO_ESCO BIT(1)
134#define BTINFO_B_CONNECTION BIT(0)
135
136
137void rtl8723ae_btdm_coex_all_off(struct ieee80211_hw *hw);
138void rtl8723ae_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw);
139
140void rtl8723ae_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw);
141void rtl8723ae_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
142long rtl8723ae_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
143void rtl8723ae_dm_bt_balance(struct ieee80211_hw *hw,
144 bool balance_on, u8 ms0, u8 ms1);
145void rtl8723ae_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
146void rtl8723ae_dm_bt_bback_off_level(struct ieee80211_hw *hw, u8 type);
147u8 rtl8723ae_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
148 u8 level_num, u8 rssi_thresh,
149 u8 rssi_thresh1);
150u8 rtl8723ae_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
151 u8 level_num, u8 rssi_thresh,
152 u8 rssi_thresh1);
153void _rtl8723_dm_bt_check_wifi_state(struct ieee80211_hw *hw);
154void rtl8723ae_dm_bt_reject_ap_aggregated_packet(struct ieee80211_hw *hw,
155 bool reject);
156
157bool rtl8723ae_dm_bt_is_coexist_state_changed(struct ieee80211_hw *hw);
158bool rtl8723ae_dm_bt_is_wifi_up_link(struct ieee80211_hw *hw);
159
160#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
new file mode 100644
index 000000000000..887d521fe690
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.c
@@ -0,0 +1,1786 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 ****************************************************************************
29 */
30#include "hal_btc.h"
31#include "../pci.h"
32#include "phy.h"
33#include "fw.h"
34#include "reg.h"
35#include "def.h"
36
37void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw)
38{
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
41 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
42
43 if (!rtlpcipriv->bt_coexist.bt_coexistence)
44 return;
45
46 if (ppsc->inactiveps) {
47 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
48 "[BT][DM], Before enter IPS, turn off all Coexist DM\n");
49 rtlpcipriv->bt_coexist.cstate = 0;
50 rtlpcipriv->bt_coexist.previous_state = 0;
51 rtlpcipriv->bt_coexist.cstate_h = 0;
52 rtlpcipriv->bt_coexist.previous_state_h = 0;
53 rtl8723ae_btdm_coex_all_off(hw);
54 }
55}
56
57static enum _RT_MEDIA_STATUS mgnt_link_status_query(struct ieee80211_hw *hw)
58{
59 struct rtl_priv *rtlpriv = rtl_priv(hw);
60 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
61 enum _RT_MEDIA_STATUS m_status = RT_MEDIA_DISCONNECT;
62
63 u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
64
65 if (bibss || rtlpriv->mac80211.link_state >= MAC80211_LINKED)
66 m_status = RT_MEDIA_CONNECT;
67
68 return m_status;
69}
70
71void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
72 bool mstatus)
73{
74 struct rtl_priv *rtlpriv = rtl_priv(hw);
75 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
76 struct rtl_phy *rtlphy = &(rtlpriv->phy);
77 u8 h2c_parameter[3] = {0};
78 u8 chnl;
79
80 if (!rtlpcipriv->bt_coexist.bt_coexistence)
81 return;
82
83 if (RT_MEDIA_CONNECT == mstatus)
84 h2c_parameter[0] = 0x1; /* 0: disconnected, 1:connected */
85 else
86 h2c_parameter[0] = 0x0;
87
88 if (mgnt_link_status_query(hw)) {
89 chnl = rtlphy->current_channel;
90 h2c_parameter[1] = chnl;
91 }
92
93 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
94 h2c_parameter[2] = 0x30;
95 else
96 h2c_parameter[2] = 0x20;
97
98 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
99 "[BTCoex], FW write 0x19 = 0x%x\n",
100 h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
101
102 rtl8723ae_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
103
104}
105
106static bool rtl8723ae_dm_bt_is_wifi_busy(struct ieee80211_hw *hw)
107{
108 struct rtl_priv *rtlpriv = rtl_priv(hw);
109 if (rtlpriv->link_info.busytraffic ||
110 rtlpriv->link_info.rx_busy_traffic ||
111 rtlpriv->link_info.tx_busy_traffic)
112 return true;
113 else
114 return false;
115}
116
117static void rtl8723ae_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
118 u8 byte1, u8 byte2, u8 byte3,
119 u8 byte4, u8 byte5)
120{
121 struct rtl_priv *rtlpriv = rtl_priv(hw);
122 u8 h2c_parameter[5] = {0};
123
124 h2c_parameter[0] = byte1;
125 h2c_parameter[1] = byte2;
126 h2c_parameter[2] = byte3;
127 h2c_parameter[3] = byte4;
128 h2c_parameter[4] = byte5;
129 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
130 "[BTCoex], FW write 0x3a(4bytes) = 0x%x%8x\n",
131 h2c_parameter[0], h2c_parameter[1]<<24 | h2c_parameter[2]<<16 |
132 h2c_parameter[3]<<8 | h2c_parameter[4]);
133 rtl8723ae_fill_h2c_cmd(hw, 0x3a, 5, h2c_parameter);
134}
135
136static bool rtl8723ae_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
137{
138 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
139 struct rtl_priv *rtlpriv = rtl_priv(hw);
140
141 if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
142 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
143 "Need to decrease bt power\n");
144 rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_DEC_BT_POWER;
145 return true;
146 }
147
148 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
149 return false;
150}
151
152static bool rtl8723ae_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
153{
154 struct rtl_priv *rtlpriv = rtl_priv(hw);
155 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
156
157 if ((rtlpcipriv->bt_coexist.previous_state ==
158 rtlpcipriv->bt_coexist.cstate) &&
159 (rtlpcipriv->bt_coexist.previous_state_h ==
160 rtlpcipriv->bt_coexist.cstate_h)) {
161 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
162 "[DM][BT], Coexist state do not chang!!\n");
163 return true;
164 } else {
165 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
166 "[DM][BT], Coexist state changed!!\n");
167 return false;
168 }
169}
170
/* Program the PTA coexistence arbitration table registers 0x6c0, 0x6c8
 * and 0x6cc.
 * NOTE(review): val_0x6cc is a u32 but is written with rtl_write_byte(),
 * so only its low byte reaches the hardware.  Callers visible in this
 * driver pass small values (e.g. 0x3 from the btdm defaults), so this
 * looks intentional — confirm against the 8723 register map. */
static void rtl8723ae_dm_bt_set_coex_table(struct ieee80211_hw *hw,
					   u32 val_0x6c0, u32 val_0x6c8,
					   u32 val_0x6cc)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "set coex table, set 0x6c0 = 0x%x\n", val_0x6c0);
	rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "set coex table, set 0x6c8 = 0x%x\n", val_0x6c8);
	rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "set coex table, set 0x6cc = 0x%x\n", val_0x6cc);
	rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
}
189
190static void rtl8723ae_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool mode)
191{
192 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
193 struct rtl_priv *rtlpriv = rtl_priv(hw);
194
195 if (BT_PTA_MODE_ON == mode) {
196 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on, ");
197 /* Enable GPIO 0/1/2/3/8 pins for bt */
198 rtl_write_byte(rtlpriv, 0x40, 0x20);
199 rtlpcipriv->bt_coexist.hw_coexist_all_off = false;
200 } else {
201 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
202 rtl_write_byte(rtlpriv, 0x40, 0x0);
203 }
204}
205
206static void rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw,
207 u8 type)
208{
209 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
210 struct rtl_priv *rtlpriv = rtl_priv(hw);
211
212 if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
213 /* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] by Jenyu*/
214 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
215 "Shrink RF Rx LPF corner!!\n");
216 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
217 0xf0ff7);
218 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
219 } else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
220 /*Resume RF Rx LPF corner*/
221 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
222 "Resume RF Rx LPF corner!!\n");
223 rtl8723ae_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
224 rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
225 }
226}
227
228static void rtl8723ae_bt_set_penalty_tx_rate_adap(struct ieee80211_hw *hw,
229 u8 ra_type)
230{
231 struct rtl_priv *rtlpriv = rtl_priv(hw);
232 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
233 u8 tmu1;
234
235 tmu1 = rtl_read_byte(rtlpriv, 0x4fd);
236 tmu1 |= BIT(0);
237 if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
238 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
239 "Tx rate adaptive, set low penalty!!\n");
240 tmu1 &= ~BIT(2);
241 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
242 } else if (BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
243 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
244 "Tx rate adaptive, set normal!!\n");
245 tmu1 |= BIT(2);
246 }
247 rtl_write_byte(rtlpriv, 0x4fd, tmu1);
248}
249
/* Reset @btdm to the driver's baseline coexistence settings.
 * Callers reload this baseline, then tweak individual fields for the
 * current traffic scenario before applying the result with
 * rtl8723ae_dm_bt_set_bt_dm().  (@hw is unused here; the parameter is
 * kept for symmetry with the other dm helpers.)
 */
static void rtl8723ae_dm_bt_btdm_structure_reload(struct ieee80211_hw *hw,
						  struct btdm_8723 *btdm)
{
	/* software coexistence mechanisms: all disabled */
	btdm->all_off = false;
	btdm->agc_table_en = false;
	btdm->adc_back_off_on = false;
	btdm->b2_ant_hid_en = false;
	btdm->low_penalty_rate_adaptive = false;
	btdm->rf_rx_lpf_shrink = false;
	btdm->reject_aggre_pkt = false;

	/* firmware TDMA: off, 2-antenna, no NAV, no DAC swing */
	btdm->tdma_on = false;
	btdm->tdma_ant = TDMA_2ANT;
	btdm->tdma_nav = TDMA_NAV_OFF;
	btdm->tdma_dac_swing = TDMA_DAC_SWING_OFF;
	btdm->fw_dac_swing_lvl = 0x20;

	/* traditional TDMA: off */
	btdm->tra_tdma_on = false;
	btdm->tra_tdma_ant = TDMA_2ANT;
	btdm->tra_tdma_nav = TDMA_NAV_OFF;
	btdm->ignore_wlan_act = false;

	/* PS-TDMA: off; bytes are the 0x3a H2C payload defaults */
	btdm->ps_tdma_on = false;
	btdm->ps_tdma_byte[0] = 0x0;
	btdm->ps_tdma_byte[1] = 0x0;
	btdm->ps_tdma_byte[2] = 0x0;
	btdm->ps_tdma_byte[3] = 0x8;
	btdm->ps_tdma_byte[4] = 0x0;

	/* hardware PTA on, with default coex table values */
	btdm->pta_on = true;
	btdm->val_0x6c0 = 0x5a5aaaaa;
	btdm->val_0x6c8 = 0xcc;
	btdm->val_0x6cc = 0x3;

	btdm->sw_dac_swing_on = false;
	btdm->sw_dac_swing_lvl = 0xc0;
	btdm->wlan_act_hi = 0x20;
	btdm->wlan_act_lo = 0x10;
	btdm->bt_retry_index = 2;

	btdm->dec_bt_pwr = false;
}
292
/* Reload the baseline, then flip it into the "everything off" variant:
 * all_off set, PTA disabled, and a lower WLAN_ACT high duration. */
static void dm_bt_btdm_structure_reload_all_off(struct ieee80211_hw *hw,
						struct btdm_8723 *btdm)
{
	rtl8723ae_dm_bt_btdm_structure_reload(hw, btdm);
	btdm->all_off = true;
	btdm->pta_on = false;
	btdm->wlan_act_hi = 0x10;
}
301
/* Handle the "common" 2-antenna coexistence cases that do not depend on
 * the specific BT profile:
 *   - wifi idle + BT idle            -> everything off
 *   - wifi busy + BT idle/disabled   -> PTA only, low Tx penalty
 *   - BT busy, no wifi connection    -> PTA only, Rx LPF shrink
 * Returns true when one of these cases applied (and, if the coexist
 * state changed, the new setting has been programmed); false means the
 * caller must run profile-specific handling.
 */
static bool rtl8723ae_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct btdm_8723 btdm8723;
	bool common = false;

	/* start from the baseline setting on every evaluation */
	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);

	if (!rtl8723ae_dm_bt_is_wifi_busy(hw)
	    && !rtlpcipriv->bt_coexist.bt_busy) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
		dm_bt_btdm_structure_reload_all_off(hw, &btdm8723);
		common = true;
	} else if (rtl8723ae_dm_bt_is_wifi_busy(hw)
		   && !rtlpcipriv->bt_coexist.bt_busy) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "Wifi non-idle + Bt disabled/idle!!\n");
		btdm8723.low_penalty_rate_adaptive = true;
		btdm8723.rf_rx_lpf_shrink = false;
		btdm8723.reject_aggre_pkt = false;

		/* sw mechanism */
		btdm8723.agc_table_en = false;
		btdm8723.adc_back_off_on = false;
		btdm8723.sw_dac_swing_on = false;

		/* hw PTA with a wifi-favouring coex table */
		btdm8723.pta_on = true;
		btdm8723.val_0x6c0 = 0x5a5aaaaa;
		btdm8723.val_0x6c8 = 0xcccc;
		btdm8723.val_0x6cc = 0x3;

		btdm8723.tdma_on = false;
		btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF;
		btdm8723.b2_ant_hid_en = false;

		common = true;
	} else if (rtlpcipriv->bt_coexist.bt_busy) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "Bt non-idle!\n");
		if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
			/* BT busy AND wifi connected: not a common case */
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi connection exist\n");
			common = false;
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "No Wifi connection!\n");
			btdm8723.rf_rx_lpf_shrink = true;
			btdm8723.low_penalty_rate_adaptive = false;
			btdm8723.reject_aggre_pkt = false;

			/* sw mechanism */
			btdm8723.agc_table_en = false;
			btdm8723.adc_back_off_on = false;
			btdm8723.sw_dac_swing_on = false;

			/* hw PTA with a BT-favouring coex table */
			btdm8723.pta_on = true;
			btdm8723.val_0x6c0 = 0x55555555;
			btdm8723.val_0x6c8 = 0x0000ffff;
			btdm8723.val_0x6cc = 0x3;

			btdm8723.tdma_on = false;
			btdm8723.tdma_dac_swing = TDMA_DAC_SWING_OFF;
			btdm8723.b2_ant_hid_en = false;

			common = true;
		}
	}

	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
		btdm8723.dec_bt_pwr = true;

	if (common)
		rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BTINFO_COMMON;

	/* only reprogram the hardware when the state actually changed */
	if (common && rtl8723ae_dm_bt_is_coexist_state_changed(hw))
		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);

	return common;
}
383
384static void rtl8723ae_dm_bt_set_sw_full_time_dac_swing(struct ieee80211_hw *hw,
385 bool sw_dac_swing_on,
386 u32 sw_dac_swing_lvl)
387{
388 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
389 struct rtl_priv *rtlpriv = rtl_priv(hw);
390
391 if (sw_dac_swing_on) {
392 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
393 "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
394 rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000,
395 sw_dac_swing_lvl);
396 rtlpcipriv->bt_coexist.sw_coexist_all_off = false;
397 } else {
398 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
399 "[BTCoex], SwDacSwing Off!\n");
400 rtl8723ae_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
401 }
402}
403
404static void rtl8723ae_dm_bt_set_fw_dec_bt_pwr(struct ieee80211_hw *hw,
405 bool dec_bt_pwr)
406{
407 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
408 struct rtl_priv *rtlpriv = rtl_priv(hw);
409 u8 h2c_parameter[1] = {0};
410
411 h2c_parameter[0] = 0;
412
413 if (dec_bt_pwr) {
414 h2c_parameter[0] |= BIT(1);
415 rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
416 }
417
418 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
419 "[BTCoex], decrease Bt Power : %s, write 0x21 = 0x%x\n",
420 (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
421
422 rtl8723ae_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
423}
424
/* Turn the firmware 2-antenna HID mode on or off (H2C 0x15).
 * Payload: BIT(0) = mode enable, BIT(1) = DAC swing enable.
 */
static void rtl8723ae_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
					     bool enable, bool dac_swing_on)
{
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 h2c_parameter[1] = {0};

	if (enable) {
		h2c_parameter[0] |= BIT(0);
		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
	}
	if (dac_swing_on)
		h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15 = 0x%x\n",
		 (enable ? "ON!!" : "OFF!!"), (dac_swing_on ? "ON" : "OFF"),
		 h2c_parameter[0]);

	rtl8723ae_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
}
445
446static void rtl8723ae_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
447 bool enable, u8 ant_num, u8 nav_en,
448 u8 dac_swing_en)
449{
450 struct rtl_priv *rtlpriv = rtl_priv(hw);
451 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
452 u8 h2c_parameter[1] = {0};
453 u8 h2c_parameter1[1] = {0};
454
455 h2c_parameter[0] = 0;
456 h2c_parameter1[0] = 0;
457
458 if (enable) {
459 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
460 "[BTCoex], set BT PTA update manager to trigger update!!\n");
461 h2c_parameter1[0] |= BIT(0);
462
463 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
464 "[BTCoex], turn TDMA mode ON!!\n");
465 h2c_parameter[0] |= BIT(0); /* function enable */
466 if (TDMA_1ANT == ant_num) {
467 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
468 "[BTCoex], TDMA_1ANT\n");
469 h2c_parameter[0] |= BIT(1);
470 } else if (TDMA_2ANT == ant_num) {
471 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
472 "[BTCoex], TDMA_2ANT\n");
473 } else {
474 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
475 "[BTCoex], Unknown Ant\n");
476 }
477
478 if (TDMA_NAV_OFF == nav_en) {
479 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
480 "[BTCoex], TDMA_NAV_OFF\n");
481 } else if (TDMA_NAV_ON == nav_en) {
482 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
483 "[BTCoex], TDMA_NAV_ON\n");
484 h2c_parameter[0] |= BIT(2);
485 }
486
487 if (TDMA_DAC_SWING_OFF == dac_swing_en) {
488 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
489 "[BTCoex], TDMA_DAC_SWING_OFF\n");
490 } else if (TDMA_DAC_SWING_ON == dac_swing_en) {
491 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
492 "[BTCoex], TDMA_DAC_SWING_ON\n");
493 h2c_parameter[0] |= BIT(4);
494 }
495 rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
496 } else {
497 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
498 "[BTCoex], set BT PTA update manager to no update!!\n");
499 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
500 "[BTCoex], turn TDMA mode OFF!!\n");
501 }
502
503 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
504 "[BTCoex], FW2AntTDMA, write 0x26 = 0x%x\n",
505 h2c_parameter1[0]);
506 rtl8723ae_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
507
508 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
509 "[BTCoex], FW2AntTDMA, write 0x14 = 0x%x\n", h2c_parameter[0]);
510 rtl8723ae_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
511}
512
/* Tell BT firmware (H2C 0x25) whether to ignore the WLAN_ACT signal.
 * BIT(0) set = ignore; when ignoring, fw coexistence is marked active.
 */
static void rtl8723ae_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw,
						   bool enable)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u8 h2c_parameter[1] = {0};

	if (enable) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], BT Ignore Wlan_Act !!\n");
		h2c_parameter[0] |= BIT(0); /* function enable */
		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], BT don't ignore Wlan_Act !!\n");
	}

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25 = 0x%x\n",
		 h2c_parameter[0]);

	rtl8723ae_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
}
536
/* Program firmware "traditional" TDMA via H2C 0x33 (2-byte payload:
 * byte0 BIT(0) enable, BIT(1) 1-antenna; byte1 BIT(0) NAV enable).
 * Bails out early on A-cut silicon — per the comment this feature is
 * only meant for 8723 B-cut parts.
 */
static void rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
						 bool enable, u8 ant_num,
						 u8 nav_en)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 h2c_parameter[2] = {0};

	/* Only 8723 B cut should do this */
	if (IS_VENDOR_8723_A_CUT(rtlhal->version)) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
		return;
	}

	if (enable) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], turn TTDMA mode ON!!\n");
		h2c_parameter[0] |= BIT(0); /* function enable */
		if (TDMA_1ANT == ant_num) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[BTCoex], TTDMA_1ANT\n");
			h2c_parameter[0] |= BIT(1);
		} else if (TDMA_2ANT == ant_num) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[BTCoex], TTDMA_2ANT\n");
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[BTCoex], Unknown Ant\n");
		}

		if (TDMA_NAV_OFF == nav_en) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[BTCoex], TTDMA_NAV_OFF\n");
		} else if (TDMA_NAV_ON == nav_en) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "[BTCoex], TTDMA_NAV_ON\n");
			h2c_parameter[1] |= BIT(0);
		}

		rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], turn TTDMA mode OFF!!\n");
	}

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "[BTCoex], FW Traditional TDMA, write 0x33 = 0x%x\n",
		 h2c_parameter[0] << 8 | h2c_parameter[1]);

	rtl8723ae_fill_h2c_cmd(hw, 0x33, 2, h2c_parameter);
}
590
591static void rtl8723ae_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
592 u8 dac_swing_lvl)
593{
594 struct rtl_priv *rtlpriv = rtl_priv(hw);
595 u8 h2c_parameter[1] = {0};
596
597 h2c_parameter[0] = dac_swing_lvl;
598
599 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
600 "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
601 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
602 "[BTCoex], write 0x29 = 0x%x\n", h2c_parameter[0]);
603
604 rtl8723ae_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
605}
606
607static void rtl8723ae_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw,
608 bool enable)
609{
610 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
611 struct rtl_priv *rtlpriv = rtl_priv(hw);
612 u8 h2c_parameter[1] = {0};
613
614 h2c_parameter[0] = 0;
615
616 if (enable) {
617 h2c_parameter[0] |= BIT(0);
618 rtlpcipriv->bt_coexist.fw_coexist_all_off = false;
619 }
620 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
621 "[BTCoex], Set BT HID information = 0x%x\n", enable);
622 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
623 "[BTCoex], write 0x24 = 0x%x\n", h2c_parameter[0]);
624
625 rtl8723ae_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
626}
627
628static void rtl8723ae_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
629 u8 retry_index)
630{
631 struct rtl_priv *rtlpriv = rtl_priv(hw);
632 u8 h2c_parameter[1] = {0};
633
634 h2c_parameter[0] = retry_index;
635
636 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
637 "[BTCoex], Set BT Retry Index=%d\n", retry_index);
638 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
639 "[BTCoex], write 0x23 = 0x%x\n", h2c_parameter[0]);
640
641 rtl8723ae_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
642}
643
644static void rtl8723ae_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
645 u8 wlan_act_hi, u8 wlan_act_lo)
646{
647 struct rtl_priv *rtlpriv = rtl_priv(hw);
648 u8 h2c_parameter_hi[1] = {0};
649 u8 h2c_parameter_lo[1] = {0};
650
651 h2c_parameter_hi[0] = wlan_act_hi;
652 h2c_parameter_lo[0] = wlan_act_lo;
653
654 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
655 "[BTCoex], Set WLAN_ACT Hi:Lo = 0x%x/0x%x\n", wlan_act_hi,
656 wlan_act_lo);
657 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
658 "[BTCoex], write 0x22 = 0x%x\n", h2c_parameter_hi[0]);
659 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
660 "[BTCoex], write 0x11 = 0x%x\n", h2c_parameter_lo[0]);
661
662 /* WLAN_ACT = High duration, unit:ms */
663 rtl8723ae_fill_h2c_cmd(hw, 0x22, 1, h2c_parameter_hi);
664 /* WLAN_ACT = Low duration, unit:3*625us */
665 rtl8723ae_fill_h2c_cmd(hw, 0x11, 1, h2c_parameter_lo);
666}
667
/* Apply a complete BT-coexist configuration to hardware and firmware.
 * Skips all work when @btdm equals the cached setting; otherwise dumps
 * original vs. new values, caches the new setting, then programs the
 * sw mechanisms, coex table, PTA mode, and exactly one of the mutually
 * exclusive fw mechanisms (2AntHID / TDMA / PS-TDMA / all off), ending
 * with the sw DAC swing and BT power decrease.
 */
void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw, struct btdm_8723 *btdm)
{
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	struct btdm_8723 *btdm_8723 = &rtlhal->hal_coex_8723.btdm;
	u8 i;
	bool fw_current_inpsmode = false;
	bool fw_ps_awake = true;

	/* NOTE(review): both values are fetched but not referenced later
	 * in this function — presumably kept for future use; confirm. */
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
				      (u8 *)(&fw_current_inpsmode));
	rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
				      (u8 *)(&fw_ps_awake));

	/* check new setting is different than the old one,
	 * if all the same, don't do the setting again.
	 */
	if (memcmp(btdm_8723, btdm, sizeof(struct btdm_8723)) == 0) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], the same coexist setting, return!!\n");
		return;
	} else { /* save the new coexist setting */
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], UPDATE TO NEW COEX SETTING!!\n");
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bAllOff = 0x%x/ 0x%x\n",
			 btdm_8723->all_off, btdm->all_off);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new agc_table_en = 0x%x/ 0x%x\n",
			 btdm_8723->agc_table_en, btdm->agc_table_en);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new adc_back_off_on = 0x%x/ 0x%x\n",
			 btdm_8723->adc_back_off_on, btdm->adc_back_off_on);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new b2_ant_hid_en = 0x%x/ 0x%x\n",
			 btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bLowPenaltyRateAdaptive = 0x%x/ 0x%x\n",
			 btdm_8723->low_penalty_rate_adaptive,
			 btdm->low_penalty_rate_adaptive);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bRfRxLpfShrink = 0x%x/ 0x%x\n",
			 btdm_8723->rf_rx_lpf_shrink, btdm->rf_rx_lpf_shrink);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bRejectAggrePkt = 0x%x/ 0x%x\n",
			 btdm_8723->reject_aggre_pkt, btdm->reject_aggre_pkt);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new tdma_on = 0x%x/ 0x%x\n",
			 btdm_8723->tdma_on, btdm->tdma_on);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new tdmaAnt = 0x%x/ 0x%x\n",
			 btdm_8723->tdma_ant, btdm->tdma_ant);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new tdmaNav = 0x%x/ 0x%x\n",
			 btdm_8723->tdma_nav, btdm->tdma_nav);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new tdma_dac_swing = 0x%x/ 0x%x\n",
			 btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new fwDacSwingLvl = 0x%x/ 0x%x\n",
			 btdm_8723->fw_dac_swing_lvl, btdm->fw_dac_swing_lvl);

		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bTraTdmaOn = 0x%x/ 0x%x\n",
			 btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new traTdmaAnt = 0x%x/ 0x%x\n",
			 btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new traTdmaNav = 0x%x/ 0x%x\n",
			 btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bPsTdmaOn = 0x%x/ 0x%x\n",
			 btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
		for (i = 0; i < 5; i++) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "[BTCoex], original/new psTdmaByte[i] = 0x%x/ 0x%x\n",
				 btdm_8723->ps_tdma_byte[i],
				 btdm->ps_tdma_byte[i]);
		}
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bIgnoreWlanAct = 0x%x/ 0x%x\n",
			 btdm_8723->ignore_wlan_act, btdm->ignore_wlan_act);

		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new bPtaOn = 0x%x/ 0x%x\n",
			 btdm_8723->pta_on, btdm->pta_on);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new val_0x6c0 = 0x%x/ 0x%x\n",
			 btdm_8723->val_0x6c0, btdm->val_0x6c0);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new val_0x6c8 = 0x%x/ 0x%x\n",
			 btdm_8723->val_0x6c8, btdm->val_0x6c8);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new val_0x6cc = 0x%x/ 0x%x\n",
			 btdm_8723->val_0x6cc, btdm->val_0x6cc);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new sw_dac_swing_on = 0x%x/ 0x%x\n",
			 btdm_8723->sw_dac_swing_on, btdm->sw_dac_swing_on);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new sw_dac_swing_lvl = 0x%x/ 0x%x\n",
			 btdm_8723->sw_dac_swing_lvl,
			 btdm->sw_dac_swing_lvl);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new wlanActHi = 0x%x/ 0x%x\n",
			 btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new wlanActLo = 0x%x/ 0x%x\n",
			 btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], original/new btRetryIndex = 0x%x/ 0x%x\n",
			 btdm_8723->bt_retry_index, btdm->bt_retry_index);

		/* cache the new setting for the next memcmp() */
		memcpy(btdm_8723, btdm, sizeof(struct btdm_8723));
	}
	/*
	 * Here we only consider when Bt Operation
	 * inquiry/paging/pairing is ON
	 * we only need to turn off TDMA
	 */

	if (rtlpcipriv->bt_coexist.hold_for_bt_operation) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], set to ignore wlanAct for BT OP!!\n");
		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, true);
		return;
	}

	if (btdm->all_off) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], disable all coexist mechanism !!\n");
		rtl8723ae_btdm_coex_all_off(hw);
		return;
	}

	/* software mechanisms */
	rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, btdm->reject_aggre_pkt);

	if (btdm->low_penalty_rate_adaptive)
		rtl8723ae_bt_set_penalty_tx_rate_adap(hw,
					      BT_TX_RATE_ADAPTIVE_LOW_PENALTY);
	else
		rtl8723ae_bt_set_penalty_tx_rate_adap(hw,
						      BT_TX_RATE_ADAPTIVE_NORMAL);

	if (btdm->rf_rx_lpf_shrink)
		rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw,
						BT_RF_RX_LPF_CORNER_SHRINK);
	else
		rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw,
						BT_RF_RX_LPF_CORNER_RESUME);

	if (btdm->agc_table_en)
		rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_ON);
	else
		rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);

	if (btdm->adc_back_off_on)
		rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_ON);
	else
		rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF);

	/* firmware parameters common to all fw mechanisms */
	rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, btdm->bt_retry_index);

	rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, btdm->fw_dac_swing_lvl);
	rtl8723ae_dm_bt_set_fw_wlan_act(hw, btdm->wlan_act_hi,
					btdm->wlan_act_lo);

	/* hardware PTA and coex table */
	rtl8723ae_dm_bt_set_coex_table(hw, btdm->val_0x6c0,
				       btdm->val_0x6c8, btdm->val_0x6cc);
	rtl8723ae_dm_bt_set_hw_pta_mode(hw, btdm->pta_on);

	/* Note: There is a constraint between TDMA and 2AntHID
	 * Only one of 2AntHid and tdma can be turned on
	 * We should turn off those mechanisms first
	 * and then turn on them on.
	 */
	if (btdm->b2_ant_hid_en) {
		/* turn off tdma */
		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
						     btdm->tra_tdma_ant,
						     btdm->tra_tdma_nav);
		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
						 btdm->tdma_nav,
						 btdm->tdma_dac_swing);

		/* turn off Pstdma */
		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
						       btdm->ignore_wlan_act);
		/* Antenna control by PTA, 0x870 = 0x300. */
		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);

		/* turn on 2AntHid */
		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, true);
		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, true, true);
	} else if (btdm->tdma_on) {
		/* turn off 2AntHid */
		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);

		/* turn off pstdma */
		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
						       btdm->ignore_wlan_act);
		/* Antenna control by PTA, 0x870 = 0x300. */
		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);

		/* turn on tdma */
		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
				btdm->tra_tdma_ant, btdm->tra_tdma_nav);
		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, true, btdm->tdma_ant,
				btdm->tdma_nav, btdm->tdma_dac_swing);
	} else if (btdm->ps_tdma_on) {
		/* turn off 2AntHid */
		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);

		/* turn off tdma */
		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
				btdm->tra_tdma_ant, btdm->tra_tdma_nav);
		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
				btdm->tdma_nav, btdm->tdma_dac_swing);

		/* turn on pstdma */
		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
						       btdm->ignore_wlan_act);
		rtl8723ae_dm_bt_set_fw_3a(hw,
					  btdm->ps_tdma_byte[0],
					  btdm->ps_tdma_byte[1],
					  btdm->ps_tdma_byte[2],
					  btdm->ps_tdma_byte[3],
					  btdm->ps_tdma_byte[4]);
	} else {
		/* turn off 2AntHid */
		rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
		rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);

		/* turn off tdma */
		rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, btdm->tra_tdma_on,
				btdm->tra_tdma_ant, btdm->tra_tdma_nav);
		rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, btdm->tdma_ant,
				btdm->tdma_nav, btdm->tdma_dac_swing);

		/* turn off pstdma */
		rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw,
						       btdm->ignore_wlan_act);
		/* Antenna control by PTA, 0x870 = 0x300. */
		rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
	}

	/* Note:
	 * We should add delay for making sure sw DacSwing can be set
	 * sucessfully. Because of that rtl8723ae_dm_bt_set_fw_2_ant_hid()
	 * and rtl8723ae_dm_bt_set_fw_tdma_ctrl()
	 * will overwrite the reg 0x880.
	 */
	mdelay(30);
	rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw,
			btdm->sw_dac_swing_on, btdm->sw_dac_swing_lvl);
	rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, btdm->dec_bt_pwr);
}
928
929/*============================================================
930 * extern function start with BTDM_
931 *============================================================
932 */
933static u32 rtl8723ae_dm_bt_tx_rx_couter_h(struct ieee80211_hw *hw)
934{
935 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
936 u32 counters = 0;
937
938 counters = rtlhal->hal_coex_8723.high_priority_tx +
939 rtlhal->hal_coex_8723.high_priority_rx;
940 return counters;
941}
942
943static u32 rtl8723ae_dm_bt_tx_rx_couter_l(struct ieee80211_hw *hw)
944{
945 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
946
947 return rtlhal->hal_coex_8723.low_priority_tx +
948 rtlhal->hal_coex_8723.low_priority_rx;
949}
950
/* Classify total BT Tx+Rx traffic into one of four levels using the
 * BT_TXRX_CNT_THRES_* thresholds, update cstate_h to reflect the new
 * level (clearing levels 0-2 first), and return the level.
 */
static u8 rtl8723ae_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
	u32 bt_tx_rx_cnt = 0;
	u8 bt_tx_rx_cnt_lvl = 0;

	bt_tx_rx_cnt = rtl8723ae_dm_bt_tx_rx_couter_h(hw) +
		       rtl8723ae_dm_bt_tx_rx_couter_l(hw);
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);

	/* NOTE(review): only levels 0-2 are cleared here, not LEVEL_3 —
	 * presumably intentional; confirm against the cstate_h users. */
	rtlpcipriv->bt_coexist.cstate_h &=
		~(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1 |
		  BT_COEX_STATE_BT_CNT_LEVEL_2);

	if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], BT TxRx Counters at level 3\n");
		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_3;
	} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], BT TxRx Counters at level 2\n");
		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_2;
	} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], BT TxRx Counters at level 1\n");
		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_1;
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], BT TxRx Counters at level 0\n");
		bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
		rtlpcipriv->bt_coexist.cstate_h |= BT_COEX_STATE_BT_CNT_LEVEL_0;
	}
	return bt_tx_rx_cnt_lvl;
}
990
/* 2-antenna coexistence tuning for the HID / SCO / eSCO profile.
 * Builds a btdm setting from the baseline, picks PS-TDMA duty-cycle
 * bytes based on channel width, wifi RSSI state, and BT traffic level,
 * then applies it if the coexist state changed.
 */
static void rtl8723ae_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct btdm_8723 btdm8723;
	u8 bt_rssi_state, bt_rssi_state1;
	u8 bt_tx_rx_cnt_lvl;

	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);

	btdm8723.rf_rx_lpf_shrink = true;
	btdm8723.low_penalty_rate_adaptive = true;
	btdm8723.reject_aggre_pkt = false;

	bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw);
	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);

	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
		/* coex table */
		btdm8723.val_0x6c0 = 0x55555555;
		btdm8723.val_0x6c8 = 0xffff;
		btdm8723.val_0x6cc = 0x3;

		/* sw mechanism */
		btdm8723.agc_table_en = false;
		btdm8723.adc_back_off_on = false;
		btdm8723.sw_dac_swing_on = false;

		/* fw mechanism: PS-TDMA duty cycle shrinks as BT traffic
		 * grows (byte[1]/byte[2]: 0xf -> 0xa -> 0x5) */
		btdm8723.ps_tdma_on = true;
		if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "[BTCoex], BT TxRx Counters >= 1400\n");
			btdm8723.ps_tdma_byte[0] = 0xa3;
			btdm8723.ps_tdma_byte[1] = 0x5;
			btdm8723.ps_tdma_byte[2] = 0x5;
			btdm8723.ps_tdma_byte[3] = 0x2;
			btdm8723.ps_tdma_byte[4] = 0x80;
		} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
			btdm8723.ps_tdma_byte[0] = 0xa3;
			btdm8723.ps_tdma_byte[1] = 0xa;
			btdm8723.ps_tdma_byte[2] = 0xa;
			btdm8723.ps_tdma_byte[3] = 0x2;
			btdm8723.ps_tdma_byte[4] = 0x80;
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "[BTCoex], BT TxRx Counters < 1200\n");
			btdm8723.ps_tdma_byte[0] = 0xa3;
			btdm8723.ps_tdma_byte[1] = 0xf;
			btdm8723.ps_tdma_byte[2] = 0xf;
			btdm8723.ps_tdma_byte[3] = 0x2;
			btdm8723.ps_tdma_byte[4] = 0x80;
		}
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "HT20 or Legacy\n");
		/* RSSI state machines: thresholds 47 (sw) and 27 (fw) */
		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
								      47, 0);
		bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2,
									27, 0);

		/* coex table */
		btdm8723.val_0x6c0 = 0x55555555;
		btdm8723.val_0x6c8 = 0xffff;
		btdm8723.val_0x6cc = 0x3;

		/* sw mechanism: only enabled when wifi rssi is high */
		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi high\n");
			btdm8723.agc_table_en = true;
			btdm8723.adc_back_off_on = true;
			btdm8723.sw_dac_swing_on = false;
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi low\n");
			btdm8723.agc_table_en = false;
			btdm8723.adc_back_off_on = false;
			btdm8723.sw_dac_swing_on = false;
		}

		/* fw mechanism */
		btdm8723.ps_tdma_on = true;
		if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
		    (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi-1 high\n");
			/* only rssi high we need to do this,
			 * when rssi low, the value will modified by fw
			 */
			rtl_write_byte(rtlpriv, 0x883, 0x40);
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x83;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x83;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x83;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi-1 low\n");
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x2;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x2;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x2;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		}
	}

	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
		btdm8723.dec_bt_pwr = true;

	/* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
		 rtlhal->hal_coex_8723.bt_inq_page_start_time,
		 bt_tx_rx_cnt_lvl);
	/* during BT inquiry/page scan (or max traffic), force the most
	 * BT-friendly PS-TDMA pattern */
	if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) ||
	    (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
		btdm8723.ps_tdma_on = true;
		btdm8723.ps_tdma_byte[0] = 0xa3;
		btdm8723.ps_tdma_byte[1] = 0x5;
		btdm8723.ps_tdma_byte[2] = 0x5;
		btdm8723.ps_tdma_byte[3] = 0x2;
		btdm8723.ps_tdma_byte[4] = 0x80;
	}

	if (rtl8723ae_dm_bt_is_coexist_state_changed(hw))
		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);
}
1168
/*
 * 2-antenna coexistence mechanism for the FTP/A2DP BT profile.
 *
 * Builds a btdm_8723 parameter set (coex table, software mechanisms and the
 * firmware PS-TDMA bytes) from the current channel bandwidth, the wifi RSSI
 * state and the BT tx+rx activity level, then commits it only when the
 * coexist state actually changed.
 */
static void rtl8723ae_dm_bt_2_ant_fta2dp(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct btdm_8723 btdm8723;
	u8 bt_rssi_state, bt_rssi_state1;
	u32 bt_tx_rx_cnt_lvl;

	/* Start from the reloaded defaults, then apply the A2DP/FTP
	 * specific software mechanisms. */
	rtl8723ae_dm_bt_btdm_structure_reload(hw, &btdm8723);
	btdm8723.rf_rx_lpf_shrink = true;
	btdm8723.low_penalty_rate_adaptive = true;
	btdm8723.reject_aggre_pkt = false;

	bt_tx_rx_cnt_lvl = rtl8723ae_dm_bt_bt_tx_rx_counter_level(hw);

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);

	if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
								      37, 0);

		/* coex table */
		btdm8723.val_0x6c0 = 0x55555555;
		btdm8723.val_0x6c8 = 0xffff;
		btdm8723.val_0x6cc = 0x3;

		/* sw mechanism */
		btdm8723.agc_table_en = false;
		btdm8723.adc_back_off_on = true;
		btdm8723.sw_dac_swing_on = false;

		/* fw mechanism: PS-TDMA duty cycle shrinks (0xf -> 0xa ->
		 * 0x5) as the BT activity level rises; byte[3] selects the
		 * variant used at high (0x81) vs low (0x0) wifi RSSI. */
		btdm8723.ps_tdma_on = true;
		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi high\n");
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi low\n");
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		}
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "HT20 or Legacy\n");
		/* Two RSSI states with different thresholds: one picks the
		 * software mechanism, the other the fw PS-TDMA variant. */
		bt_rssi_state = rtl8723ae_dm_bt_check_coex_rssi_state(hw, 2,
								      47, 0);
		bt_rssi_state1 = rtl8723ae_dm_bt_check_coex_rssi_state1(hw, 2,
									27, 0);

		/* coex table */
		btdm8723.val_0x6c0 = 0x55555555;
		btdm8723.val_0x6c8 = 0xffff;
		btdm8723.val_0x6cc = 0x3;

		/* sw mechanism */
		if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
		    (bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi high\n");
			btdm8723.agc_table_en = true;
			btdm8723.adc_back_off_on = true;
			btdm8723.sw_dac_swing_on = false;
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi low\n");
			btdm8723.agc_table_en = false;
			btdm8723.adc_back_off_on = false;
			btdm8723.sw_dac_swing_on = false;
		}

		/* fw mechanism */
		btdm8723.ps_tdma_on = true;
		if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
		    (bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi-1 high\n");
			/* Only needed while RSSI is high; when RSSI is low
			 * the firmware modifies this value itself. */
			rtl_write_byte(rtlpriv, 0x883, 0x40);
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x81;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
				 "Wifi rssi-1 low\n");
			if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0x5;
				btdm8723.ps_tdma_byte[2] = 0x5;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xa;
				btdm8723.ps_tdma_byte[2] = 0xa;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			} else {
				RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
					 "[BTCoex], BT TxRx Counters < 1200\n");
				btdm8723.ps_tdma_byte[0] = 0xa3;
				btdm8723.ps_tdma_byte[1] = 0xf;
				btdm8723.ps_tdma_byte[2] = 0xf;
				btdm8723.ps_tdma_byte[3] = 0x0;
				btdm8723.ps_tdma_byte[4] = 0x80;
			}
		}
	}

	if (rtl8723ae_dm_bt_need_to_dec_bt_pwr(hw))
		btdm8723.dec_bt_pwr = true;

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
		 "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
		 rtlhal->hal_coex_8723.bt_inq_page_start_time,
		 bt_tx_rx_cnt_lvl);

	/* BT inquiry/page (or very high BT activity) overrides whatever
	 * PS-TDMA bytes were chosen above. */
	if ((rtlhal->hal_coex_8723.bt_inq_page_start_time) ||
	    (BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
		btdm8723.ps_tdma_on = true;
		btdm8723.ps_tdma_byte[0] = 0xa3;
		btdm8723.ps_tdma_byte[1] = 0x5;
		btdm8723.ps_tdma_byte[2] = 0x5;
		btdm8723.ps_tdma_byte[3] = 0x83;
		btdm8723.ps_tdma_byte[4] = 0x80;
	}

	/* Only push the new settings when the coexist state changed. */
	if (rtl8723ae_dm_bt_is_coexist_state_changed(hw))
		rtl8723ae_dm_bt_set_bt_dm(hw, &btdm8723);
}
1381
1382static void rtl8723ae_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
1383{
1384 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1385 struct rtl_priv *rtlpriv = rtl_priv(hw);
1386 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1387 u32 cur_time = jiffies;
1388
1389 if (rtlhal->hal_coex_8723.c2h_bt_inquiry_page) {
1390 /* bt inquiry or page is started. */
1391 if (rtlhal->hal_coex_8723.bt_inq_page_start_time == 0) {
1392 rtlpcipriv->bt_coexist.cstate |=
1393 BT_COEX_STATE_BT_INQ_PAGE;
1394 rtlhal->hal_coex_8723.bt_inq_page_start_time = cur_time;
1395 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1396 "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
1397 rtlhal->hal_coex_8723.bt_inq_page_start_time);
1398 }
1399 }
1400 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1401 "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
1402 rtlhal->hal_coex_8723.bt_inq_page_start_time, cur_time);
1403
1404 if (rtlhal->hal_coex_8723.bt_inq_page_start_time) {
1405 if ((((long)cur_time -
1406 (long)rtlhal->hal_coex_8723.bt_inq_page_start_time) / HZ) >=
1407 10) {
1408 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1409 "[BTCoex], BT Inquiry/page >= 10sec!!!");
1410 rtlhal->hal_coex_8723.bt_inq_page_start_time = 0;
1411 rtlpcipriv->bt_coexist.cstate &=
1412 ~BT_COEX_STATE_BT_INQ_PAGE;
1413 }
1414 }
1415}
1416
1417static void rtl8723ae_dm_bt_reset_action_profile_state(struct ieee80211_hw *hw)
1418{
1419 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1420
1421 rtlpcipriv->bt_coexist.cstate &=
1422 ~(BT_COEX_STATE_PROFILE_HID | BT_COEX_STATE_PROFILE_A2DP |
1423 BT_COEX_STATE_PROFILE_PAN | BT_COEX_STATE_PROFILE_SCO);
1424
1425 rtlpcipriv->bt_coexist.cstate &=
1426 ~(BT_COEX_STATE_BTINFO_COMMON |
1427 BT_COEX_STATE_BTINFO_B_HID_SCOESCO |
1428 BT_COEX_STATE_BTINFO_B_FTP_A2DP);
1429}
1430
1431static void _rtl8723ae_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
1432{
1433 struct rtl_priv *rtlpriv = rtl_priv(hw);
1434 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1435 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1436 u8 bt_retry_cnt;
1437 u8 bt_info_original;
1438 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1439 "[BTCoex] Get bt info by fw!!\n");
1440
1441 _rtl8723_dm_bt_check_wifi_state(hw);
1442
1443 if (rtlhal->hal_coex_8723.c2h_bt_info_req_sent) {
1444 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1445 "[BTCoex] c2h for btInfo not rcvd yet!!\n");
1446 }
1447
1448 bt_retry_cnt = rtlhal->hal_coex_8723.bt_retry_cnt;
1449 bt_info_original = rtlhal->hal_coex_8723.c2h_bt_info_original;
1450
1451 /* when bt inquiry or page scan, we have to set h2c 0x25
1452 * ignore wlanact for continuous 4x2secs
1453 */
1454 rtl8723ae_dm_bt_inq_page_monitor(hw);
1455 rtl8723ae_dm_bt_reset_action_profile_state(hw);
1456
1457 if (rtl8723ae_dm_bt_is_2_ant_common_action(hw)) {
1458 rtlpcipriv->bt_coexist.bt_profile_case = BT_COEX_MECH_COMMON;
1459 rtlpcipriv->bt_coexist.bt_profile_action = BT_COEX_MECH_COMMON;
1460
1461 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1462 "Action 2-Ant common.\n");
1463 } else {
1464 if ((bt_info_original & BTINFO_B_HID) ||
1465 (bt_info_original & BTINFO_B_SCO_BUSY) ||
1466 (bt_info_original & BTINFO_B_SCO_ESCO)) {
1467 rtlpcipriv->bt_coexist.cstate |=
1468 BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
1469 rtlpcipriv->bt_coexist.bt_profile_case =
1470 BT_COEX_MECH_HID_SCO_ESCO;
1471 rtlpcipriv->bt_coexist.bt_profile_action =
1472 BT_COEX_MECH_HID_SCO_ESCO;
1473 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1474 "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
1475 rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw);
1476 } else if ((bt_info_original & BTINFO_B_FTP) ||
1477 (bt_info_original & BTINFO_B_A2DP)) {
1478 rtlpcipriv->bt_coexist.cstate |=
1479 BT_COEX_STATE_BTINFO_B_FTP_A2DP;
1480 rtlpcipriv->bt_coexist.bt_profile_case =
1481 BT_COEX_MECH_FTP_A2DP;
1482 rtlpcipriv->bt_coexist.bt_profile_action =
1483 BT_COEX_MECH_FTP_A2DP;
1484 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1485 "BTInfo: bFTP|bA2DP\n");
1486 rtl8723ae_dm_bt_2_ant_fta2dp(hw);
1487 } else {
1488 rtlpcipriv->bt_coexist.cstate |=
1489 BT_COEX_STATE_BTINFO_B_HID_SCOESCO;
1490 rtlpcipriv->bt_coexist.bt_profile_case =
1491 BT_COEX_MECH_NONE;
1492 rtlpcipriv->bt_coexist.bt_profile_action =
1493 BT_COEX_MECH_NONE;
1494 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1495 "[BTCoex], BTInfo: undefined case!!!!\n");
1496 rtl8723ae_dm_bt_2_ant_hid_sco_esco(hw);
1497 }
1498 }
1499}
1500
/* Placeholder: the 1-antenna coexistence mechanism is not implemented;
 * rtl8723ae_dm_bt_coexist_8723() still dispatches here for ANT_X1 so the
 * 1-ant path has a landing point.
 */
static void _rtl8723ae_dm_bt_coexist_1_ant(struct ieee80211_hw *hw)
{
}
1504
/* Disable all hardware coexistence mechanisms: program the coexistence
 * table with the all-off values and enable hardware PTA mode.
 */
void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw)
{
	rtl8723ae_dm_bt_set_coex_table(hw, 0x5a5aaaaa, 0xcc, 0x3);
	rtl8723ae_dm_bt_set_hw_pta_mode(hw, true);
}
1510
/* Disable all firmware coexistence mechanisms in one pass: stop ignoring
 * WLAN activity, reset the 0x3a parameters, turn off the 2-ant HID mode,
 * both TDMA variants and DAC swing, and restore the default retry index,
 * wlan-act timing and BT power settings.
 */
void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw)
{
	rtl8723ae_dm_bt_set_fw_ignore_wlan_act(hw, false);
	rtl8723ae_dm_bt_set_fw_3a(hw, 0x0, 0x0, 0x0, 0x8, 0x0);
	rtl8723ae_dm_bt_set_fw_2_ant_hid(hw, false, false);
	rtl8723ae_dm_bt_set_fw_tra_tdma_ctrl(hw, false,
					     TDMA_2ANT, TDMA_NAV_OFF);
	rtl8723ae_dm_bt_set_fw_tdma_ctrl(hw, false, TDMA_2ANT,
					 TDMA_NAV_OFF, TDMA_DAC_SWING_OFF);
	rtl8723ae_dm_bt_set_fw_dac_swing_level(hw, 0);
	rtl8723ae_dm_bt_set_fw_bt_hid_info(hw, false);
	rtl8723ae_dm_bt_set_fw_bt_retry_index(hw, 2);
	rtl8723ae_dm_bt_set_fw_wlan_act(hw, 0x10, 0x10);
	rtl8723ae_dm_bt_set_fw_dec_bt_pwr(hw, false);
}
1526
/* Disable all software (driver-side) coexistence mechanisms: AGC table,
 * BB backoff, AP-aggregation rejection, penalty rate adaptation, the RX
 * LPF corner shrink, and full-time DAC swing.
 */
void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw)
{
	rtl8723ae_dm_bt_agc_table(hw, BT_AGCTABLE_OFF);
	rtl8723ae_dm_bt_bback_off_level(hw, BT_BB_BACKOFF_OFF);
	rtl8723ae_dm_bt_reject_ap_aggregated_packet(hw, false);

	rtl8723ae_bt_set_penalty_tx_rate_adap(hw, BT_TX_RATE_ADAPTIVE_NORMAL);
	rtl8723ae_dm_bt_set_sw_rf_rx_lpf_corner(hw, BT_RF_RX_LPF_CORNER_RESUME);
	rtl8723ae_dm_bt_set_sw_full_time_dac_swing(hw, false, 0xc0);
}
1537
1538static void rtl8723ae_dm_bt_query_bt_information(struct ieee80211_hw *hw)
1539{
1540 struct rtl_priv *rtlpriv = rtl_priv(hw);
1541 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1542 u8 h2c_parameter[1] = {0};
1543
1544 rtlhal->hal_coex_8723.c2h_bt_info_req_sent = true;
1545
1546 h2c_parameter[0] |= BIT(0);
1547
1548 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1549 "Query Bt information, write 0x38 = 0x%x\n",
1550 h2c_parameter[0]);
1551
1552 rtl8723ae_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
1553}
1554
1555static void rtl8723ae_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
1556{
1557 struct rtl_priv *rtlpriv = rtl_priv(hw);
1558 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1559 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1560 u32 reg_htx_rx, reg_ltx_rx, u32_tmp;
1561 u32 reg_htx, reg_hrx, reg_ltx, reg_lrx;
1562
1563 reg_htx_rx = REG_HIGH_PRIORITY_TXRX;
1564 reg_ltx_rx = REG_LOW_PRIORITY_TXRX;
1565
1566 u32_tmp = rtl_read_dword(rtlpriv, reg_htx_rx);
1567 reg_htx = u32_tmp & MASKLWORD;
1568 reg_hrx = (u32_tmp & MASKHWORD)>>16;
1569
1570 u32_tmp = rtl_read_dword(rtlpriv, reg_ltx_rx);
1571 reg_ltx = u32_tmp & MASKLWORD;
1572 reg_lrx = (u32_tmp & MASKHWORD)>>16;
1573
1574 if (rtlpcipriv->bt_coexist.lps_counter > 1) {
1575 reg_htx %= rtlpcipriv->bt_coexist.lps_counter;
1576 reg_hrx %= rtlpcipriv->bt_coexist.lps_counter;
1577 reg_ltx %= rtlpcipriv->bt_coexist.lps_counter;
1578 reg_lrx %= rtlpcipriv->bt_coexist.lps_counter;
1579 }
1580
1581 rtlhal->hal_coex_8723.high_priority_tx = reg_htx;
1582 rtlhal->hal_coex_8723.high_priority_rx = reg_hrx;
1583 rtlhal->hal_coex_8723.low_priority_tx = reg_ltx;
1584 rtlhal->hal_coex_8723.low_priority_rx = reg_lrx;
1585
1586 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1587 "High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
1588 reg_htx_rx, reg_htx, reg_htx, reg_hrx, reg_hrx);
1589 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1590 "Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
1591 reg_ltx_rx, reg_ltx, reg_ltx, reg_lrx, reg_lrx);
1592 rtlpcipriv->bt_coexist.lps_counter = 0;
1593}
1594
1595static void rtl8723ae_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
1596{
1597 struct rtl_priv *rtlpriv = rtl_priv(hw);
1598 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1599 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1600 bool bt_alife = true;
1601
1602 if (rtlhal->hal_coex_8723.high_priority_tx == 0 &&
1603 rtlhal->hal_coex_8723.high_priority_rx == 0 &&
1604 rtlhal->hal_coex_8723.low_priority_tx == 0 &&
1605 rtlhal->hal_coex_8723.low_priority_rx == 0)
1606 bt_alife = false;
1607 if (rtlhal->hal_coex_8723.high_priority_tx == 0xeaea &&
1608 rtlhal->hal_coex_8723.high_priority_rx == 0xeaea &&
1609 rtlhal->hal_coex_8723.low_priority_tx == 0xeaea &&
1610 rtlhal->hal_coex_8723.low_priority_rx == 0xeaea)
1611 bt_alife = false;
1612 if (rtlhal->hal_coex_8723.high_priority_tx == 0xffff &&
1613 rtlhal->hal_coex_8723.high_priority_rx == 0xffff &&
1614 rtlhal->hal_coex_8723.low_priority_tx == 0xffff &&
1615 rtlhal->hal_coex_8723.low_priority_rx == 0xffff)
1616 bt_alife = false;
1617 if (bt_alife) {
1618 rtlpcipriv->bt_coexist.bt_active_zero_cnt = 0;
1619 rtlpcipriv->bt_coexist.cur_bt_disabled = false;
1620 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1621 "8723A BT is enabled !!\n");
1622 } else {
1623 rtlpcipriv->bt_coexist.bt_active_zero_cnt++;
1624 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1625 "8723A bt all counters = 0, %d times!!\n",
1626 rtlpcipriv->bt_coexist.bt_active_zero_cnt);
1627 if (rtlpcipriv->bt_coexist.bt_active_zero_cnt >= 2) {
1628 rtlpcipriv->bt_coexist.cur_bt_disabled = true;
1629 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1630 "8723A BT is disabled !!\n");
1631 }
1632 }
1633 if (rtlpcipriv->bt_coexist.pre_bt_disabled !=
1634 rtlpcipriv->bt_coexist.cur_bt_disabled) {
1635 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1636 "8723A BT is from %s to %s!!\n",
1637 (rtlpcipriv->bt_coexist.pre_bt_disabled ?
1638 "disabled" : "enabled"),
1639 (rtlpcipriv->bt_coexist.cur_bt_disabled ?
1640 "disabled" : "enabled"));
1641 rtlpcipriv->bt_coexist.pre_bt_disabled
1642 = rtlpcipriv->bt_coexist.cur_bt_disabled;
1643 }
1644}
1645
1646
/*
 * Main BT coexistence entry point for the 8723A.  Requests fresh BT info
 * from the firmware, samples the hardware counters, checks whether BT is
 * still enabled, dispatches to the 2- or 1-antenna mechanism, and logs
 * coexist-state transitions.
 */
void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);

	/* Refresh BT status before choosing a coexistence action. */
	rtl8723ae_dm_bt_query_bt_information(hw);
	rtl8723ae_dm_bt_bt_hw_counters_monitor(hw);
	rtl8723ae_dm_bt_bt_enable_disable_check(hw);

	if (rtlpcipriv->bt_coexist.bt_ant_num == ANT_X2) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], 2 Ant mechanism\n");
		_rtl8723ae_dm_bt_coexist_2_ant(hw);
	} else {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "[BTCoex], 1 Ant mechanism\n");
		_rtl8723ae_dm_bt_coexist_1_ant(hw);
	}

	if (!rtl8723ae_dm_bt_is_same_coexist_state(hw)) {
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
			 "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
			 rtlpcipriv->bt_coexist.previous_state_h,
			 rtlpcipriv->bt_coexist.previous_state,
			 rtlpcipriv->bt_coexist.cstate_h,
			 rtlpcipriv->bt_coexist.cstate);
		/* Remember the state just reported for the next pass. */
		rtlpcipriv->bt_coexist.previous_state
			= rtlpcipriv->bt_coexist.cstate;
		rtlpcipriv->bt_coexist.previous_state_h
			= rtlpcipriv->bt_coexist.cstate_h;
	}
}
1679
1680static void rtl8723ae_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
1681 u8 *tmbuf, u8 len)
1682{
1683 struct rtl_priv *rtlpriv = rtl_priv(hw);
1684 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1685 struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
1686 u8 bt_info;
1687 u8 i;
1688
1689 rtlhal->hal_coex_8723.c2h_bt_info_req_sent = false;
1690 rtlhal->hal_coex_8723.bt_retry_cnt = 0;
1691 for (i = 0; i < len; i++) {
1692 if (i == 0)
1693 rtlhal->hal_coex_8723.c2h_bt_info_original = tmbuf[i];
1694 else if (i == 1)
1695 rtlhal->hal_coex_8723.bt_retry_cnt = tmbuf[i];
1696 if (i == len-1) {
1697 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1698 "0x%2x]", tmbuf[i]);
1699 } else {
1700 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
1701 "0x%2x, ", tmbuf[i]);
1702 }
1703 }
1704 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1705 "BT info bt_info (Data)= 0x%x\n",
1706 rtlhal->hal_coex_8723.c2h_bt_info_original);
1707 bt_info = rtlhal->hal_coex_8723.c2h_bt_info_original;
1708
1709 if (bt_info & BIT(2))
1710 rtlhal->hal_coex_8723.c2h_bt_inquiry_page = true;
1711 else
1712 rtlhal->hal_coex_8723.c2h_bt_inquiry_page = false;
1713
1714 if (bt_info & BTINFO_B_CONNECTION) {
1715 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1716 "[BTC2H], BTInfo: bConnect=true\n");
1717 rtlpcipriv->bt_coexist.bt_busy = true;
1718 rtlpcipriv->bt_coexist.cstate &= ~BT_COEX_STATE_BT_IDLE;
1719 } else {
1720 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
1721 "[BTC2H], BTInfo: bConnect=false\n");
1722 rtlpcipriv->bt_coexist.bt_busy = false;
1723 rtlpcipriv->bt_coexist.cstate |= BT_COEX_STATE_BT_IDLE;
1724 }
1725}
1726void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
1727{
1728 struct rtl_priv *rtlpriv = rtl_priv(hw);
1729 struct c2h_evt_hdr c2h_event;
1730 u8 *ptmbuf;
1731 u8 index;
1732 u8 u1tmp;
1733
1734 memset(&c2h_event, 0, sizeof(c2h_event));
1735 u1tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
1736 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
1737 "&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1tmp);
1738 c2h_event.cmd_id = u1tmp & 0xF;
1739 c2h_event.cmd_len = (u1tmp & 0xF0) >> 4;
1740 c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
1741 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
1742 "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
1743 c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq);
1744 u1tmp = rtl_read_byte(rtlpriv, 0x01AF);
1745 if (u1tmp == C2H_EVT_HOST_CLOSE) {
1746 return;
1747 } else if (u1tmp != C2H_EVT_FW_CLOSE) {
1748 rtl_write_byte(rtlpriv, 0x1AF, 0x00);
1749 return;
1750 }
1751 ptmbuf = kmalloc(c2h_event.cmd_len, GFP_KERNEL);
1752 if (ptmbuf == NULL) {
1753 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
1754 "malloc cmd buf failed\n");
1755 return;
1756 }
1757
1758 /* Read the content */
1759 for (index = 0; index < c2h_event.cmd_len; index++)
1760 ptmbuf[index] = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL +
1761 2 + index);
1762
1763 switch (c2h_event.cmd_id) {
1764 case C2H_BT_RSSI:
1765 break;
1766
1767 case C2H_BT_OP_MODE:
1768 break;
1769
1770 case BT_INFO:
1771 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
1772 "BT info Byte[0] (ID) is 0x%x\n", c2h_event.cmd_id);
1773 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
1774 "BT info Byte[1] (Seq) is 0x%x\n", c2h_event.cmd_seq);
1775 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
1776 "BT info Byte[2] (Data)= 0x%x\n", ptmbuf[0]);
1777
1778 rtl8723ae_dm_bt_parse_bt_info(hw, ptmbuf, c2h_event.cmd_len);
1779 break;
1780 default:
1781 break;
1782 }
1783 kfree(ptmbuf);
1784
1785 rtl_write_byte(rtlpriv, 0x01AF, C2H_EVT_HOST_CLOSE);
1786}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
new file mode 100644
index 000000000000..4325ecd58f0c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hal_btc.h
@@ -0,0 +1,151 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 * Larry Finger <Larry.Finger@lwfinger.net>
26 *
27 ****************************************************************************
28 */
29
#ifndef __RTL8723E_HAL_BTC_H__
#define __RTL8723E_HAL_BTC_H__

#include "../wifi.h"
#include "btc.h"
#include "hal_bt_coexist.h"

/* BT tx+rx activity thresholds and the levels derived from them; the
 * levels drive the PS-TDMA parameter selection in hal_btc.c. */
#define BT_TXRX_CNT_THRES_1 1200
#define BT_TXRX_CNT_THRES_2 1400
#define BT_TXRX_CNT_THRES_3 3000
#define BT_TXRX_CNT_LEVEL_0 0	/* < 1200 */
#define BT_TXRX_CNT_LEVEL_1 1	/* >= 1200 && < 1400 */
#define BT_TXRX_CNT_LEVEL_2 2	/* >= 1400 */
#define BT_TXRX_CNT_LEVEL_3 3

/* TDMA mode definition */
#define TDMA_2ANT 0
#define TDMA_1ANT 1
#define TDMA_NAV_OFF 0
#define TDMA_NAV_ON 1
#define TDMA_DAC_SWING_OFF 0
#define TDMA_DAC_SWING_ON 1

/* PTA mode related definition */
#define BT_PTA_MODE_OFF 0
#define BT_PTA_MODE_ON 1

/* Penalty Tx Rate Adaptive */
#define BT_TX_RATE_ADAPTIVE_NORMAL 0
#define BT_TX_RATE_ADAPTIVE_LOW_PENALTY 1

/* RF Corner */
#define BT_RF_RX_LPF_CORNER_RESUME 0
#define BT_RF_RX_LPF_CORNER_SHRINK 1

/* Ownership markers for the C2H mailbox trigger byte at 0x01AF. */
#define C2H_EVT_HOST_CLOSE 0x00
#define C2H_EVT_FW_CLOSE 0xFF

enum bt_traffic_mode {
	BT_MOTOR_EXT_BE = 0x00,
	BT_MOTOR_EXT_GUL = 0x01,
	BT_MOTOR_EXT_GUB = 0x02,
	BT_MOTOR_EXT_GULB = 0x03
};

enum bt_traffic_mode_profile {
	BT_PROFILE_NONE,
	BT_PROFILE_A2DP,
	BT_PROFILE_PAN,
	BT_PROFILE_HID,
	BT_PROFILE_SCO
};

enum hci_ext_bt_operation {
	HCI_BT_OP_NONE = 0x0,
	HCI_BT_OP_INQUIRE_START = 0x1,
	HCI_BT_OP_INQUIRE_FINISH = 0x2,
	HCI_BT_OP_PAGING_START = 0x3,
	HCI_BT_OP_PAGING_SUCCESS = 0x4,
	HCI_BT_OP_PAGING_UNSUCCESS = 0x5,
	HCI_BT_OP_PAIRING_START = 0x6,
	HCI_BT_OP_PAIRING_FINISH = 0x7,
	HCI_BT_OP_BT_DEV_ENABLE = 0x8,
	HCI_BT_OP_BT_DEV_DISABLE = 0x9,
	HCI_BT_OP_MAX,
};

enum bt_spec {
	BT_SPEC_1_0_b = 0x00,
	BT_SPEC_1_1 = 0x01,
	BT_SPEC_1_2 = 0x02,
	BT_SPEC_2_0_EDR = 0x03,
	BT_SPEC_2_1_EDR = 0x04,
	BT_SPEC_3_0_HS = 0x05,
	BT_SPEC_4_0 = 0x06
};

/* Header of a firmware C2H event: id/length are packed in the first
 * mailbox byte, the sequence number follows in the second. */
struct c2h_evt_hdr {
	u8 cmd_id;
	u8 cmd_len;
	u8 cmd_seq;
};

enum bt_state {
	BT_INFO_STATE_DISABLED = 0,
	BT_INFO_STATE_NO_CONNECTION = 1,
	BT_INFO_STATE_CONNECT_IDLE = 2,
	BT_INFO_STATE_INQ_OR_PAG = 3,
	BT_INFO_STATE_ACL_ONLY_BUSY = 4,
	BT_INFO_STATE_SCO_ONLY_BUSY = 5,
	BT_INFO_STATE_ACL_SCO_BUSY = 6,
	BT_INFO_STATE_HID_BUSY = 7,
	BT_INFO_STATE_HID_SCO_BUSY = 8,
	/* NOTE(review): MAX (7) is smaller than HID_SCO_BUSY (8) and
	 * duplicates HID_BUSY — confirm whether this is intentional. */
	BT_INFO_STATE_MAX = 7
};

enum rtl8723ae_c2h_evt {
	C2H_DBG = 0,
	C2H_TSF = 1,
	C2H_AP_RPT_RSP = 2,
	C2H_CCX_TX_RPT = 3, /* The FW notify the report of the specific */
			    /* tx packet. */
	C2H_BT_RSSI = 4,
	C2H_BT_OP_MODE = 5,
	C2H_HW_INFO_EXCH = 10,
	C2H_C2H_H2C_TEST = 11,
	BT_INFO = 12,
	MAX_C2HEVENT
};

void rtl8723ae_dm_bt_fw_coex_all_off_8723a(struct ieee80211_hw *hw);
void rtl8723ae_dm_bt_sw_coex_all_off_8723a(struct ieee80211_hw *hw);
void rtl8723ae_dm_bt_hw_coex_all_off_8723a(struct ieee80211_hw *hw);
void rtl8723ae_dm_bt_coexist_8723(struct ieee80211_hw *hw);
void rtl8723ae_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
			       struct btdm_8723 *p_btdm);
void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw);
void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
					   bool mstatus);
void rtl8723ae_bt_coex_off_before_lps(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
new file mode 100644
index 000000000000..0a8c03863fb2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
@@ -0,0 +1,2380 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../regd.h"
34#include "../cam.h"
35#include "../ps.h"
36#include "../pci.h"
37#include "reg.h"
38#include "def.h"
39#include "phy.h"
40#include "dm.h"
41#include "fw.h"
42#include "led.h"
43#include "hw.h"
44#include "pwrseqcmd.h"
45#include "pwrseq.h"
46#include "btc.h"
47
/* Update the cached copy of REG_BCN_CTRL and push it to the hardware.
 * set_bits are set first, then clear_bits are cleared, so a bit present
 * in both arguments ends up cleared.
 */
static void _rtl8723ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
					u8 set_bits, u8 clear_bits)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpci->reg_bcn_ctrl_val |= set_bits;
	rtlpci->reg_bcn_ctrl_val &= ~clear_bits;

	rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
}
59
/* Stop beacon transmission: clear BIT(6) of REG_FWHW_TXQ_CTRL+2 and set
 * REG_TBTT_PROHIBIT+1 to 0x64, then clear BIT(0) of REG_TBTT_PROHIBIT+2.
 *
 * NOTE(review): this clears BIT(0) of REG_TBTT_PROHIBIT+2 while
 * _rtl8723ae_resume_tx_beacon() sets BIT(1) -- asymmetric, but it matches
 * other rtlwifi chips; confirm against the vendor reference before changing.
 */
static void _rtl8723ae_stop_tx_beacon(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmp1byte;

	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
	tmp1byte &= ~(BIT(0));
	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
72
/* Resume beacon transmission: set BIT(6) of REG_FWHW_TXQ_CTRL+2, restore
 * REG_TBTT_PROHIBIT+1 to 0xff and set BIT(1) of REG_TBTT_PROHIBIT+2.
 * (See the asymmetry note on _rtl8723ae_stop_tx_beacon().)
 */
static void _rtl8723ae_resume_tx_beacon(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmp1byte;

	tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
	tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
	tmp1byte |= BIT(1);
	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
}
85
/* Clear BIT(1) of REG_BCN_CTRL (enable the beacon sub-function). */
static void _rtl8723ae_enable_bcn_sufunc(struct ieee80211_hw *hw)
{
	_rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(1));
}
90
/* Set BIT(1) of REG_BCN_CTRL (disable the beacon sub-function). */
static void _rtl8723ae_disable_bcn_sufunc(struct ieee80211_hw *hw)
{
	_rtl8723ae_set_bcn_ctrl_reg(hw, BIT(1), 0);
}
95
96void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
97{
98 struct rtl_priv *rtlpriv = rtl_priv(hw);
99 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
100 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
101
102 switch (variable) {
103 case HW_VAR_RCR:
104 *((u32 *) (val)) = rtlpci->receive_config;
105 break;
106 case HW_VAR_RF_STATE:
107 *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
108 break;
109 case HW_VAR_FWLPS_RF_ON:{
110 enum rf_pwrstate rfState;
111 u32 val_rcr;
112
113 rtlpriv->cfg->ops->get_hw_reg(hw,
114 HW_VAR_RF_STATE,
115 (u8 *) (&rfState));
116 if (rfState == ERFOFF) {
117 *((bool *) (val)) = true;
118 } else {
119 val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
120 val_rcr &= 0x00070000;
121 if (val_rcr)
122 *((bool *) (val)) = false;
123 else
124 *((bool *) (val)) = true;
125 }
126 break; }
127 case HW_VAR_FW_PSMODE_STATUS:
128 *((bool *) (val)) = ppsc->fw_current_inpsmode;
129 break;
130 case HW_VAR_CORRECT_TSF:{
131 u64 tsf;
132 u32 *ptsf_low = (u32 *)&tsf;
133 u32 *ptsf_high = ((u32 *)&tsf) + 1;
134
135 *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
136 *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
137
138 *((u64 *) (val)) = tsf;
139
140 break; }
141 default:
142 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
143 "switch case not process\n");
144 break;
145 }
146}
147
148void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
149{
150 struct rtl_priv *rtlpriv = rtl_priv(hw);
151 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
152 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
153 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
154 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
155 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
156 u8 idx;
157
158 switch (variable) {
159 case HW_VAR_ETHER_ADDR:
160 for (idx = 0; idx < ETH_ALEN; idx++) {
161 rtl_write_byte(rtlpriv, (REG_MACID + idx),
162 val[idx]);
163 }
164 break;
165 case HW_VAR_BASIC_RATE:{
166 u16 rate_cfg = ((u16 *) val)[0];
167 u8 rate_index = 0;
168 rate_cfg = rate_cfg & 0x15f;
169 rate_cfg |= 0x01;
170 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
171 rtl_write_byte(rtlpriv, REG_RRSR + 1,
172 (rate_cfg >> 8) & 0xff);
173 while (rate_cfg > 0x1) {
174 rate_cfg = (rate_cfg >> 1);
175 rate_index++;
176 }
177 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
178 rate_index);
179 break; }
180 case HW_VAR_BSSID:
181 for (idx = 0; idx < ETH_ALEN; idx++) {
182 rtl_write_byte(rtlpriv, (REG_BSSID + idx),
183 val[idx]);
184 }
185 break;
186 case HW_VAR_SIFS:
187 rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
188 rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
189
190 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
191 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
192
193 if (!mac->ht_enable)
194 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
195 0x0e0e);
196 else
197 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
198 *((u16 *) val));
199 break;
200 case HW_VAR_SLOT_TIME:{
201 u8 e_aci;
202
203 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
204 "HW_VAR_SLOT_TIME %x\n", val[0]);
205
206 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
207
208 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
209 rtlpriv->cfg->ops->set_hw_reg(hw,
210 HW_VAR_AC_PARAM,
211 (u8 *) (&e_aci));
212 }
213 break; }
214 case HW_VAR_ACK_PREAMBLE:{
215 u8 reg_tmp;
216 u8 short_preamble = (bool) (*(u8 *) val);
217 reg_tmp = (mac->cur_40_prime_sc) << 5;
218 if (short_preamble)
219 reg_tmp |= 0x80;
220
221 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
222 break; }
223 case HW_VAR_AMPDU_MIN_SPACE:{
224 u8 min_spacing_to_set;
225 u8 sec_min_space;
226
227 min_spacing_to_set = *((u8 *) val);
228 if (min_spacing_to_set <= 7) {
229 sec_min_space = 0;
230
231 if (min_spacing_to_set < sec_min_space)
232 min_spacing_to_set = sec_min_space;
233
234 mac->min_space_cfg = ((mac->min_space_cfg &
235 0xf8) |
236 min_spacing_to_set);
237
238 *val = min_spacing_to_set;
239
240 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
241 "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
242 mac->min_space_cfg);
243
244 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
245 mac->min_space_cfg);
246 }
247 break; }
248 case HW_VAR_SHORTGI_DENSITY:{
249 u8 density_to_set;
250
251 density_to_set = *((u8 *) val);
252 mac->min_space_cfg |= (density_to_set << 3);
253
254 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
255 "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
256 mac->min_space_cfg);
257
258 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
259 mac->min_space_cfg);
260
261 break; }
262 case HW_VAR_AMPDU_FACTOR:{
263 u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9};
264 u8 regtoset_bt[4] = {0x31, 0x74, 0x42, 0x97};
265 u8 factor_toset;
266 u8 *p_regtoset = NULL;
267 u8 index;
268
269 if ((pcipriv->bt_coexist.bt_coexistence) &&
270 (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
271 p_regtoset = regtoset_bt;
272 else
273 p_regtoset = regtoset_normal;
274
275 factor_toset = *((u8 *) val);
276 if (factor_toset <= 3) {
277 factor_toset = (1 << (factor_toset + 2));
278 if (factor_toset > 0xf)
279 factor_toset = 0xf;
280
281 for (index = 0; index < 4; index++) {
282 if ((p_regtoset[index] & 0xf0) >
283 (factor_toset << 4))
284 p_regtoset[index] =
285 (p_regtoset[index] & 0x0f) |
286 (factor_toset << 4);
287
288 if ((p_regtoset[index] & 0x0f) >
289 factor_toset)
290 p_regtoset[index] =
291 (p_regtoset[index] & 0xf0) |
292 (factor_toset);
293
294 rtl_write_byte(rtlpriv,
295 (REG_AGGLEN_LMT + index),
296 p_regtoset[index]);
297
298 }
299
300 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
301 "Set HW_VAR_AMPDU_FACTOR: %#x\n",
302 factor_toset);
303 }
304 break; }
305 case HW_VAR_AC_PARAM:{
306 u8 e_aci = *((u8 *) val);
307 rtl8723ae_dm_init_edca_turbo(hw);
308
309 if (rtlpci->acm_method != eAcmWay2_SW)
310 rtlpriv->cfg->ops->set_hw_reg(hw,
311 HW_VAR_ACM_CTRL,
312 (u8 *) (&e_aci));
313 break; }
314 case HW_VAR_ACM_CTRL:{
315 u8 e_aci = *((u8 *) val);
316 union aci_aifsn *p_aci_aifsn =
317 (union aci_aifsn *)(&(mac->ac[0].aifs));
318 u8 acm = p_aci_aifsn->f.acm;
319 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
320
321 acm_ctrl |= ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
322
323 if (acm) {
324 switch (e_aci) {
325 case AC0_BE:
326 acm_ctrl |= AcmHw_BeqEn;
327 break;
328 case AC2_VI:
329 acm_ctrl |= AcmHw_ViqEn;
330 break;
331 case AC3_VO:
332 acm_ctrl |= AcmHw_VoqEn;
333 break;
334 default:
335 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
336 "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
337 acm);
338 break;
339 }
340 } else {
341 switch (e_aci) {
342 case AC0_BE:
343 acm_ctrl &= (~AcmHw_BeqEn);
344 break;
345 case AC2_VI:
346 acm_ctrl &= (~AcmHw_ViqEn);
347 break;
348 case AC3_VO:
349 acm_ctrl &= (~AcmHw_BeqEn);
350 break;
351 default:
352 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
353 "switch case not processed\n");
354 break;
355 }
356 }
357
358 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
359 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
360 acm_ctrl);
361 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
362 break; }
363 case HW_VAR_RCR:
364 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
365 rtlpci->receive_config = ((u32 *) (val))[0];
366 break;
367 case HW_VAR_RETRY_LIMIT:{
368 u8 retry_limit = ((u8 *) (val))[0];
369
370 rtl_write_word(rtlpriv, REG_RL,
371 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
372 retry_limit << RETRY_LIMIT_LONG_SHIFT);
373 break; }
374 case HW_VAR_DUAL_TSF_RST:
375 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
376 break;
377 case HW_VAR_EFUSE_BYTES:
378 rtlefuse->efuse_usedbytes = *((u16 *) val);
379 break;
380 case HW_VAR_EFUSE_USAGE:
381 rtlefuse->efuse_usedpercentage = *((u8 *) val);
382 break;
383 case HW_VAR_IO_CMD:
384 rtl8723ae_phy_set_io_cmd(hw, (*(enum io_type *)val));
385 break;
386 case HW_VAR_WPA_CONFIG:
387 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
388 break;
389 case HW_VAR_SET_RPWM:{
390 u8 rpwm_val;
391
392 rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
393 udelay(1);
394
395 if (rpwm_val & BIT(7)) {
396 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
397 (*(u8 *) val));
398 } else {
399 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
400 ((*(u8 *) val) | BIT(7)));
401 }
402
403 break; }
404 case HW_VAR_H2C_FW_PWRMODE:{
405 u8 psmode = (*(u8 *) val);
406
407 if (psmode != FW_PS_ACTIVE_MODE)
408 rtl8723ae_dm_rf_saving(hw, true);
409
410 rtl8723ae_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
411 break; }
412 case HW_VAR_FW_PSMODE_STATUS:
413 ppsc->fw_current_inpsmode = *((bool *) val);
414 break;
415 case HW_VAR_H2C_FW_JOINBSSRPT:{
416 u8 mstatus = (*(u8 *) val);
417 u8 tmp_regcr, tmp_reg422;
418 bool recover = false;
419
420 if (mstatus == RT_MEDIA_CONNECT) {
421 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, NULL);
422
423 tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
424 rtl_write_byte(rtlpriv, REG_CR + 1,
425 (tmp_regcr | BIT(0)));
426
427 _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
428 _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
429
430 tmp_reg422 = rtl_read_byte(rtlpriv,
431 REG_FWHW_TXQ_CTRL + 2);
432 if (tmp_reg422 & BIT(6))
433 recover = true;
434 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
435 tmp_reg422 & (~BIT(6)));
436
437 rtl8723ae_set_fw_rsvdpagepkt(hw, 0);
438
439 _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
440 _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
441
442 if (recover)
443 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
444 tmp_reg422);
445
446 rtl_write_byte(rtlpriv, REG_CR + 1,
447 (tmp_regcr & ~(BIT(0))));
448 }
449 rtl8723ae_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
450
451 break; }
452 case HW_VAR_AID:{
453 u16 u2btmp;
454 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
455 u2btmp &= 0xC000;
456 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
457 mac->assoc_id));
458 break; }
459 case HW_VAR_CORRECT_TSF:{
460 u8 btype_ibss = ((u8 *) (val))[0];
461
462 if (btype_ibss == true)
463 _rtl8723ae_stop_tx_beacon(hw);
464
465 _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(3));
466
467 rtl_write_dword(rtlpriv, REG_TSFTR,
468 (u32) (mac->tsf & 0xffffffff));
469 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
470 (u32) ((mac->tsf >> 32) & 0xffffffff));
471
472 _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(3), 0);
473
474 if (btype_ibss == true)
475 _rtl8723ae_resume_tx_beacon(hw);
476 break; }
477 default:
478 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
479 "switch case not processed\n");
480 break;
481 }
482}
483
484static bool _rtl8723ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
485{
486 struct rtl_priv *rtlpriv = rtl_priv(hw);
487 bool status = true;
488 long count = 0;
489 u32 value = _LLT_INIT_ADDR(address) |
490 _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
491
492 rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
493
494 do {
495 value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
496 if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
497 break;
498
499 if (count > POLLING_LLT_THRESHOLD) {
500 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
501 "Failed to polling write LLT done at address %d!\n",
502 address);
503 status = false;
504 break;
505 }
506 } while (++count);
507
508 return status;
509}
510
511static bool _rtl8723ae_llt_table_init(struct ieee80211_hw *hw)
512{
513 struct rtl_priv *rtlpriv = rtl_priv(hw);
514 unsigned short i;
515 u8 txpktbuf_bndy;
516 u8 maxPage;
517 bool status;
518 u8 ubyte;
519
520 maxPage = 255;
521 txpktbuf_bndy = 246;
522
523 rtl_write_byte(rtlpriv, REG_CR, 0x8B);
524
525 rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000);
526
527 rtl_write_dword(rtlpriv, REG_RQPN, 0x80ac1c29);
528 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x03);
529
530 rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy));
531 rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
532
533 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
534 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
535
536 rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
537 rtl_write_byte(rtlpriv, REG_PBP, 0x11);
538 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
539
540 for (i = 0; i < (txpktbuf_bndy - 1); i++) {
541 status = _rtl8723ae_llt_write(hw, i, i + 1);
542 if (true != status)
543 return status;
544 }
545
546 status = _rtl8723ae_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
547 if (true != status)
548 return status;
549
550 for (i = txpktbuf_bndy; i < maxPage; i++) {
551 status = _rtl8723ae_llt_write(hw, i, (i + 1));
552 if (true != status)
553 return status;
554 }
555
556 status = _rtl8723ae_llt_write(hw, maxPage, txpktbuf_bndy);
557 if (true != status)
558 return status;
559
560 rtl_write_byte(rtlpriv, REG_CR, 0xff);
561 ubyte = rtl_read_byte(rtlpriv, REG_RQPN + 3);
562 rtl_write_byte(rtlpriv, REG_RQPN + 3, ubyte | BIT(7));
563
564 return true;
565}
566
567static void _rtl8723ae_gen_refresh_led_state(struct ieee80211_hw *hw)
568{
569 struct rtl_priv *rtlpriv = rtl_priv(hw);
570 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
571 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
572 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
573
574 if (rtlpriv->rtlhal.up_first_time)
575 return;
576
577 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
578 rtl8723ae_sw_led_on(hw, pLed0);
579 else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
580 rtl8723ae_sw_led_on(hw, pLed0);
581 else
582 rtl8723ae_sw_led_off(hw, pLed0);
583}
584
/* Bring up the MAC: run the power-on sequence, configure the ePHY over
 * MDIO, initialize the LLT (if the MAC was not already enabled), program
 * the DMA descriptor base addresses, and disable APSD.
 * Returns false if the power sequence, ePHY config or LLT init fails.
 *
 * NOTE(review): the "8712e" in the name looks like a typo for 8723ae --
 * every register/constant used here is 8723-specific.  Renaming would
 * also touch the caller, so it is only flagged here.
 */
static bool _rtl8712e_init_mac(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	unsigned char bytetmp;
	unsigned short wordtmp;
	u16 retry = 0;
	u16 tmpu2b;
	bool mac_func_enable;

	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
	/* REG_CR reading back 0xFF means the MAC block is already
	 * functional, so the LLT init below can be skipped. */
	bytetmp = rtl_read_byte(rtlpriv, REG_CR);
	if (bytetmp == 0xFF)
		mac_func_enable = true;
	else
		mac_func_enable = false;


	/* HW Power on sequence */
	if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
	    PWR_INTF_PCI_MSK, Rtl8723_NIC_ENABLE_FLOW))
		return false;

	bytetmp = rtl_read_byte(rtlpriv, REG_PCIE_CTRL_REG+2);
	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG+2, bytetmp | BIT(4));

	/* eMAC time out function enable, 0x369[7]=1 */
	bytetmp = rtl_read_byte(rtlpriv, 0x369);
	rtl_write_byte(rtlpriv, 0x369, bytetmp | BIT(7));

	/* ePHY reg 0x1e bit[4]=1 using MDIO interface,
	 * we should do this before Enabling ASPM backdoor.
	 */
	do {
		rtl_write_word(rtlpriv, 0x358, 0x5e);
		udelay(100);
		rtl_write_word(rtlpriv, 0x356, 0xc280);
		rtl_write_word(rtlpriv, 0x354, 0xc290);
		rtl_write_word(rtlpriv, 0x358, 0x3e);
		udelay(100);
		rtl_write_word(rtlpriv, 0x358, 0x5e);
		udelay(100);
		tmpu2b = rtl_read_word(rtlpriv, 0x356);
		retry++;
	} while (tmpu2b != 0xc290 && retry < 100);

	if (retry >= 100) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "InitMAC(): ePHY configure fail!!!\n");
		return false;
	}

	rtl_write_word(rtlpriv, REG_CR, 0x2ff);
	rtl_write_word(rtlpriv, REG_CR + 1, 0x06);

	if (!mac_func_enable) {
		if (_rtl8723ae_llt_table_init(hw) == false)
			return false;
	}

	/* Clear any pending interrupt status. */
	rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
	rtl_write_byte(rtlpriv, REG_HISRE, 0xff);

	rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff);

	wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0xf;
	wordtmp |= 0xF771;
	rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);

	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
	rtl_write_word(rtlpriv, REG_RXFLTMAP2, 0xFFFF);
	rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);

	rtl_write_byte(rtlpriv, 0x4d0, 0x0);

	/* Program the low 32 bits of each tx/rx ring's DMA base address. */
	rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
			((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_MGQ_DESA,
			(u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VOQ_DESA,
			(u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_VIQ_DESA,
			(u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BEQ_DESA,
			(u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_BKQ_DESA,
			(u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_HQ_DESA,
			(u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
			DMA_BIT_MASK(32));
	rtl_write_dword(rtlpriv, REG_RX_DESA,
			(u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
			DMA_BIT_MASK(32));

	rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x74);

	rtl_write_dword(rtlpriv, REG_INT_MIG, 0);

	/* Disable APSD and wait for BIT(7) to drop.
	 * NOTE(review): 'retry' is NOT reset before this loop, so it
	 * continues from the ePHY loop's count and the 200-iteration
	 * budget is effectively smaller -- confirm this is intended. */
	bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
	do {
		retry++;
		bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
	} while ((retry < 200) && (bytetmp & BIT(7)));

	_rtl8723ae_gen_refresh_led_state(hw);

	rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);

	return true;
}
699
/* Program the static MAC configuration: rate registers, retry limits,
 * aggregation limits, beacon control and SIFS timing.  Several values
 * differ when CSR BC4 BT-coexistence is active.
 */
static void _rtl8723ae_hw_configure(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	u8 reg_bw_opmode;
	u32 reg_ratr, reg_prsr;

	reg_bw_opmode = BW_OPMODE_20MHZ;
	reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
	    RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
	reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;

	rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);

	rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);

	rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);

	rtl_write_byte(rtlpriv, REG_SLOT, 0x09);

	rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0);

	rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80);

	rtl_write_word(rtlpriv, REG_RL, 0x0707);

	rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802);

	rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);

	rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
	rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
	rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
	rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);

	/* Tighter aggregation-length limits when BC4 BT coex is present. */
	if ((pcipriv->bt_coexist.bt_coexistence) &&
	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
		rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
	else
		rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);

	rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);

	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);

	rtlpci->reg_bcn_ctrl_val = 0x1f;
	rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);

	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);

	/* NOTE(review): duplicate of the write just above -- harmless but
	 * probably a copy/paste leftover. */
	rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);

	rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);

	if ((pcipriv->bt_coexist.bt_coexistence) &&
	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
		rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
	} else {
		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
		/* NOTE(review): writes REG_NAV_PROT_LEN twice; by symmetry
		 * with the branch above this may have been intended as
		 * REG_PROT_MODE_CTRL -- confirm against the vendor code
		 * before changing. */
		rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
	}

	if ((pcipriv->bt_coexist.bt_coexistence) &&
	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
		rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
	else
		rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);

	rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);

	rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
	rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010);

	rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010);

	rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);

	/* Accept all multicast addresses. */
	rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
	rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);

	rtl_write_dword(rtlpriv, 0x394, 0x1);
}
785
/* Enable the PCIe ASPM "backdoor" configuration through the vendor's
 * undocumented 0x34x/0x35x register window.  The constants come from the
 * vendor reference driver; their individual meaning is not documented
 * here, so do not change them without the datasheet.
 */
static void _rtl8723ae_enable_aspm_back_door(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

	rtl_write_byte(rtlpriv, 0x34b, 0x93);
	rtl_write_word(rtlpriv, 0x350, 0x870c);
	rtl_write_byte(rtlpriv, 0x352, 0x1);

	/* 0x349 value depends on whether the platform supports the
	 * backdoor mechanism. */
	if (ppsc->support_backdoor)
		rtl_write_byte(rtlpriv, 0x349, 0x1b);
	else
		rtl_write_byte(rtlpriv, 0x349, 0x03);

	rtl_write_word(rtlpriv, 0x350, 0x2718);
	rtl_write_byte(rtlpriv, 0x352, 0x1);
}
803
804void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw)
805{
806 struct rtl_priv *rtlpriv = rtl_priv(hw);
807 u8 sec_reg_value;
808
809 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
810 "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
811 rtlpriv->sec.pairwise_enc_algorithm,
812 rtlpriv->sec.group_enc_algorithm);
813
814 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
815 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
816 "not open hw encryption\n");
817 return;
818 }
819
820 sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
821
822 if (rtlpriv->sec.use_defaultkey) {
823 sec_reg_value |= SCR_TxUseDK;
824 sec_reg_value |= SCR_RxUseDK;
825 }
826
827 sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
828
829 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
830
831 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
832 "The SECR-value %x\n", sec_reg_value);
833
834 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
835
836}
837
/* Full hardware initialization: MAC bring-up, firmware download, MAC/BB/RF
 * PHY configuration, security setup, BT-coex init and IQ/LC calibration.
 * Returns 0 on success, 1 if MAC init or firmware download fails.
 */
int rtl8723ae_hw_init(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	bool rtstatus = true;
	int err;
	u8 tmp_u1b;

	rtlpriv->rtlhal.being_init_adapter = true;
	/* ASPM must stay off for the duration of the init sequence. */
	rtlpriv->intf_ops->disable_aspm(hw);
	rtstatus = _rtl8712e_init_mac(hw);
	if (rtstatus != true) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
		err = 1;
		return err;
	}

	err = rtl8723ae_download_fw(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Failed to download FW. Init HW without FW now..\n");
		err = 1;
		rtlhal->fw_ready = false;
		return err;
	} else {
		rtlhal->fw_ready = true;
	}

	rtlhal->last_hmeboxnum = 0;
	rtl8723ae_phy_mac_config(hw);
	/* because the last function modifies RCR, we update
	 * rcr var here, or TP will be unstable as ther receive_config
	 * is wrong, RX RCR_ACRC32 will cause TP unstable & Rx
	 * RCR_APP_ICV will cause mac80211 unassoc for cisco 1252
	 */
	rtlpci->receive_config = rtl_read_dword(rtlpriv, REG_RCR);
	rtlpci->receive_config &= ~(RCR_ACRC32 | RCR_AICV);
	rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);

	rtl8723ae_phy_bb_config(hw);
	rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
	rtl8723ae_phy_rf_config(hw);
	/* Cut-specific RF register tweaks (values from the vendor driver). */
	if (IS_VENDOR_UMC_A_CUT(rtlhal->version)) {
		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00);
	} else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
		rtl_set_rfreg(hw, RF90_PATH_A, 0x0C, MASKDWORD, 0x894AE);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x0A, MASKDWORD, 0x1AF31);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, MASKDWORD, 0x8F425);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_SYN_G2, MASKDWORD, 0x4F200);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK1, MASKDWORD, 0x44053);
		rtl_set_rfreg(hw, RF90_PATH_A, RF_RCK2, MASKDWORD, 0x80201);
	}
	rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
						 RF_CHNLBW, RFREG_OFFSET_MASK);
	rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
						 RF_CHNLBW, RFREG_OFFSET_MASK);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
	rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
	_rtl8723ae_hw_configure(hw);
	rtl_cam_reset_all_entry(hw);
	rtl8723ae_enable_hw_security_config(hw);

	ppsc->rfpwr_state = ERFON;

	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
	_rtl8723ae_enable_aspm_back_door(hw);
	rtlpriv->intf_ops->enable_aspm(hw);

	rtl8723ae_bt_hw_init(hw);

	/* IQ calibration: first run is the full calibration, subsequent
	 * inits reuse the stored result. */
	if (ppsc->rfpwr_state == ERFON) {
		rtl8723ae_phy_set_rfpath_switch(hw, 1);
		if (rtlphy->iqk_initialized) {
			rtl8723ae_phy_iq_calibrate(hw, true);
		} else {
			rtl8723ae_phy_iq_calibrate(hw, false);
			rtlphy->iqk_initialized = true;
		}

		rtl8723ae_phy_lc_calibrate(hw);
	}

	/* PA bias / low-voltage tweaks depending on efuse byte 0x1FA. */
	tmp_u1b = efuse_read_1byte(hw, 0x1FA);
	if (!(tmp_u1b & BIT(0))) {
		rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
	}

	if (!(tmp_u1b & BIT(4))) {
		tmp_u1b = rtl_read_byte(rtlpriv, 0x16) & 0x0F;
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
		udelay(10);
		rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
	}
	rtl8723ae_dm_init(hw);
	rtlpriv->rtlhal.being_init_adapter = false;
	return err;
}
943
944static enum version_8723e _rtl8723ae_read_chip_version(struct ieee80211_hw *hw)
945{
946 struct rtl_priv *rtlpriv = rtl_priv(hw);
947 struct rtl_phy *rtlphy = &(rtlpriv->phy);
948 enum version_8723e version = 0x0000;
949 u32 value32;
950
951 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
952 if (value32 & TRP_VAUX_EN) {
953 version = (enum version_8723e)(version |
954 ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
955 /* RTL8723 with BT function. */
956 version = (enum version_8723e)(version |
957 ((value32 & BT_FUNC) ? CHIP_8723 : 0));
958
959 } else {
960 /* Normal mass production chip. */
961 version = (enum version_8723e) NORMAL_CHIP;
962 version = (enum version_8723e)(version |
963 ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0));
964 /* RTL8723 with BT function. */
965 version = (enum version_8723e)(version |
966 ((value32 & BT_FUNC) ? CHIP_8723 : 0));
967 if (IS_CHIP_VENDOR_UMC(version))
968 version = (enum version_8723e)(version |
969 ((value32 & CHIP_VER_RTL_MASK)));/* IC version (CUT) */
970 if (IS_8723_SERIES(version)) {
971 value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
972 /* ROM code version */
973 version = (enum version_8723e)(version |
974 ((value32 & RF_RL_ID)>>20));
975 }
976 }
977
978 if (IS_8723_SERIES(version)) {
979 value32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
980 rtlphy->polarity_ctl = ((value32 & WL_HWPDN_SL) ?
981 RT_POLARITY_HIGH_ACT :
982 RT_POLARITY_LOW_ACT);
983 }
984 switch (version) {
985 case VERSION_TEST_UMC_CHIP_8723:
986 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
987 "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
988 break;
989 case VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT:
990 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
991 "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
992 break;
993 case VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT:
994 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
995 "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
996 break;
997 default:
998 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
999 "Chip Version ID: Unknown. Bug?\n");
1000 break;
1001 }
1002
1003 if (IS_8723_SERIES(version))
1004 rtlphy->rf_type = RF_1T1R;
1005
1006 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
1007 (rtlphy->rf_type == RF_2T2R) ? "RF_2T2R" : "RF_1T1R");
1008
1009 return version;
1010}
1011
1012static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
1013 enum nl80211_iftype type)
1014{
1015 struct rtl_priv *rtlpriv = rtl_priv(hw);
1016 u8 bt_msr = rtl_read_byte(rtlpriv, MSR) & 0xfc;
1017 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1018
1019 rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
1020 RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
1021 "clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
1022
1023 if (type == NL80211_IFTYPE_UNSPECIFIED ||
1024 type == NL80211_IFTYPE_STATION) {
1025 _rtl8723ae_stop_tx_beacon(hw);
1026 _rtl8723ae_enable_bcn_sufunc(hw);
1027 } else if (type == NL80211_IFTYPE_ADHOC ||
1028 type == NL80211_IFTYPE_AP) {
1029 _rtl8723ae_resume_tx_beacon(hw);
1030 _rtl8723ae_disable_bcn_sufunc(hw);
1031 } else {
1032 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1033 "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
1034 type);
1035 }
1036
1037 switch (type) {
1038 case NL80211_IFTYPE_UNSPECIFIED:
1039 bt_msr |= MSR_NOLINK;
1040 ledaction = LED_CTL_LINK;
1041 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1042 "Set Network type to NO LINK!\n");
1043 break;
1044 case NL80211_IFTYPE_ADHOC:
1045 bt_msr |= MSR_ADHOC;
1046 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1047 "Set Network type to Ad Hoc!\n");
1048 break;
1049 case NL80211_IFTYPE_STATION:
1050 bt_msr |= MSR_INFRA;
1051 ledaction = LED_CTL_LINK;
1052 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1053 "Set Network type to STA!\n");
1054 break;
1055 case NL80211_IFTYPE_AP:
1056 bt_msr |= MSR_AP;
1057 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1058 "Set Network type to AP!\n");
1059 break;
1060 default:
1061 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1062 "Network type %d not supported!\n",
1063 type);
1064 return 1;
1065 break;
1066
1067 }
1068
1069 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1070 rtlpriv->cfg->ops->led_control(hw, ledaction);
1071 if ((bt_msr & 0x03) == MSR_AP)
1072 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
1073 else
1074 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
1075 return 0;
1076}
1077
1078void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1079{
1080 struct rtl_priv *rtlpriv = rtl_priv(hw);
1081 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1082 u32 reg_rcr = rtlpci->receive_config;
1083
1084 if (rtlpriv->psc.rfpwr_state != ERFON)
1085 return;
1086
1087 if (check_bssid == true) {
1088 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1089 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1090 (u8 *)(&reg_rcr));
1091 _rtl8723ae_set_bcn_ctrl_reg(hw, 0, BIT(4));
1092 } else if (check_bssid == false) {
1093 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1094 _rtl8723ae_set_bcn_ctrl_reg(hw, BIT(4), 0);
1095 rtlpriv->cfg->ops->set_hw_reg(hw,
1096 HW_VAR_RCR, (u8 *) (&reg_rcr));
1097 }
1098}
1099
1100int rtl8723ae_set_network_type(struct ieee80211_hw *hw,
1101 enum nl80211_iftype type)
1102{
1103 struct rtl_priv *rtlpriv = rtl_priv(hw);
1104
1105 if (_rtl8723ae_set_media_status(hw, type))
1106 return -EOPNOTSUPP;
1107
1108 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1109 if (type != NL80211_IFTYPE_AP)
1110 rtl8723ae_set_check_bssid(hw, true);
1111 } else {
1112 rtl8723ae_set_check_bssid(hw, false);
1113 }
1114 return 0;
1115}
1116
1117/* don't set REG_EDCA_BE_PARAM here because mac80211 will send pkt when scan */
1118void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci)
1119{
1120 struct rtl_priv *rtlpriv = rtl_priv(hw);
1121
1122 rtl8723ae_dm_init_edca_turbo(hw);
1123 switch (aci) {
1124 case AC1_BK:
1125 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
1126 break;
1127 case AC0_BE:
1128 /* rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4ac_param); */
1129 break;
1130 case AC2_VI:
1131 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
1132 break;
1133 case AC3_VO:
1134 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
1135 break;
1136 default:
1137 RT_ASSERT(false, "invalid aci: %d !\n", aci);
1138 break;
1139 }
1140}
1141
/* Unmask the device interrupts by writing both cached IRQ masks to the
 * interrupt-mask registers (0x3a8/0x3ac -- presumably HIMR/HIMRE for this
 * chip; confirm against the register map) and mark interrupts as enabled.
 */
void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtl_write_dword(rtlpriv, 0x3a8, rtlpci->irq_mask[0] & 0xFFFFFFFF);
	rtl_write_dword(rtlpriv, 0x3ac, rtlpci->irq_mask[1] & 0xFFFFFFFF);
	rtlpci->irq_enabled = true;
}
1151
/* Mask all device interrupts, mark them disabled, and wait for any
 * in-flight interrupt handler to finish (synchronize_irq) so callers can
 * safely tear down state afterwards.
 */
void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
	rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
	rtlpci->irq_enabled = false;
	synchronize_irq(rtlpci->pdev->irq);
}
1162
/* Power the adapter down following the vendor-prescribed sequence.
 * The ordering of these register writes is hardware-mandated; do not
 * reorder.
 */
static void _rtl8723ae_poweroff_adapter(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 u1tmp;

	/* Combo (PCIe + USB) Card and PCIe-MF Card */
	/* 1. Run LPS WL RFOFF flow */
	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
				 PWR_INTF_PCI_MSK, Rtl8723_NIC_LPS_ENTER_FLOW);

	/* 2. 0x1F[7:0] = 0 */
	/* turn off RF */
	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
	/* Only self-reset the firmware if it was actually loaded and ready. */
	if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready)
		rtl8723ae_firmware_selfreset(hw);

	/* Reset MCU. Suggested by Filen. */
	u1tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1tmp & (~BIT(2))));

	/* g. MCUFWDL 0x80[1:0]=0 */
	/* reset MCU ready status */
	rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);

	/* HW card disable configuration. */
	rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
				 PWR_INTF_PCI_MSK, Rtl8723_NIC_DISABLE_FLOW);

	/* Reset MCU IO Wrapper: clear BIT(0), then set it again. */
	u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, (u1tmp & (~BIT(0))));
	u1tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL + 1);
	rtl_write_byte(rtlpriv, REG_RSV_CTRL + 1, u1tmp | BIT(0));

	/* 7. RSV_CTRL 0x1C[7:0] = 0x0E */
	/* lock ISO/CLK/Power control register */
	rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
}
1202
/* Fully disable the card: drop the link state, switch the media status to
 * "no link", turn the LED off when appropriate, and run the adapter
 * power-off sequence.  IQK calibration state is invalidated since it must
 * be redone after the next power-on.
 */
void rtl8723ae_card_disable(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	enum nl80211_iftype opmode;

	mac->link_state = MAC80211_NOLINK;
	opmode = NL80211_IFTYPE_UNSPECIFIED;
	_rtl8723ae_set_media_status(hw, opmode);
	/* LED off only on unload or when RF was turned off for a reason
	 * stronger than power save. */
	if (rtlpci->driver_is_goingto_unload ||
	    ppsc->rfoff_reason > RF_CHANGE_BY_PS)
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
	RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
	_rtl8723ae_poweroff_adapter(hw);

	/* after power off we should do iqk again */
	rtlpriv->phy.iqk_initialized = false;
}
1223
/* Read and acknowledge the pending interrupt sources.
 *
 * Pending bits (0x3a0, masked with irq_mask[0]) are returned in *p_inta
 * and acknowledged by writing them back.  @p_intb is part of the common
 * rtlwifi callback signature but is not used by this chip.
 */
void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw,
				    u32 *p_inta, u32 *p_intb)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	*p_inta = rtl_read_dword(rtlpriv, 0x3a0) & rtlpci->irq_mask[0];
	rtl_write_dword(rtlpriv, 0x3a0, *p_inta);
}
1233
/* Configure beacon-related registers (ATIM window, beacon interval,
 * beacon TCFG, TSF RX offsets) with interrupts disabled around the
 * register updates.
 */
void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw)
{

	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 bcn_interval, atim_window;

	bcn_interval = mac->beacon_interval;
	atim_window = 2; /*FIX MERGE */
	rtl8723ae_disable_interrupt(hw);
	rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
	rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
	rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
	rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
	/* 0x606: undocumented here -- value taken from vendor init;
	 * TODO confirm against the register map. */
	rtl_write_byte(rtlpriv, 0x606, 0x30);
	rtl8723ae_enable_interrupt(hw);
}
1252
1253void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw)
1254{
1255 struct rtl_priv *rtlpriv = rtl_priv(hw);
1256 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1257 u16 bcn_interval = mac->beacon_interval;
1258
1259 RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
1260 "beacon_interval:%d\n", bcn_interval);
1261 rtl8723ae_disable_interrupt(hw);
1262 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1263 rtl8723ae_enable_interrupt(hw);
1264}
1265
1266void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw,
1267 u32 add_msr, u32 rm_msr)
1268{
1269 struct rtl_priv *rtlpriv = rtl_priv(hw);
1270 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1271
1272 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
1273 "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
1274
1275 if (add_msr)
1276 rtlpci->irq_mask[0] |= add_msr;
1277 if (rm_msr)
1278 rtlpci->irq_mask[0] &= (~rm_msr);
1279 rtl8723ae_disable_interrupt(hw);
1280 rtl8723ae_enable_interrupt(hw);
1281}
1282
/* Map a 0-based channel index to its TX-power calibration group:
 * 0 for channels 0-2, 1 for 3-8, 2 for 9 and above.
 */
static u8 _rtl8723ae_get_chnl_group(u8 chnl)
{
	if (chnl < 3)
		return 0;
	if (chnl < 9)
		return 1;
	return 2;
}
1295
/* Expand the per-channel-group TX power calibration data from the efuse
 * shadow map (@hwinfo) into the per-channel tables in @rtlefuse.  When
 * @autoload_fail is set, EEPROM_DEFAULT_* fallback values are used
 * throughout.  Channel groups (see _rtl8723ae_get_chnl_group) cover
 * indices 0-2 / 3-8 / 9-13.
 */
static void _rtl8723ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
						   bool autoload_fail,
						   u8 *hwinfo)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 rf_path, index, tempval;
	u16 i;

	/* Base CCK and HT40-1S power per channel area.
	 * NOTE(review): only RF path 0 is filled here (loop bound is 1),
	 * while the debug loops below print both paths -- path 1 entries
	 * are presumably unused on this 1T1R chip; confirm intended. */
	for (rf_path = 0; rf_path < 1; rf_path++) {
		for (i = 0; i < 3; i++) {
			if (!autoload_fail) {
				rtlefuse->eeprom_chnlarea_txpwr_cck
					[rf_path][i] =
				    hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
					[rf_path][i] =
				    hwinfo[EEPROM_TXPOWERHT40_1S + rf_path *
					3 + i];
			} else {
				rtlefuse->eeprom_chnlarea_txpwr_cck
					[rf_path][i] =
				    EEPROM_DEFAULT_TXPOWERLEVEL;
				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
					[rf_path][i] =
				    EEPROM_DEFAULT_TXPOWERLEVEL;
			}
		}
	}

	/* HT40 2S power difference: one byte per group, low nibble is
	 * path A, high nibble is path B. */
	for (i = 0; i < 3; i++) {
		if (!autoload_fail)
			tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
		else
			tempval = EEPROM_DEFAULT_HT40_2SDIFF;
		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_A][i] =
		    (tempval & 0xf);
		rtlefuse->eprom_chnl_txpwr_ht40_2sdf[RF90_PATH_B][i] =
		    ((tempval & 0xf0) >> 4);
	}

	/* Debug dump of the raw per-area values. */
	for (rf_path = 0; rf_path < 2; rf_path++)
		for (i = 0; i < 3; i++)
			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
				"RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
				i, rtlefuse->eeprom_chnlarea_txpwr_cck
					[rf_path][i]);
	for (rf_path = 0; rf_path < 2; rf_path++)
		for (i = 0; i < 3; i++)
			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
				"RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
				rf_path, i,
				rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
					[rf_path][i]);
	for (rf_path = 0; rf_path < 2; rf_path++)
		for (i = 0; i < 3; i++)
			RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
				"RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
				rf_path, i,
				rtlefuse->eprom_chnl_txpwr_ht40_2sdf
					[rf_path][i]);

	/* Expand group values to per-channel CCK / HT40-1S / HT40-2S levels.
	 * The 2S level is the 1S level minus the 2S diff, clamped at 0. */
	for (rf_path = 0; rf_path < 2; rf_path++) {
		for (i = 0; i < 14; i++) {
			index = _rtl8723ae_get_chnl_group((u8) i);

			rtlefuse->txpwrlevel_cck[rf_path][i] =
			    rtlefuse->eeprom_chnlarea_txpwr_cck
					[rf_path][index];
			rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
			    rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
					[rf_path][index];

			if ((rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
					[rf_path][index] -
			     rtlefuse->eprom_chnl_txpwr_ht40_2sdf[rf_path]
					[index]) > 0) {
				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
				    rtlefuse->eeprom_chnlarea_txpwr_ht40_1s
				    [rf_path][index] -
				    rtlefuse->eprom_chnl_txpwr_ht40_2sdf
				    [rf_path][index];
			} else {
				rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
			}
		}

		for (i = 0; i < 14; i++) {
			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
				"RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
				"[0x%x / 0x%x / 0x%x]\n", rf_path, i,
				rtlefuse->txpwrlevel_cck[rf_path][i],
				rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
				rtlefuse->txpwrlevel_ht40_2s[rf_path][i]);
		}
	}

	/* Power-group limits per area: HT40 first, then HT20 (3 bytes each). */
	for (i = 0; i < 3; i++) {
		if (!autoload_fail) {
			rtlefuse->eeprom_pwrlimit_ht40[i] =
			    hwinfo[EEPROM_TXPWR_GROUP + i];
			rtlefuse->eeprom_pwrlimit_ht20[i] =
			    hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
		} else {
			rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
			rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
		}
	}

	/* Expand power-group limits per channel; path A uses the low nibble,
	 * path B the high nibble. */
	for (rf_path = 0; rf_path < 2; rf_path++) {
		for (i = 0; i < 14; i++) {
			index = _rtl8723ae_get_chnl_group((u8) i);

			if (rf_path == RF90_PATH_A) {
				rtlefuse->pwrgroup_ht20[rf_path][i] =
				    (rtlefuse->eeprom_pwrlimit_ht20[index] &
				     0xf);
				rtlefuse->pwrgroup_ht40[rf_path][i] =
				    (rtlefuse->eeprom_pwrlimit_ht40[index] &
				     0xf);
			} else if (rf_path == RF90_PATH_B) {
				rtlefuse->pwrgroup_ht20[rf_path][i] =
				    ((rtlefuse->eeprom_pwrlimit_ht20[index] &
				      0xf0) >> 4);
				rtlefuse->pwrgroup_ht40[rf_path][i] =
				    ((rtlefuse->eeprom_pwrlimit_ht40[index] &
				      0xf0) >> 4);
			}

			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
				"RF-%d pwrgroup_ht20[%d] = 0x%x\n", rf_path, i,
				rtlefuse->pwrgroup_ht20[rf_path][i]);
			RTPRINT(rtlpriv, FINIT, INIT_TxPower,
				"RF-%d pwrgroup_ht40[%d] = 0x%x\n", rf_path, i,
				rtlefuse->pwrgroup_ht40[rf_path][i]);
		}
	}

	/* HT20 and legacy-OFDM power diffs per channel; each stored nibble is
	 * sign-extended from 4 bits via the BIT(3) check below. */
	for (i = 0; i < 14; i++) {
		index = _rtl8723ae_get_chnl_group((u8) i);

		if (!autoload_fail)
			tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
		else
			tempval = EEPROM_DEFAULT_HT20_DIFF;

		rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
		rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
		    ((tempval >> 4) & 0xF);

		if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
			rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;

		if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
			rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;

		index = _rtl8723ae_get_chnl_group((u8) i);

		if (!autoload_fail)
			tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
		else
			tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;

		rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
		rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
		    ((tempval >> 4) & 0xF);
	}

	/* Channel 8's path-A legacy diff is used as the global default. */
	rtlefuse->legacy_ht_txpowerdiff =
	    rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];

	for (i = 0; i < 14; i++)
		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
			"RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
			rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]);
	for (i = 0; i < 14; i++)
		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
			"RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
			rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]);
	for (i = 0; i < 14; i++)
		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
			"RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
			rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]);
	for (i = 0; i < 14; i++)
		RTPRINT(rtlpriv, FINIT, INIT_TxPower,
			"RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
			rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]);

	/* Regulatory selector from RF_OPTION1 (low 3 bits). */
	if (!autoload_fail)
		rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
	else
		rtlefuse->eeprom_regulatory = 0;
	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
		"eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);

	/* TSSI calibration value (path A only is read from efuse here). */
	if (!autoload_fail)
		rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
	else
		rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
		"TSSI_A = 0x%x, TSSI_B = 0x%x\n",
		rtlefuse->eeprom_tssi[RF90_PATH_A],
		rtlefuse->eeprom_tssi[RF90_PATH_B]);

	/* Thermal meter: 5-bit field; 0x1f means "not calibrated", in which
	 * case APK ignores the thermal meter. */
	if (!autoload_fail)
		tempval = hwinfo[EEPROM_THERMAL_METER];
	else
		tempval = EEPROM_DEFAULT_THERMALMETER;
	rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);

	if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
		rtlefuse->apk_thermalmeterignore = true;

	rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
	RTPRINT(rtlpriv, FINIT, INIT_TxPower,
		"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
}
1513
1514static void _rtl8723ae_read_adapter_info(struct ieee80211_hw *hw,
1515 bool pseudo_test)
1516{
1517 struct rtl_priv *rtlpriv = rtl_priv(hw);
1518 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1519 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1520 u16 i, usvalue;
1521 u8 hwinfo[HWSET_MAX_SIZE];
1522 u16 eeprom_id;
1523
1524 if (pseudo_test) {
1525 /* need add */
1526 return;
1527 }
1528 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
1529 rtl_efuse_shadow_map_update(hw);
1530
1531 memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1532 HWSET_MAX_SIZE);
1533 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1534 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1535 "RTL819X Not boot from eeprom, check it !!");
1536 }
1537
1538 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"),
1539 hwinfo, HWSET_MAX_SIZE);
1540
1541 eeprom_id = *((u16 *)&hwinfo[0]);
1542 if (eeprom_id != RTL8190_EEPROM_ID) {
1543 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1544 "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
1545 rtlefuse->autoload_failflag = true;
1546 } else {
1547 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
1548 rtlefuse->autoload_failflag = false;
1549 }
1550
1551 if (rtlefuse->autoload_failflag == true)
1552 return;
1553
1554 rtlefuse->eeprom_vid = *(u16 *) &hwinfo[EEPROM_VID];
1555 rtlefuse->eeprom_did = *(u16 *) &hwinfo[EEPROM_DID];
1556 rtlefuse->eeprom_svid = *(u16 *) &hwinfo[EEPROM_SVID];
1557 rtlefuse->eeprom_smid = *(u16 *) &hwinfo[EEPROM_SMID];
1558 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1559 "EEPROMId = 0x%4x\n", eeprom_id);
1560 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1561 "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
1562 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1563 "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
1564 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1565 "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
1566 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1567 "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
1568
1569 for (i = 0; i < 6; i += 2) {
1570 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1571 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
1572 }
1573
1574 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1575 "dev_addr: %pM\n", rtlefuse->dev_addr);
1576
1577 _rtl8723ae_read_txpower_info_from_hwpg(hw,
1578 rtlefuse->autoload_failflag, hwinfo);
1579
1580 rtl8723ae_read_bt_coexist_info_from_hwpg(hw,
1581 rtlefuse->autoload_failflag, hwinfo);
1582
1583 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1584 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1585 rtlefuse->txpwr_fromeprom = true;
1586 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
1587
1588 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1589 "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
1590
1591 /* set channel paln to world wide 13 */
1592 rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
1593
1594 if (rtlhal->oem_id == RT_CID_DEFAULT) {
1595 switch (rtlefuse->eeprom_oemid) {
1596 case EEPROM_CID_DEFAULT:
1597 if (rtlefuse->eeprom_did == 0x8176) {
1598 if (CHK_SVID_SMID(0x10EC, 0x6151) ||
1599 CHK_SVID_SMID(0x10EC, 0x6152) ||
1600 CHK_SVID_SMID(0x10EC, 0x6154) ||
1601 CHK_SVID_SMID(0x10EC, 0x6155) ||
1602 CHK_SVID_SMID(0x10EC, 0x6177) ||
1603 CHK_SVID_SMID(0x10EC, 0x6178) ||
1604 CHK_SVID_SMID(0x10EC, 0x6179) ||
1605 CHK_SVID_SMID(0x10EC, 0x6180) ||
1606 CHK_SVID_SMID(0x10EC, 0x8151) ||
1607 CHK_SVID_SMID(0x10EC, 0x8152) ||
1608 CHK_SVID_SMID(0x10EC, 0x8154) ||
1609 CHK_SVID_SMID(0x10EC, 0x8155) ||
1610 CHK_SVID_SMID(0x10EC, 0x8181) ||
1611 CHK_SVID_SMID(0x10EC, 0x8182) ||
1612 CHK_SVID_SMID(0x10EC, 0x8184) ||
1613 CHK_SVID_SMID(0x10EC, 0x8185) ||
1614 CHK_SVID_SMID(0x10EC, 0x9151) ||
1615 CHK_SVID_SMID(0x10EC, 0x9152) ||
1616 CHK_SVID_SMID(0x10EC, 0x9154) ||
1617 CHK_SVID_SMID(0x10EC, 0x9155) ||
1618 CHK_SVID_SMID(0x10EC, 0x9181) ||
1619 CHK_SVID_SMID(0x10EC, 0x9182) ||
1620 CHK_SVID_SMID(0x10EC, 0x9184) ||
1621 CHK_SVID_SMID(0x10EC, 0x9185))
1622 rtlhal->oem_id = RT_CID_TOSHIBA;
1623 else if (rtlefuse->eeprom_svid == 0x1025)
1624 rtlhal->oem_id = RT_CID_819x_Acer;
1625 else if (CHK_SVID_SMID(0x10EC, 0x6191) ||
1626 CHK_SVID_SMID(0x10EC, 0x6192) ||
1627 CHK_SVID_SMID(0x10EC, 0x6193) ||
1628 CHK_SVID_SMID(0x10EC, 0x7191) ||
1629 CHK_SVID_SMID(0x10EC, 0x7192) ||
1630 CHK_SVID_SMID(0x10EC, 0x7193) ||
1631 CHK_SVID_SMID(0x10EC, 0x8191) ||
1632 CHK_SVID_SMID(0x10EC, 0x8192) ||
1633 CHK_SVID_SMID(0x10EC, 0x8193))
1634 rtlhal->oem_id = RT_CID_819x_SAMSUNG;
1635 else if (CHK_SVID_SMID(0x10EC, 0x8195) ||
1636 CHK_SVID_SMID(0x10EC, 0x9195) ||
1637 CHK_SVID_SMID(0x10EC, 0x7194) ||
1638 CHK_SVID_SMID(0x10EC, 0x8200) ||
1639 CHK_SVID_SMID(0x10EC, 0x8201) ||
1640 CHK_SVID_SMID(0x10EC, 0x8202) ||
1641 CHK_SVID_SMID(0x10EC, 0x9200))
1642 rtlhal->oem_id = RT_CID_819x_Lenovo;
1643 else if (CHK_SVID_SMID(0x10EC, 0x8197) ||
1644 CHK_SVID_SMID(0x10EC, 0x9196))
1645 rtlhal->oem_id = RT_CID_819x_CLEVO;
1646 else if (CHK_SVID_SMID(0x1028, 0x8194) ||
1647 CHK_SVID_SMID(0x1028, 0x8198) ||
1648 CHK_SVID_SMID(0x1028, 0x9197) ||
1649 CHK_SVID_SMID(0x1028, 0x9198))
1650 rtlhal->oem_id = RT_CID_819x_DELL;
1651 else if (CHK_SVID_SMID(0x103C, 0x1629))
1652 rtlhal->oem_id = RT_CID_819x_HP;
1653 else if (CHK_SVID_SMID(0x1A32, 0x2315))
1654 rtlhal->oem_id = RT_CID_819x_QMI;
1655 else if (CHK_SVID_SMID(0x10EC, 0x8203))
1656 rtlhal->oem_id = RT_CID_819x_PRONETS;
1657 else if (CHK_SVID_SMID(0x1043, 0x84B5))
1658 rtlhal->oem_id =
1659 RT_CID_819x_Edimax_ASUS;
1660 else
1661 rtlhal->oem_id = RT_CID_DEFAULT;
1662 } else if (rtlefuse->eeprom_did == 0x8178) {
1663 if (CHK_SVID_SMID(0x10EC, 0x6181) ||
1664 CHK_SVID_SMID(0x10EC, 0x6182) ||
1665 CHK_SVID_SMID(0x10EC, 0x6184) ||
1666 CHK_SVID_SMID(0x10EC, 0x6185) ||
1667 CHK_SVID_SMID(0x10EC, 0x7181) ||
1668 CHK_SVID_SMID(0x10EC, 0x7182) ||
1669 CHK_SVID_SMID(0x10EC, 0x7184) ||
1670 CHK_SVID_SMID(0x10EC, 0x7185) ||
1671 CHK_SVID_SMID(0x10EC, 0x8181) ||
1672 CHK_SVID_SMID(0x10EC, 0x8182) ||
1673 CHK_SVID_SMID(0x10EC, 0x8184) ||
1674 CHK_SVID_SMID(0x10EC, 0x8185) ||
1675 CHK_SVID_SMID(0x10EC, 0x9181) ||
1676 CHK_SVID_SMID(0x10EC, 0x9182) ||
1677 CHK_SVID_SMID(0x10EC, 0x9184) ||
1678 CHK_SVID_SMID(0x10EC, 0x9185))
1679 rtlhal->oem_id = RT_CID_TOSHIBA;
1680 else if (rtlefuse->eeprom_svid == 0x1025)
1681 rtlhal->oem_id = RT_CID_819x_Acer;
1682 else if (CHK_SVID_SMID(0x10EC, 0x8186))
1683 rtlhal->oem_id = RT_CID_819x_PRONETS;
1684 else if (CHK_SVID_SMID(0x1043, 0x8486))
1685 rtlhal->oem_id =
1686 RT_CID_819x_Edimax_ASUS;
1687 else
1688 rtlhal->oem_id = RT_CID_DEFAULT;
1689 } else {
1690 rtlhal->oem_id = RT_CID_DEFAULT;
1691 }
1692 break;
1693 case EEPROM_CID_TOSHIBA:
1694 rtlhal->oem_id = RT_CID_TOSHIBA;
1695 break;
1696 case EEPROM_CID_CCX:
1697 rtlhal->oem_id = RT_CID_CCX;
1698 break;
1699 case EEPROM_CID_QMI:
1700 rtlhal->oem_id = RT_CID_819x_QMI;
1701 break;
1702 case EEPROM_CID_WHQL:
1703 break;
1704 default:
1705 rtlhal->oem_id = RT_CID_DEFAULT;
1706 break;
1707
1708 }
1709 }
1710}
1711
1712static void _rtl8723ae_hal_customized_behavior(struct ieee80211_hw *hw)
1713{
1714 struct rtl_priv *rtlpriv = rtl_priv(hw);
1715 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1716 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1717
1718 switch (rtlhal->oem_id) {
1719 case RT_CID_819x_HP:
1720 pcipriv->ledctl.led_opendrain = true;
1721 break;
1722 case RT_CID_819x_Lenovo:
1723 case RT_CID_DEFAULT:
1724 case RT_CID_TOSHIBA:
1725 case RT_CID_CCX:
1726 case RT_CID_819x_Acer:
1727 case RT_CID_WHQL:
1728 default:
1729 break;
1730 }
1731 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1732 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
1733}
1734
/* Top-level EEPROM/efuse probe: select the WiFi efuse, read the chip
 * version, detect the boot source and autoload status, then parse the
 * adapter info and apply OEM quirks.
 */
void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u8 tmp_u1b;
	u32 value32;

	/* Select the WiFi section of the efuse before any reads. */
	value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST]);
	value32 = (value32 & ~EFUSE_SEL_MASK) | EFUSE_SEL(EFUSE_WIFI_SEL_0);
	rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST], value32);

	rtlhal->version = _rtl8723ae_read_chip_version(hw);

	/* Enable RX path B as well only on multi-path RF configurations. */
	if (get_rf_type(rtlphy) == RF_1T1R)
		rtlpriv->dm.rfpath_rxenable[0] = true;
	else
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
		 rtlhal->version);

	/* 9346CR BIT(4): boot source; BIT(5): autoload succeeded. */
	tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
	if (tmp_u1b & BIT(4)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
		rtlefuse->epromtype = EEPROM_93C46;
	} else {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
		rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
	}
	if (tmp_u1b & BIT(5)) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
		rtlefuse->autoload_failflag = false;
		_rtl8723ae_read_adapter_info(hw, false);
	} else {
		/* Adapter info is still parsed (defaults apply on failure). */
		rtlefuse->autoload_failflag = true;
		_rtl8723ae_read_adapter_info(hw, false);
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
	}
	_rtl8723ae_hal_customized_behavior(hw);
}
1777
/* Legacy (non-ramask) rate adaptation: compute a rate bitmap for @sta from
 * its supported rates, HT MCS mask and the current wireless mode, then
 * write it to ARFR0.  Used when dm.useramask is off.
 */
static void rtl8723ae_update_hal_rate_table(struct ieee80211_hw *hw,
					    struct ieee80211_sta *sta)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	u32 ratr_value;
	u8 ratr_index = 0;
	u8 nmode = mac->ht_enable;
	/* SMPS is hard-wired to OFF here, so the STATIC branch below is
	 * currently dead; kept for parity with the vendor code. */
	u8 mimo_ps = IEEE80211_SMPS_OFF;
	u8 curtxbw_40mhz = mac->bw_40;
	u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
			       1 : 0;
	u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
			       1 : 0;
	enum wireless_mode wirelessmode = mac->mode;

	/* Base bitmap: legacy supported rates (5G shifted past CCK bits),
	 * plus HT MCS 0-15 at bits 12..27. */
	if (rtlhal->current_bandtype == BAND_ON_5G)
		ratr_value = sta->supp_rates[1] << 4;
	else
		ratr_value = sta->supp_rates[0];
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		ratr_value = 0xfff;
	ratr_value |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
		       sta->ht_cap.mcs.rx_mask[0] << 12);
	/* Restrict the bitmap per wireless mode. */
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		if (ratr_value & 0x0000000c)
			ratr_value &= 0x0000000d;
		else
			ratr_value &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_value &= 0x00000FF5;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		nmode = 1;
		if (mimo_ps == IEEE80211_SMPS_STATIC) {
			ratr_value &= 0x0007F005;
		} else {
			u32 ratr_mask;

			/* Single spatial stream: drop MCS 8-15. */
			if (get_rf_type(rtlphy) == RF_1T2R ||
			    get_rf_type(rtlphy) == RF_1T1R)
				ratr_mask = 0x000ff005;
			else
				ratr_mask = 0x0f0ff005;

			ratr_value &= ratr_mask;
		}
		break;
	default:
		if (rtlphy->rf_type == RF_1T2R)
			ratr_value &= 0x000ff0ff;
		else
			ratr_value &= 0x0f0ff0ff;

		break;
	}

	/* Under heavy BT (SCO/busy) coexistence with BC4, trim low rates. */
	if ((pcipriv->bt_coexist.bt_coexistence) &&
	    (pcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
	    (pcipriv->bt_coexist.bt_cur_state) &&
	    (pcipriv->bt_coexist.bt_ant_isolation) &&
	    ((pcipriv->bt_coexist.bt_service == BT_SCO) ||
	     (pcipriv->bt_coexist.bt_service == BT_BUSY)))
		ratr_value &= 0x0fffcfc0;
	else
		ratr_value &= 0x0FFFFFFF;

	/* Bit 28: short GI allowed for the current bandwidth. */
	if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
	    (!curtxbw_40mhz && curshortgi_20mhz)))
		ratr_value |= 0x10000000;

	rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);

	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
1860
/* Firmware-based rate adaptation: build an RSSI-dependent rate bitmap for
 * @sta and push it to the firmware via an H2C_RA_MASK command.  Used when
 * dm.useramask is on.  @rssi_level: 1 = high, 2 = middle, other = low.
 */
static void rtl8723ae_update_hal_rate_mask(struct ieee80211_hw *hw,
		struct ieee80211_sta *sta, u8 rssi_level)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_sta_info *sta_entry = NULL;
	u32 ratr_bitmap;
	u8 ratr_index;
	u8 curtxbw_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
			    ? 1 : 0;
	u8 curshortgi_40mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
			       1 : 0;
	u8 curshortgi_20mhz = (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
			       1 : 0;
	enum wireless_mode wirelessmode = 0;
	bool shortgi = false;
	u8 rate_mask[5];
	u8 macid = 0;
	/* SMPS hard-wired to OFF; STATIC branch below is currently dead. */
	u8 mimo_ps = IEEE80211_SMPS_OFF;

	sta_entry = (struct rtl_sta_info *) sta->drv_priv;
	wirelessmode = sta_entry->wireless_mode;
	if (mac->opmode == NL80211_IFTYPE_STATION)
		curtxbw_40mhz = mac->bw_40;
	else if (mac->opmode == NL80211_IFTYPE_AP ||
		mac->opmode == NL80211_IFTYPE_ADHOC)
		macid = sta->aid + 1;

	/* Base bitmap: legacy rates plus HT MCS 0-15 at bits 12..27. */
	if (rtlhal->current_bandtype == BAND_ON_5G)
		ratr_bitmap = sta->supp_rates[1] << 4;
	else
		ratr_bitmap = sta->supp_rates[0];
	if (mac->opmode == NL80211_IFTYPE_ADHOC)
		ratr_bitmap = 0xfff;
	ratr_bitmap |= (sta->ht_cap.mcs.rx_mask[1] << 20 |
			sta->ht_cap.mcs.rx_mask[0] << 12);
	/* Select the firmware RA index and trim the bitmap by mode,
	 * RSSI level, spatial streams and bandwidth. */
	switch (wirelessmode) {
	case WIRELESS_MODE_B:
		ratr_index = RATR_INX_WIRELESS_B;
		if (ratr_bitmap & 0x0000000c)
			ratr_bitmap &= 0x0000000d;
		else
			ratr_bitmap &= 0x0000000f;
		break;
	case WIRELESS_MODE_G:
		ratr_index = RATR_INX_WIRELESS_GB;

		if (rssi_level == 1)
			ratr_bitmap &= 0x00000f00;
		else if (rssi_level == 2)
			ratr_bitmap &= 0x00000ff0;
		else
			ratr_bitmap &= 0x00000ff5;
		break;
	case WIRELESS_MODE_A:
		ratr_index = RATR_INX_WIRELESS_A;
		ratr_bitmap &= 0x00000ff0;
		break;
	case WIRELESS_MODE_N_24G:
	case WIRELESS_MODE_N_5G:
		ratr_index = RATR_INX_WIRELESS_NGB;

		if (mimo_ps == IEEE80211_SMPS_STATIC) {
			if (rssi_level == 1)
				ratr_bitmap &= 0x00070000;
			else if (rssi_level == 2)
				ratr_bitmap &= 0x0007f000;
			else
				ratr_bitmap &= 0x0007f005;
		} else {
			/* Single stream (1T1R/1T2R): MCS 0-7 only. */
			if (rtlphy->rf_type == RF_1T2R ||
			    rtlphy->rf_type == RF_1T1R) {
				if (curtxbw_40mhz) {
					if (rssi_level == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff015;
				} else {
					if (rssi_level == 1)
						ratr_bitmap &= 0x000f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x000ff000;
					else
						ratr_bitmap &= 0x000ff005;
				}
			} else {
				if (curtxbw_40mhz) {
					if (rssi_level == 1)
						ratr_bitmap &= 0x0f0f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x0f0ff000;
					else
						ratr_bitmap &= 0x0f0ff015;
				} else {
					if (rssi_level == 1)
						ratr_bitmap &= 0x0f0f0000;
					else if (rssi_level == 2)
						ratr_bitmap &= 0x0f0ff000;
					else
						ratr_bitmap &= 0x0f0ff005;
				}
			}
		}

		if ((curtxbw_40mhz && curshortgi_40mhz) ||
		    (!curtxbw_40mhz && curshortgi_20mhz)) {
			if (macid == 0)
				shortgi = true;
			else if (macid == 1)
				shortgi = false;
		}
		break;
	default:
		ratr_index = RATR_INX_WIRELESS_NGB;

		if (rtlphy->rf_type == RF_1T2R)
			ratr_bitmap &= 0x000ff0ff;
		else
			ratr_bitmap &= 0x0f0ff0ff;
		break;
	}
	sta_entry->ratr_index = ratr_index;

	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "ratr_bitmap :%x\n", ratr_bitmap);
	/* convert ratr_bitmap to le byte array */
	rate_mask[0] = ratr_bitmap;
	rate_mask[1] = (ratr_bitmap >>= 8);
	rate_mask[2] = (ratr_bitmap >>= 8);
	/* byte 3: top nibble of bitmap plus the RA index; byte 4: macid,
	 * short-GI flag (0x20) and the "valid" bit (0x80). */
	rate_mask[3] = ((ratr_bitmap >> 8) & 0x0f) | (ratr_index << 4);
	rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
	RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
		 "Rate_index:%x, ratr_bitmap: %*phC\n",
		 ratr_index, 5, rate_mask);
	rtl8723ae_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
2001
2002void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
2003 struct ieee80211_sta *sta, u8 rssi_level)
2004{
2005 struct rtl_priv *rtlpriv = rtl_priv(hw);
2006
2007 if (rtlpriv->dm.useramask)
2008 rtl8723ae_update_hal_rate_mask(hw, sta, rssi_level);
2009 else
2010 rtl8723ae_update_hal_rate_table(hw, sta);
2011}
2012
2013void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw)
2014{
2015 struct rtl_priv *rtlpriv = rtl_priv(hw);
2016 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2017 u16 sifs_timer;
2018
2019 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
2020 (u8 *)&mac->slot_time);
2021 if (!mac->ht_enable)
2022 sifs_timer = 0x0a0a;
2023 else
2024 sifs_timer = 0x1010;
2025 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
2026}
2027
2028bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
2029{
2030 struct rtl_priv *rtlpriv = rtl_priv(hw);
2031 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2032 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2033 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
2034 u8 u1tmp;
2035 bool actuallyset = false;
2036
2037 if (rtlpriv->rtlhal.being_init_adapter)
2038 return false;
2039
2040 if (ppsc->swrf_processing)
2041 return false;
2042
2043 spin_lock(&rtlpriv->locks.rf_ps_lock);
2044 if (ppsc->rfchange_inprogress) {
2045 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2046 return false;
2047 } else {
2048 ppsc->rfchange_inprogress = true;
2049 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2050 }
2051
2052 cur_rfstate = ppsc->rfpwr_state;
2053
2054 rtl_write_byte(rtlpriv, REG_GPIO_IO_SEL_2,
2055 rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL_2)&~(BIT(1)));
2056
2057 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL_2);
2058
2059 if (rtlphy->polarity_ctl)
2060 e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFOFF : ERFON;
2061 else
2062 e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
2063
2064 if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
2065 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2066 "GPIOChangeRF - HW Radio ON, RF ON\n");
2067
2068 e_rfpowerstate_toset = ERFON;
2069 ppsc->hwradiooff = false;
2070 actuallyset = true;
2071 } else if ((ppsc->hwradiooff == false)
2072 && (e_rfpowerstate_toset == ERFOFF)) {
2073 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2074 "GPIOChangeRF - HW Radio OFF, RF OFF\n");
2075
2076 e_rfpowerstate_toset = ERFOFF;
2077 ppsc->hwradiooff = true;
2078 actuallyset = true;
2079 }
2080
2081 if (actuallyset) {
2082 spin_lock(&rtlpriv->locks.rf_ps_lock);
2083 ppsc->rfchange_inprogress = false;
2084 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2085 } else {
2086 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
2087 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2088
2089 spin_lock(&rtlpriv->locks.rf_ps_lock);
2090 ppsc->rfchange_inprogress = false;
2091 spin_unlock(&rtlpriv->locks.rf_ps_lock);
2092 }
2093
2094 *valid = 1;
2095 return !ppsc->hwradiooff;
2096}
2097
/*
 * Program (or delete) a security key in the hardware CAM.
 *
 * @key_index: software key slot / hardware index, depending on key type.
 * @p_macaddr: peer MAC address the key belongs to.
 * @is_group:  true for a group (broadcast/multicast) key.
 * @enc_algo:  cipher, translated below from the driver's *_ENCRYPTION
 *             constants into the CAM_* encoding the hardware expects.
 * @is_wepkey: true for WEP default keys.
 * @clear_all: when true, wipe the first five CAM entries and the cached
 *             software keys instead of programming anything.
 */
void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index,
		       u8 *p_macaddr, bool is_group, u8 enc_algo,
		       bool is_wepkey, bool clear_all)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *macaddr = p_macaddr;
	u32 entry_id = 0;
	bool is_pairwise = false;
	/* Synthetic MAC addresses used to tag WEP/default-key CAM entries. */
	static u8 cam_const_addr[4][6] = {
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	};
	/* Broadcast address used for group keys. */
	static u8 cam_const_broad[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (clear_all) {
		u8 idx = 0;
		u8 cam_offset = 0;
		u8 clear_number = 5;

		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");

		for (idx = 0; idx < clear_number; idx++) {
			rtl_cam_mark_invalid(hw, cam_offset + idx);
			rtl_cam_empty_entry(hw, cam_offset + idx);

			/* NOTE(review): idx < 5 is always true here since
			 * clear_number == 5; likely a remnant of a larger
			 * clear range in related drivers. */
			if (idx < 5) {
				memset(rtlpriv->sec.key_buf[idx], 0,
				       MAX_KEY_LEN);
				rtlpriv->sec.key_len[idx] = 0;
			}
		}
	} else {
		/* Map the driver cipher constant to the CAM encoding. */
		switch (enc_algo) {
		case WEP40_ENCRYPTION:
			enc_algo = CAM_WEP40;
			break;
		case WEP104_ENCRYPTION:
			enc_algo = CAM_WEP104;
			break;
		case TKIP_ENCRYPTION:
			enc_algo = CAM_TKIP;
			break;
		case AESCCMP_ENCRYPTION:
			enc_algo = CAM_AES;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not processed\n");
			enc_algo = CAM_TKIP;
			break;
		}

		/* Pick the CAM entry and the MAC address to tag it with. */
		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
			macaddr = cam_const_addr[key_index];
			entry_id = key_index;
		} else {
			if (is_group) {
				macaddr = cam_const_broad;
				entry_id = key_index;
			} else {
				/* Pairwise key: AP mode allocates a free
				 * entry per peer; otherwise use the fixed
				 * pairwise slot. */
				if (mac->opmode == NL80211_IFTYPE_AP) {
					entry_id = rtl_cam_get_free_entry(hw,
								 macaddr);
					if (entry_id >=  TOTAL_CAM_ENTRY) {
						RT_TRACE(rtlpriv, COMP_SEC,
							 DBG_EMERG,
							 "Can not find free hw security cam entry\n");
						return;
					}
				} else {
					entry_id = CAM_PAIRWISE_KEY_POSITION;
				}

				key_index = PAIRWISE_KEYIDX;
				is_pairwise = true;
			}
		}

		/* Zero key length means the key is being removed. */
		if (rtlpriv->sec.key_len[key_index] == 0) {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 "delete one entry, entry_id is %d\n",
				 entry_id);
			if (mac->opmode == NL80211_IFTYPE_AP)
				rtl_cam_del_entry(hw, p_macaddr);
			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
		} else {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 "add one entry\n");
			if (is_pairwise) {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 "set Pairwiase key\n");

				rtl_cam_add_one_entry(hw, macaddr, key_index,
					      entry_id, enc_algo,
					      CAM_CONFIG_NO_USEDK,
					      rtlpriv->sec.key_buf[key_index]);
			} else {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 "set group key\n");

				/* In IBSS the device's own address also
				 * gets a pairwise-position entry. */
				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
					rtl_cam_add_one_entry(hw,
						rtlefuse->dev_addr,
						PAIRWISE_KEYIDX,
						CAM_PAIRWISE_KEY_POSITION,
						enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf
						[entry_id]);
				}

				rtl_cam_add_one_entry(hw, macaddr, key_index,
					      entry_id, enc_algo,
					      CAM_CONFIG_NO_USEDK,
					      rtlpriv->sec.key_buf[entry_id]);
			}

		}
	}
}
2224
/*
 * Copy the BT-coexistence parameters read from EEPROM/efuse into the
 * runtime bt_coexist state and, when coexistence is enabled, reset the
 * dynamic coexistence bookkeeping. Only logs the configuration beyond
 * that; no hardware access happens here.
 */
static void rtl8723ae_bt_var_init(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Promote the eeprom_* snapshot into the active configuration. */
	pcipriv->bt_coexist.bt_coexistence =
		pcipriv->bt_coexist.eeprom_bt_coexist;
	pcipriv->bt_coexist.bt_ant_num =
		pcipriv->bt_coexist.eeprom_bt_ant_num;
	pcipriv->bt_coexist.bt_coexist_type =
		pcipriv->bt_coexist.eeprom_bt_type;

	pcipriv->bt_coexist.bt_ant_isolation =
		pcipriv->bt_coexist.eeprom_bt_ant_isol;

	pcipriv->bt_coexist.bt_radio_shared_type =
		pcipriv->bt_coexist.eeprom_bt_radio_shared;

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
		 "BT Coexistance = 0x%x\n",
		 pcipriv->bt_coexist.bt_coexistence);

	if (pcipriv->bt_coexist.bt_coexistence) {
		/* Start with all dynamic traffic/state tracking cleared. */
		pcipriv->bt_coexist.bt_busy_traffic = false;
		pcipriv->bt_coexist.bt_traffic_mode_set = false;
		pcipriv->bt_coexist.bt_non_traffic_mode_set = false;

		pcipriv->bt_coexist.cstate = 0;
		pcipriv->bt_coexist.previous_state = 0;

		if (pcipriv->bt_coexist.bt_ant_num == ANT_X2) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_Ant_Num = Antx2\n");
		} else if (pcipriv->bt_coexist.bt_ant_num == ANT_X1) {
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_Ant_Num = Antx1\n");
		}

		/* The switch only reports the detected BT chip type. */
		switch (pcipriv->bt_coexist.bt_coexist_type) {
		case BT_2WIRE:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_2Wire\n");
			break;
		case BT_ISSC_3WIRE:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
			break;
		case BT_ACCEL:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_ACCEL\n");
			break;
		case BT_CSR_BC4:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
			break;
		case BT_CSR_BC8:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
			break;
		case BT_RTL8756:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = BT_RTL8756\n");
			break;
		default:
			RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
				 "BlueTooth BT_CoexistType = Unknown\n");
			break;
		}
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "BlueTooth BT_Ant_isolation = %d\n",
			 pcipriv->bt_coexist.bt_ant_isolation);
		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
			 "BT_RadioSharedType = 0x%x\n",
			 pcipriv->bt_coexist.bt_radio_shared_type);
		pcipriv->bt_coexist.bt_active_zero_cnt = 0;
		pcipriv->bt_coexist.cur_bt_disabled = false;
		pcipriv->bt_coexist.pre_bt_disabled = false;
	}
}
2304
2305void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
2306 bool auto_load_fail, u8 *hwinfo)
2307{
2308 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2309 struct rtl_priv *rtlpriv = rtl_priv(hw);
2310 u8 value;
2311 u32 tmpu_32;
2312
2313 if (!auto_load_fail) {
2314 tmpu_32 = rtl_read_dword(rtlpriv, REG_MULTI_FUNC_CTRL);
2315 if (tmpu_32 & BIT(18))
2316 pcipriv->bt_coexist.eeprom_bt_coexist = 1;
2317 else
2318 pcipriv->bt_coexist.eeprom_bt_coexist = 0;
2319 value = hwinfo[RF_OPTION4];
2320 pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A;
2321 pcipriv->bt_coexist.eeprom_bt_ant_num = (value & 0x1);
2322 pcipriv->bt_coexist.eeprom_bt_ant_isol = ((value & 0x10) >> 4);
2323 pcipriv->bt_coexist.eeprom_bt_radio_shared =
2324 ((value & 0x20) >> 5);
2325 } else {
2326 pcipriv->bt_coexist.eeprom_bt_coexist = 0;
2327 pcipriv->bt_coexist.eeprom_bt_type = BT_RTL8723A;
2328 pcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
2329 pcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
2330 pcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
2331 }
2332
2333 rtl8723ae_bt_var_init(hw);
2334}
2335
/*
 * Initialize the module-parameter-style BT coexistence knobs to their
 * "take from efuse/counters" defaults.
 */
void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw)
{
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);

	/* 0:Low, 1:High, 2:From Efuse. */
	pcipriv->bt_coexist.reg_bt_iso = 2;
	/* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
	pcipriv->bt_coexist.reg_bt_sco = 3;
	/* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
	/* NOTE(review): this overwrites the reg_bt_sco value assigned two
	 * lines above; given the A-MPDU comment, this assignment was
	 * probably intended for a different field — confirm against the
	 * vendor driver before changing. */
	pcipriv->bt_coexist.reg_bt_sco = 0;
}
2347
2348
/* Intentionally empty: BT coexistence hardware setup placeholder kept to
 * satisfy the hal ops interface. */
void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw)
{
}
2352
/* Intentionally empty: no chip-specific suspend work; placeholder for the
 * hal ops interface. */
void rtl8723ae_suspend(struct ieee80211_hw *hw)
{
}
2356
/* Intentionally empty: no chip-specific resume work; placeholder for the
 * hal ops interface. */
void rtl8723ae_resume(struct ieee80211_hw *hw)
{
}
2360
/* Turn on AAP (RCR:bit 0) for promiscuous mode. */
void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
				  bool allow_all_da, bool write_into_reg)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	if (allow_all_da) /* Set BIT0 */
		rtlpci->receive_config |= RCR_AAP;
	else /* Clear BIT0 */
		rtlpci->receive_config &= ~RCR_AAP;

	/* The cached receive_config always changes; the hardware RCR is
	 * only touched when the caller asks for it. */
	if (write_into_reg)
		rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);


	RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
		 "receive_config=0x%08X, write_into_reg=%d\n",
		 rtlpci->receive_config, write_into_reg);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
new file mode 100644
index 000000000000..6fa24f79b1d7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.h
@@ -0,0 +1,73 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_HW_H__
31#define __RTL8723E_HW_H__
32
33#define CHK_SVID_SMID(_val1, _val2) \
34 ((rtlefuse->eeprom_svid == (_val1)) && \
35 (rtlefuse->eeprom_smid == (_val2)))
36
37void rtl8723ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
38void rtl8723ae_read_eeprom_info(struct ieee80211_hw *hw);
39
40void rtl8723ae_interrupt_recognized(struct ieee80211_hw *hw,
41 u32 *p_inta, u32 *p_intb);
42int rtl8723ae_hw_init(struct ieee80211_hw *hw);
43void rtl8723ae_card_disable(struct ieee80211_hw *hw);
44void rtl8723ae_enable_interrupt(struct ieee80211_hw *hw);
45void rtl8723ae_disable_interrupt(struct ieee80211_hw *hw);
46int rtl8723ae_set_network_type(struct ieee80211_hw *hw,
47 enum nl80211_iftype type);
48void rtl8723ae_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
49void rtl8723ae_set_qos(struct ieee80211_hw *hw, int aci);
50void rtl8723ae_set_beacon_related_registers(struct ieee80211_hw *hw);
51void rtl8723ae_set_beacon_interval(struct ieee80211_hw *hw);
52void rtl8723ae_update_interrupt_mask(struct ieee80211_hw *hw,
53 u32 add_msr, u32 rm_msr);
54void rtl8723ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
55void rtl8723ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
56 struct ieee80211_sta *sta, u8 rssi_level);
57void rtl8723ae_update_channel_access_setting(struct ieee80211_hw *hw);
58bool rtl8723ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
59void rtl8723ae_enable_hw_security_config(struct ieee80211_hw *hw);
60void rtl8723ae_set_key(struct ieee80211_hw *hw, u32 key_index,
61 u8 *p_macaddr, bool is_group, u8 enc_algo,
62 bool is_wepkey, bool clear_all);
63
64void rtl8723ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
65 bool autoload_fail, u8 *hwinfo);
66void rtl8723ae_bt_reg_init(struct ieee80211_hw *hw);
67void rtl8723ae_bt_hw_init(struct ieee80211_hw *hw);
68void rtl8723ae_suspend(struct ieee80211_hw *hw);
69void rtl8723ae_resume(struct ieee80211_hw *hw);
70void rtl8723ae_allow_all_destaddr(struct ieee80211_hw *hw,
71 bool allow_all_da, bool write_into_reg);
72
73#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
new file mode 100644
index 000000000000..9c4e1d811187
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.c
@@ -0,0 +1,151 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "reg.h"
33#include "led.h"
34
35static void _rtl8723ae_init_led(struct ieee80211_hw *hw,
36 struct rtl_led *pled, enum rtl_led_pin ledpin)
37{
38 pled->hw = hw;
39 pled->ledpin = ledpin;
40 pled->ledon = false;
41}
42
43void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
44{
45 struct rtl_priv *rtlpriv = rtl_priv(hw);
46 u8 ledcfg;
47
48 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
49 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
50
51 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
52
53 switch (pled->ledpin) {
54 case LED_PIN_GPIO0:
55 break;
56 case LED_PIN_LED0:
57 rtl_write_byte(rtlpriv,
58 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
59 break;
60 case LED_PIN_LED1:
61 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
62 break;
63 default:
64 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
65 "switch case not processed\n");
66 break;
67 }
68 pled->ledon = true;
69}
70
71void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
75 u8 ledcfg;
76
77 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
78 "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
79
80 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
81
82 switch (pled->ledpin) {
83 case LED_PIN_GPIO0:
84 break;
85 case LED_PIN_LED0:
86 ledcfg &= 0xf0;
87 if (pcipriv->ledctl.led_opendrain)
88 rtl_write_byte(rtlpriv, REG_LEDCFG2,
89 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
90 else
91 rtl_write_byte(rtlpriv, REG_LEDCFG2,
92 (ledcfg | BIT(3) | BIT(5) | BIT(6)));
93 break;
94 case LED_PIN_LED1:
95 ledcfg &= 0x0f;
96 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
97 break;
98 default:
99 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
100 "switch case not processed\n");
101 break;
102 }
103 pled->ledon = false;
104}
105
106void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw)
107{
108 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
109
110 _rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
111 _rtl8723ae_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
112}
113
114static void _rtl8723ae_sw_led_control(struct ieee80211_hw *hw,
115 enum led_ctl_mode ledaction)
116{
117 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
118 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
119
120 switch (ledaction) {
121 case LED_CTL_POWER_ON:
122 case LED_CTL_LINK:
123 case LED_CTL_NO_LINK:
124 rtl8723ae_sw_led_on(hw, pLed0);
125 break;
126 case LED_CTL_POWER_OFF:
127 rtl8723ae_sw_led_off(hw, pLed0);
128 break;
129 default:
130 break;
131 }
132}
133
134void rtl8723ae_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
135{
136 struct rtl_priv *rtlpriv = rtl_priv(hw);
137 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
138
139 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
140 (ledaction == LED_CTL_TX ||
141 ledaction == LED_CTL_RX ||
142 ledaction == LED_CTL_SITE_SURVEY ||
143 ledaction == LED_CTL_LINK ||
144 ledaction == LED_CTL_NO_LINK ||
145 ledaction == LED_CTL_START_TO_LINK ||
146 ledaction == LED_CTL_POWER_ON)) {
147 return;
148 }
149 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
150 _rtl8723ae_sw_led_control(hw, ledaction);
151}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/led.h b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h
new file mode 100644
index 000000000000..2cb88e78f62a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/led.h
@@ -0,0 +1,39 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
/* Include guard renamed: the previous __RTL92CE_LED_H__ was copy-pasted
 * from the rtl8192ce driver and would collide if both led.h headers were
 * ever included together. */
#ifndef __RTL8723AE_LED_H__
#define __RTL8723AE_LED_H__

void rtl8723ae_init_sw_leds(struct ieee80211_hw *hw);
void rtl8723ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl8723ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
void rtl8723ae_led_control(struct ieee80211_hw *hw,
			   enum led_ctl_mode ledaction);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
new file mode 100644
index 000000000000..39cc7938eedf
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
@@ -0,0 +1,2044 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "table.h"
39
40/* static forward definitions */
41static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
42 enum radio_path rfpath, u32 offset);
43static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
44 enum radio_path rfpath,
45 u32 offset, u32 data);
46static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
47 enum radio_path rfpath, u32 offset);
48static void _phy_rf_serial_write(struct ieee80211_hw *hw,
49 enum radio_path rfpath, u32 offset, u32 data);
50static u32 _phy_calculate_bit_shift(u32 bitmask);
51static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
52static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw);
53static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype);
54static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype);
55static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw);
56static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
57 u32 cmdtableidx, u32 cmdtablesz,
58 enum swchnlcmd_id cmdid,
59 u32 para1, u32 para2,
60 u32 msdelay);
61static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
62 u8 *stage, u8 *step, u32 *delay);
63static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
64 enum wireless_mode wirelessmode,
65 long power_indbm);
66static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
67 enum wireless_mode wirelessmode, u8 txpwridx);
68static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw);
69
70u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
71 u32 bitmask)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 u32 returnvalue, originalvalue, bitshift;
75
76 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
77 "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
78 originalvalue = rtl_read_dword(rtlpriv, regaddr);
79 bitshift = _phy_calculate_bit_shift(bitmask);
80 returnvalue = (originalvalue & bitmask) >> bitshift;
81
82 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
83 "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask, regaddr,
84 originalvalue);
85
86 return returnvalue;
87}
88
89void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
90 u32 regaddr, u32 bitmask, u32 data)
91{
92 struct rtl_priv *rtlpriv = rtl_priv(hw);
93 u32 originalvalue, bitshift;
94
95 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
96 "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr,
97 bitmask, data);
98
99 if (bitmask != MASKDWORD) {
100 originalvalue = rtl_read_dword(rtlpriv, regaddr);
101 bitshift = _phy_calculate_bit_shift(bitmask);
102 data = ((originalvalue & (~bitmask)) | (data << bitshift));
103 }
104
105 rtl_write_dword(rtlpriv, regaddr, data);
106
107 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
108 "regaddr(%#x), bitmask(%#x), data(%#x)\n",
109 regaddr, bitmask, data);
110}
111
112u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
113 enum radio_path rfpath, u32 regaddr, u32 bitmask)
114{
115 struct rtl_priv *rtlpriv = rtl_priv(hw);
116 u32 original_value, readback_value, bitshift;
117 struct rtl_phy *rtlphy = &(rtlpriv->phy);
118 unsigned long flags;
119
120 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
121 "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
122 regaddr, rfpath, bitmask);
123
124 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
125
126 if (rtlphy->rf_mode != RF_OP_BY_FW)
127 original_value = _phy_rf_serial_read(hw, rfpath, regaddr);
128 else
129 original_value = _phy_fw_rf_serial_read(hw, rfpath, regaddr);
130
131 bitshift = _phy_calculate_bit_shift(bitmask);
132 readback_value = (original_value & bitmask) >> bitshift;
133
134 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
135
136 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
137 "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
138 regaddr, rfpath, bitmask, original_value);
139
140 return readback_value;
141}
142
143void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
144 enum radio_path rfpath,
145 u32 regaddr, u32 bitmask, u32 data)
146{
147 struct rtl_priv *rtlpriv = rtl_priv(hw);
148 struct rtl_phy *rtlphy = &(rtlpriv->phy);
149 u32 original_value, bitshift;
150 unsigned long flags;
151
152 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
153 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
154 regaddr, bitmask, data, rfpath);
155
156 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
157
158 if (rtlphy->rf_mode != RF_OP_BY_FW) {
159 if (bitmask != RFREG_OFFSET_MASK) {
160 original_value = _phy_rf_serial_read(hw, rfpath,
161 regaddr);
162 bitshift = _phy_calculate_bit_shift(bitmask);
163 data = ((original_value & (~bitmask)) |
164 (data << bitshift));
165 }
166
167 _phy_rf_serial_write(hw, rfpath, regaddr, data);
168 } else {
169 if (bitmask != RFREG_OFFSET_MASK) {
170 original_value = _phy_fw_rf_serial_read(hw, rfpath,
171 regaddr);
172 bitshift = _phy_calculate_bit_shift(bitmask);
173 data = ((original_value & (~bitmask)) |
174 (data << bitshift));
175 }
176 _phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
177 }
178
179 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
180
181 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
182 "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
183 regaddr, bitmask, data, rfpath);
184}
185
/* Firmware-mediated RF reads are no longer supported; trap any caller
 * that still reaches this path. */
static u32 _phy_fw_rf_serial_read(struct ieee80211_hw *hw,
				  enum radio_path rfpath, u32 offset)
{
	RT_ASSERT(false, "deprecated!\n");
	return 0;
}
192
/* Firmware-mediated RF writes are no longer supported; trap any caller
 * that still reaches this path. */
static void _phy_fw_rf_serial_write(struct ieee80211_hw *hw,
				    enum radio_path rfpath,
				    u32 offset, u32 data)
{
	RT_ASSERT(false, "deprecated!\n");
}
199
/*
 * Read RF register @offset on @rfpath over the 3-wire/HSSI serial bus.
 *
 * The sequence (clear read edge, program the read address, set read edge)
 * with 1 ms settling delays follows the hardware's LSSI read protocol;
 * the result is then fetched from the PI or SI readback register
 * depending on whether parallel-interface mode is enabled for the path.
 * Returns 0xFFFFFFFF when I/O to the device is not currently possible.
 * NOTE: the mdelay() ordering here is timing-sensitive — do not reorder.
 */
static u32 _phy_rf_serial_read(struct ieee80211_hw *hw,
			       enum radio_path rfpath, u32 offset)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
	u32 newoffset;
	u32 tmplong, tmplong2;
	u8 rfpi_enable = 0;
	u32 retvalue;

	/* RF register space is 6 bits wide on this chip. */
	offset &= 0x3f;
	newoffset = offset;
	if (RT_CANNOT_IO(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
		return 0xFFFFFFFF;
	}
	tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
	if (rfpath == RF90_PATH_A)
		tmplong2 = tmplong;
	else
		tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
	/* Place the target address in the LSSI read-address field. */
	tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
		(newoffset << 23) | BLSSIREADEDGE;
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong & (~BLSSIREADEDGE));
	mdelay(1);
	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
	mdelay(1);
	rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
		      tmplong | BLSSIREADEDGE);
	mdelay(1);
	/* PI vs SI mode selects which readback register holds the data. */
	if (rfpath == RF90_PATH_A)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
						 BIT(8));
	else if (rfpath == RF90_PATH_B)
		rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
						 BIT(8));
	if (rfpi_enable)
		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
					 BLSSIREADBACKDATA);
	else
		retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
					 BLSSIREADBACKDATA);
	RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
		 rfpath, pphyreg->rf_rb, retvalue);
	return retvalue;
}
248
249static void _phy_rf_serial_write(struct ieee80211_hw *hw,
250 enum radio_path rfpath, u32 offset, u32 data)
251{
252 u32 data_and_addr;
253 u32 newoffset;
254 struct rtl_priv *rtlpriv = rtl_priv(hw);
255 struct rtl_phy *rtlphy = &(rtlpriv->phy);
256 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
257
258 if (RT_CANNOT_IO(hw)) {
259 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
260 return;
261 }
262 offset &= 0x3f;
263 newoffset = offset;
264 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
265 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
266 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
267 rfpath, pphyreg->rf3wire_offset, data_and_addr);
268}
269
270static u32 _phy_calculate_bit_shift(u32 bitmask)
271{
272 u32 i;
273
274 for (i = 0; i <= 31; i++) {
275 if (((bitmask >> i) & 0x1) == 1)
276 break;
277 }
278 return i;
279}
280
/*
 * Restrict the baseband to a single TX path (1T operation) by adjusting
 * the TX-info path selects, the CCK/OFDM TRX path enables and the
 * per-register path fields at 0xe74-0xe88. The exact register values
 * come from the vendor configuration for this chip.
 */
static void _rtl8723ae_phy_bb_config_1t(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
	rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
	rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
	rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
	rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
	rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
	rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
	rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
	rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
	rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
}
294
295bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw)
296{
297 struct rtl_priv *rtlpriv = rtl_priv(hw);
298 bool rtstatus = _phy_cfg_mac_w_header(hw);
299 rtl_write_byte(rtlpriv, 0x04CA, 0x0A);
300 return rtstatus;
301}
302
/*
 * Bring the baseband out of reset and load its configuration tables.
 * The numbered register pokes below follow the vendor's documented
 * power-on sequence for this chip; preserve their order and delays.
 * Returns false when loading the parameter files fails.
 */
bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw)
{
	bool rtstatus = true;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmpu1b;
	u8 reg_hwparafile = 1;

	_phy_init_bb_rf_reg_def(hw);

	/* 1. 0x28[1] = 1  (enable AFE PLL) */
	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_PLL_CTRL);
	udelay(2);
	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, (tmpu1b|BIT(1)));
	udelay(2);
	/* 2. 0x29[7:0] = 0xFF */
	rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL+1, 0xff);
	udelay(2);

	/* 3. 0x02[1:0] = 2b'11  (release BB global reset) */
	tmpu1b = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, (tmpu1b |
		       FEN_BB_GLB_RSTn | FEN_BBRSTB));

	/* 4. 0x25[6] = 0 */
	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+1);
	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+1, (tmpu1b&(~BIT(6))));

	/* 5. 0x24[20] = 0 Advised by SD3 Alex Wang. 2011.02.09. */
	tmpu1b = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2);
	rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, (tmpu1b&(~BIT(4))));

	/* 6. 0x1f[7:0] = 0x07  (enable RF) */
	rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x07);

	if (reg_hwparafile == 1)
		rtstatus = _phy_bb8192c_config_parafile(hw);
	return rtstatus;
}
341
/* RF configuration is handled entirely by the RF6052 helper. */
bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw)
{
	return rtl8723ae_phy_rf6052_config(hw);
}
346
347static bool _phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
348{
349 struct rtl_priv *rtlpriv = rtl_priv(hw);
350 struct rtl_phy *rtlphy = &(rtlpriv->phy);
351 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
352 bool rtstatus;
353
354 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
355 rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_PHY_REG);
356 if (rtstatus != true) {
357 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!");
358 return false;
359 }
360
361 if (rtlphy->rf_type == RF_1T2R) {
362 _rtl8723ae_phy_bb_config_1t(hw);
363 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
364 }
365 if (rtlefuse->autoload_failflag == false) {
366 rtlphy->pwrgroup_cnt = 0;
367 rtstatus = _phy_cfg_bb_w_pgheader(hw, BASEBAND_CONFIG_PHY_REG);
368 }
369 if (rtstatus != true) {
370 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!");
371 return false;
372 }
373 rtstatus = _phy_cfg_bb_w_header(hw, BASEBAND_CONFIG_AGC_TAB);
374 if (rtstatus != true) {
375 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
376 return false;
377 }
378 rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
379 RFPGA0_XA_HSSIPARAMETER2, 0x200));
380 return true;
381}
382
383static bool _phy_cfg_mac_w_header(struct ieee80211_hw *hw)
384{
385 struct rtl_priv *rtlpriv = rtl_priv(hw);
386 u32 i;
387 u32 arraylength;
388 u32 *ptrarray;
389
390 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
391 arraylength = RTL8723E_MACARRAYLENGTH;
392 ptrarray = RTL8723EMAC_ARRAY;
393
394 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
395 "Img:RTL8192CEMAC_2T_ARRAY\n");
396 for (i = 0; i < arraylength; i = i + 2)
397 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
398 return true;
399}
400
401static bool _phy_cfg_bb_w_header(struct ieee80211_hw *hw, u8 configtype)
402{
403 int i;
404 u32 *phy_regarray_table;
405 u32 *agctab_array_table;
406 u16 phy_reg_arraylen, agctab_arraylen;
407 struct rtl_priv *rtlpriv = rtl_priv(hw);
408
409 agctab_arraylen = RTL8723E_AGCTAB_1TARRAYLENGTH;
410 agctab_array_table = RTL8723EAGCTAB_1TARRAY;
411 phy_reg_arraylen = RTL8723E_PHY_REG_1TARRAY_LENGTH;
412 phy_regarray_table = RTL8723EPHY_REG_1TARRAY;
413 if (configtype == BASEBAND_CONFIG_PHY_REG) {
414 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
415 if (phy_regarray_table[i] == 0xfe)
416 mdelay(50);
417 else if (phy_regarray_table[i] == 0xfd)
418 mdelay(5);
419 else if (phy_regarray_table[i] == 0xfc)
420 mdelay(1);
421 else if (phy_regarray_table[i] == 0xfb)
422 udelay(50);
423 else if (phy_regarray_table[i] == 0xfa)
424 udelay(5);
425 else if (phy_regarray_table[i] == 0xf9)
426 udelay(1);
427 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
428 phy_regarray_table[i + 1]);
429 udelay(1);
430 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
431 "The phy_regarray_table[0] is %x"
432 " Rtl819XPHY_REGArray[1] is %x\n",
433 phy_regarray_table[i],
434 phy_regarray_table[i + 1]);
435 }
436 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
437 for (i = 0; i < agctab_arraylen; i = i + 2) {
438 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
439 agctab_array_table[i + 1]);
440 udelay(1);
441 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
442 "The agctab_array_table[0] is "
443 "%x Rtl819XPHY_REGArray[1] is %x\n",
444 agctab_array_table[i],
445 agctab_array_table[i + 1]);
446 }
447 }
448 return true;
449}
450
/*
 * Store one power-by-rate (PG) table entry into mcs_offset[].
 *
 * Maps the TX AGC register address @regaddr to a fixed slot in
 * rtlphy->mcs_offset[pwrgroup_cnt][] and stores @data there.  The
 * RTXAGC_B_CCK11_A_CCK2_11 register holds two different rate groups
 * selected by @bitmask.  The final register of a group
 * (RTXAGC_B_MCS15_MCS12) advances pwrgroup_cnt to the next group.
 * Unknown addresses are silently ignored.
 */
static void _st_pwrIdx_dfrate_off(struct ieee80211_hw *hw, u32 regaddr,
				  u32 bitmask, u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	switch (regaddr) {
	case RTXAGC_A_RATE18_06:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][0]);
		break;
	case RTXAGC_A_RATE54_24:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][1]);
		break;
	case RTXAGC_A_CCK1_MCS32:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][6]);
		break;
	case RTXAGC_B_CCK11_A_CCK2_11:
		/* One register, two rate groups: the bitmask picks the slot */
		if (bitmask == 0xffffff00) {
			rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7] = data;
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
				 rtlphy->pwrgroup_cnt,
				 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][7]);
		}
		if (bitmask == 0x000000ff) {
			rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15] = data;
			RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
				 "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
				 rtlphy->pwrgroup_cnt,
				 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][15]);
		}
		break;
	case RTXAGC_A_MCS03_MCS00:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][2]);
		break;
	case RTXAGC_A_MCS07_MCS04:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][3]);
		break;
	case RTXAGC_A_MCS11_MCS08:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][4]);
		break;
	case RTXAGC_A_MCS15_MCS12:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][5]);
		break;
	case RTXAGC_B_RATE18_06:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][8]);
		break;
	case RTXAGC_B_RATE54_24:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][9]);
		break;
	case RTXAGC_B_CCK1_55_MCS32:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][14]);
		break;
	case RTXAGC_B_MCS03_MCS00:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][10]);
		break;
	case RTXAGC_B_MCS07_MCS04:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][11]);
		break;
	case RTXAGC_B_MCS11_MCS08:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][12]);
		break;
	case RTXAGC_B_MCS15_MCS12:
		rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13] = data;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
			 rtlphy->pwrgroup_cnt,
			 rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][13]);
		/* Last register of a power group: move on to the next one */
		rtlphy->pwrgroup_cnt++;
		break;
	}
}
575
576static bool _phy_cfg_bb_w_pgheader(struct ieee80211_hw *hw, u8 configtype)
577{
578 struct rtl_priv *rtlpriv = rtl_priv(hw);
579 int i;
580 u32 *phy_regarray_table_pg;
581 u16 phy_regarray_pg_len;
582
583 phy_regarray_pg_len = RTL8723E_PHY_REG_ARRAY_PGLENGTH;
584 phy_regarray_table_pg = RTL8723EPHY_REG_ARRAY_PG;
585
586 if (configtype == BASEBAND_CONFIG_PHY_REG) {
587 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
588 if (phy_regarray_table_pg[i] == 0xfe)
589 mdelay(50);
590 else if (phy_regarray_table_pg[i] == 0xfd)
591 mdelay(5);
592 else if (phy_regarray_table_pg[i] == 0xfc)
593 mdelay(1);
594 else if (phy_regarray_table_pg[i] == 0xfb)
595 udelay(50);
596 else if (phy_regarray_table_pg[i] == 0xfa)
597 udelay(5);
598 else if (phy_regarray_table_pg[i] == 0xf9)
599 udelay(1);
600
601 _st_pwrIdx_dfrate_off(hw, phy_regarray_table_pg[i],
602 phy_regarray_table_pg[i + 1],
603 phy_regarray_table_pg[i + 2]);
604 }
605 } else {
606 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
607 "configtype != BaseBand_Config_PHY_REG\n");
608 }
609 return true;
610}
611
612bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
613 enum radio_path rfpath)
614{
615 struct rtl_priv *rtlpriv = rtl_priv(hw);
616 int i;
617 bool rtstatus = true;
618 u32 *radioa_array_table;
619 u32 *radiob_array_table;
620 u16 radioa_arraylen, radiob_arraylen;
621
622 radioa_arraylen = Rtl8723ERADIOA_1TARRAYLENGTH;
623 radioa_array_table = RTL8723E_RADIOA_1TARRAY;
624 radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
625 radiob_array_table = RTL8723E_RADIOB_1TARRAY;
626
627 rtstatus = true;
628
629 switch (rfpath) {
630 case RF90_PATH_A:
631 for (i = 0; i < radioa_arraylen; i = i + 2) {
632 if (radioa_array_table[i] == 0xfe)
633 mdelay(50);
634 else if (radioa_array_table[i] == 0xfd)
635 mdelay(5);
636 else if (radioa_array_table[i] == 0xfc)
637 mdelay(1);
638 else if (radioa_array_table[i] == 0xfb)
639 udelay(50);
640 else if (radioa_array_table[i] == 0xfa)
641 udelay(5);
642 else if (radioa_array_table[i] == 0xf9)
643 udelay(1);
644 else {
645 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
646 RFREG_OFFSET_MASK,
647 radioa_array_table[i + 1]);
648 udelay(1);
649 }
650 }
651 break;
652 case RF90_PATH_B:
653 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
654 "switch case not process\n");
655 break;
656 case RF90_PATH_C:
657 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
658 "switch case not process\n");
659 break;
660 case RF90_PATH_D:
661 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
662 "switch case not process\n");
663 break;
664 }
665 return true;
666}
667
668void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
669{
670 struct rtl_priv *rtlpriv = rtl_priv(hw);
671 struct rtl_phy *rtlphy = &(rtlpriv->phy);
672
673 rtlphy->default_initialgain[0] =
674 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
675 rtlphy->default_initialgain[1] =
676 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
677 rtlphy->default_initialgain[2] =
678 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
679 rtlphy->default_initialgain[3] =
680 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
681
682 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
683 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
684 rtlphy->default_initialgain[0],
685 rtlphy->default_initialgain[1],
686 rtlphy->default_initialgain[2],
687 rtlphy->default_initialgain[3]);
688
689 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
690 ROFDM0_RXDETECTOR3, MASKBYTE0);
691 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
692 ROFDM0_RXDETECTOR2, MASKDWORD);
693
694 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
695 "Default framesync (0x%x) = 0x%x\n",
696 ROFDM0_RXDETECTOR3, rtlphy->framesync);
697}
698
/*
 * Populate rtlphy->phyreg_def[] with the baseband register addresses used
 * to access each RF path (A-D).  Pure table initialisation: no hardware
 * access.  Paths A/B share the XAB registers and C/D the XCD registers
 * where the hardware multiplexes two paths into one register.
 */
static void _phy_init_bb_rf_reg_def(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* RF interface software control / readback / output / enable */
	rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
	rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;

	rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
	rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;

	/* 3-wire (LSSI) serial interface to the RF chip */
	rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
	    RFPGA0_XA_LSSIPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
	    RFPGA0_XB_LSSIPARAMETER;

	rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
	rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;

	/* TX gain stage (same register for all paths) */
	rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;

	/* HSSI parameter registers (paths A/B only) */
	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;

	rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
	rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;

	/* Antenna/band switch control */
	rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
	rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;

	/* Per-path AGC core registers */
	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;

	rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
	rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;

	/* RX IQ imbalance / RX AFE */
	rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBANLANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;

	/* TX IQ imbalance / TX AFE */
	rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
	rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;

	rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
	rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
	rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
	rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;

	/* LSSI readback registers */
	rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
	rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;

	/* HSPI readback registers (paths A/B only) */
	rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVEA_HSPI_READBACK;
	rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVEB_HSPI_READBACK;
}
784
785void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
786{
787 struct rtl_priv *rtlpriv = rtl_priv(hw);
788 struct rtl_phy *rtlphy = &(rtlpriv->phy);
789 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
790 u8 txpwr_level;
791 long txpwr_dbm;
792
793 txpwr_level = rtlphy->cur_cck_txpwridx;
794 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_B, txpwr_level);
795 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
796 rtlefuse->legacy_ht_txpowerdiff;
797 if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, txpwr_level) > txpwr_dbm)
798 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
799 txpwr_level);
800 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
801 if (_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, txpwr_level) >
802 txpwr_dbm)
803 txpwr_dbm = _phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
804 txpwr_level);
805 *powerlevel = txpwr_dbm;
806}
807
808static void _rtl8723ae_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
809 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
810{
811 struct rtl_priv *rtlpriv = rtl_priv(hw);
812 struct rtl_phy *rtlphy = &(rtlpriv->phy);
813 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
814 u8 index = (channel - 1);
815
816 cckpowerlevel[RF90_PATH_A] =
817 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
818 cckpowerlevel[RF90_PATH_B] =
819 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
820 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
821 ofdmpowerlevel[RF90_PATH_A] =
822 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
823 ofdmpowerlevel[RF90_PATH_B] =
824 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
825 } else if (get_rf_type(rtlphy) == RF_2T2R) {
826 ofdmpowerlevel[RF90_PATH_A] =
827 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
828 ofdmpowerlevel[RF90_PATH_B] =
829 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
830 }
831}
832
/*
 * Latch the path-A CCK and OFDM indices chosen for this channel as the
 * "current" TX power indices.  @channel and the path-B entries are
 * accepted but unused here; the signature mirrors
 * _rtl8723ae_get_txpower_index() for symmetry.
 */
static void _rtl8723ae_ccxpower_index_check(struct ieee80211_hw *hw,
					    u8 channel, u8 *cckpowerlevel,
					    u8 *ofdmpowerlevel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
	rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
}
843
844void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
845{
846 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
847 u8 cckpowerlevel[2], ofdmpowerlevel[2];
848
849 if (rtlefuse->txpwr_fromeprom == false)
850 return;
851 _rtl8723ae_get_txpower_index(hw, channel, &cckpowerlevel[0],
852 &ofdmpowerlevel[0]);
853 _rtl8723ae_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
854 &ofdmpowerlevel[0]);
855 rtl8723ae_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
856 rtl8723ae_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
857}
858
/*
 * Force a uniform TX power (@power_indbm, in dBm) across all 14 channels
 * and both RF paths: convert to CCK and OFDM/HT indices, overwrite the
 * efuse-derived power tables, then re-apply power on the current channel.
 * Always returns true.
 */
bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 idx;
	u8 rf_path;
	u8 ccktxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_B,
					       power_indbm);
	u8 ofdmtxpwridx = _phy_dbm_to_txpwr_Idx(hw, WIRELESS_MODE_N_24G,
						power_indbm);
	/* Subtract the legacy<->HT offset, clamping at zero.
	 * NOTE(review): relies on integer promotion so a would-be-negative
	 * difference compares correctly — confirm legacy_ht_txpowerdiff is
	 * narrower than int. */
	if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
		ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
	else
		ofdmtxpwridx = 0;
	RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
		 "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
		 power_indbm, ccktxpwridx, ofdmtxpwridx);
	/* 14 channels x 2 RF paths */
	for (idx = 0; idx < 14; idx++) {
		for (rf_path = 0; rf_path < 2; rf_path++) {
			rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
			rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
			    ofdmtxpwridx;
			rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
			    ofdmtxpwridx;
		}
	}
	rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
	return true;
}
889
890static u8 _phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
891 enum wireless_mode wirelessmode,
892 long power_indbm)
893{
894 u8 txpwridx;
895 long offset;
896
897 switch (wirelessmode) {
898 case WIRELESS_MODE_B:
899 offset = -7;
900 break;
901 case WIRELESS_MODE_G:
902 case WIRELESS_MODE_N_24G:
903 offset = -8;
904 break;
905 default:
906 offset = -8;
907 break;
908 }
909
910 if ((power_indbm - offset) > 0)
911 txpwridx = (u8) ((power_indbm - offset) * 2);
912 else
913 txpwridx = 0;
914
915 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
916 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
917
918 return txpwridx;
919}
920
921static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
922 enum wireless_mode wirelessmode, u8 txpwridx)
923{
924 long offset;
925 long pwrout_dbm;
926
927 switch (wirelessmode) {
928 case WIRELESS_MODE_B:
929 offset = -7;
930 break;
931 case WIRELESS_MODE_G:
932 case WIRELESS_MODE_N_24G:
933 offset = -8;
934 break;
935 default:
936 offset = -8;
937 break;
938 }
939 pwrout_dbm = txpwridx / 2 + offset;
940 return pwrout_dbm;
941}
942
943void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
944{
945 struct rtl_priv *rtlpriv = rtl_priv(hw);
946 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
947 enum io_type iotype;
948
949 if (!is_hal_stop(rtlhal)) {
950 switch (operation) {
951 case SCAN_OPT_BACKUP:
952 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
953 rtlpriv->cfg->ops->set_hw_reg(hw,
954 HW_VAR_IO_CMD,
955 (u8 *)&iotype);
956
957 break;
958 case SCAN_OPT_RESTORE:
959 iotype = IO_CMD_RESUME_DM_BY_SCAN;
960 rtlpriv->cfg->ops->set_hw_reg(hw,
961 HW_VAR_IO_CMD,
962 (u8 *)&iotype);
963 break;
964 default:
965 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
966 "Unknown Scan Backup operation.\n");
967 break;
968 }
969 }
970}
971
/*
 * Apply rtlphy->current_chan_bw to the hardware: program the MAC
 * bandwidth opmode / RRSR sub-channel field, then the baseband RFMOD and
 * sideband registers, and finally the RF6052.  Clears
 * set_bwmode_inprogress when done (or immediately if the HAL is stopped).
 */
void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u8 reg_bw_opmode;
	u8 reg_prsr_rsc;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 "Switch to %s bandwidth\n",
		 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
		 "20MHz" : "40MHz");

	if (is_hal_stop(rtlhal)) {
		rtlphy->set_bwmode_inprogress = false;
		return;
	}

	reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
	reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);

	/* MAC side: bandwidth opmode bit and (for 40MHz) the RRSR
	 * response-rate sub-channel field */
	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		reg_bw_opmode |= BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		reg_bw_opmode &= ~BW_OPMODE_20MHZ;
		rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
		/* keep bits 7/4, insert the primary sub-channel in [6:5] */
		reg_prsr_rsc =
		    (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
		rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}

	/* Baseband side: RFMOD bandwidth bit, CCK sideband, OFDM LSTF
	 * sub-channel and analog parameters */
	switch (rtlphy->current_chan_bw) {
	case HT_CHANNEL_WIDTH_20:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
		rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
		break;
	case HT_CHANNEL_WIDTH_20_40:
		rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
		rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);

		rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
			      (mac->cur_40_prime_sc >> 1));
		rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
		rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);

		rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
			      (mac->cur_40_prime_sc ==
			       HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
		break;
	}
	rtl8723ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
	rtlphy->set_bwmode_inprogress = false;
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
1040
1041void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
1042 enum nl80211_channel_type ch_type)
1043{
1044 struct rtl_priv *rtlpriv = rtl_priv(hw);
1045 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1046 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1047 u8 tmp_bw = rtlphy->current_chan_bw;
1048
1049 if (rtlphy->set_bwmode_inprogress)
1050 return;
1051 rtlphy->set_bwmode_inprogress = true;
1052 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1053 rtl8723ae_phy_set_bw_mode_callback(hw);
1054 } else {
1055 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1056 "FALSE driver sleep or unload\n");
1057 rtlphy->set_bwmode_inprogress = false;
1058 rtlphy->current_chan_bw = tmp_bw;
1059 }
1060}
1061
/*
 * Worker that performs the actual channel switch.  Repeatedly advances
 * the channel-switch state machine (_phy_sw_chnl_step_by_step) until it
 * reports completion: a step returning a non-zero delay is honoured with
 * mdelay() and the loop exits (the next invocation continues the machine),
 * a zero delay re-runs the machine immediately via continue.
 */
void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u32 delay;

	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
		 "switch to channel%d\n", rtlphy->current_channel);
	if (is_hal_stop(rtlhal))
		return;
	do {
		if (!rtlphy->sw_chnl_inprogress)
			break;
		if (!_phy_sw_chnl_step_by_step
		    (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
		     &rtlphy->sw_chnl_step, &delay)) {
			/* more steps pending */
			if (delay > 0)
				mdelay(delay);
			else
				continue;	/* run next step right away */
		} else {
			/* state machine finished */
			rtlphy->sw_chnl_inprogress = false;
		}
		break;
	} while (true);
	RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
1090
1091u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw)
1092{
1093 struct rtl_priv *rtlpriv = rtl_priv(hw);
1094 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1095 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1096
1097 if (rtlphy->sw_chnl_inprogress)
1098 return 0;
1099 if (rtlphy->set_bwmode_inprogress)
1100 return 0;
1101 RT_ASSERT((rtlphy->current_channel <= 14),
1102 "WIRELESS_MODE_G but channel>14");
1103 rtlphy->sw_chnl_inprogress = true;
1104 rtlphy->sw_chnl_stage = 0;
1105 rtlphy->sw_chnl_step = 0;
1106 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1107 rtl8723ae_phy_sw_chnl_callback(hw);
1108 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1109 "sw_chnl_inprogress false schdule workitem\n");
1110 rtlphy->sw_chnl_inprogress = false;
1111 } else {
1112 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1113 "sw_chnl_inprogress false driver sleep or unload\n");
1114 rtlphy->sw_chnl_inprogress = false;
1115 }
1116 return 1;
1117}
1118
/*
 * Post-channel-switch RF tweak for UMC B-cut silicon: on channel 6 at
 * 20 MHz, RF reg RF_RX_G1 is forced to 0x00255; on any other channel it
 * is read back and rewritten with the same value.
 *
 * NOTE(review): the else branch looks like a no-op read-modify-write;
 * presumably the rewrite itself resets some RF-internal state after the
 * channel-6 override — confirm against the vendor reference code.
 */
static void _rtl8723ae_phy_sw_rf_seting(struct ieee80211_hw *hw, u8 channel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) {
		if (channel == 6 && rtlphy->current_chan_bw ==
		    HT_CHANNEL_WIDTH_20)
			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
				      0x00255);
		else{
			u32 backupRF0x1A = (u32)rtl_get_rfreg(hw, RF90_PATH_A,
					   RF_RX_G1, RFREG_OFFSET_MASK);
			rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD,
				      backupRF0x1A);
		}
	}
}
1138
/*
 * Execute one step of the channel-switch state machine.
 *
 * The switch is described by three command arrays executed in order as
 * stages 0..2: pre-common (set TX power), RF-dependent (write RF_CHNLBW),
 * post-common (empty).  *stage/*step are the caller-held cursor into
 * those arrays; *delay receives the executed command's msdelay.
 *
 * Returns true when the whole sequence has completed (CMDID_END reached
 * in the last stage), false when more steps remain — the caller should
 * wait *delay ms and call again.
 */
static bool _phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, u8 channel,
				      u8 *stage, u8 *step, u32 *delay)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
	u32 precommoncmdcnt;
	struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
	u32 postcommoncmdcnt;
	struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
	u32 rfdependcmdcnt;
	struct swchnlcmd *currentcmd = NULL;
	u8 rfpath;
	u8 num_total_rfpath = rtlphy->num_total_rfpath;

	/* The command tables are rebuilt on every call; each one is
	 * terminated by a CMDID_END sentinel. */
	precommoncmdcnt = 0;
	_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
				  MAX_PRECMD_CNT, CMDID_SET_TXPOWEROWER_LEVEL,
				  0, 0, 0);
	_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
				  MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
	postcommoncmdcnt = 0;

	_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
				  MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
	rfdependcmdcnt = 0;

	RT_ASSERT((channel >= 1 && channel <= 14),
		  "illegal channel for Zebra: %d\n", channel);

	_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
				  MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
				  RF_CHNLBW, channel, 10);

	_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
				  MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, 0);

	do {
		/* Select the current command from the stage's table */
		switch (*stage) {
		case 0:
			currentcmd = &precommoncmd[*step];
			break;
		case 1:
			currentcmd = &rfdependcmd[*step];
			break;
		case 2:
			currentcmd = &postcommoncmd[*step];
			break;
		}

		/* Sentinel: advance to the next stage, or report done */
		if (currentcmd->cmdid == CMDID_END) {
			if ((*stage) == 2) {
				return true;
			} else {
				(*stage)++;
				(*step) = 0;
				continue;
			}
		}

		switch (currentcmd->cmdid) {
		case CMDID_SET_TXPOWEROWER_LEVEL:
			rtl8723ae_phy_set_txpower_level(hw, channel);
			break;
		case CMDID_WRITEPORT_ULONG:
			rtl_write_dword(rtlpriv, currentcmd->para1,
					currentcmd->para2);
			break;
		case CMDID_WRITEPORT_USHORT:
			rtl_write_word(rtlpriv, currentcmd->para1,
				       (u16) currentcmd->para2);
			break;
		case CMDID_WRITEPORT_UCHAR:
			rtl_write_byte(rtlpriv, currentcmd->para1,
				       (u8) currentcmd->para2);
			break;
		case CMDID_RF_WRITEREG:
			/* Write the new channel into the low 10 bits of the
			 * cached RF_CHNLBW value for every RF path */
			for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
				rtlphy->rfreg_chnlval[rfpath] =
				    ((rtlphy->rfreg_chnlval[rfpath] &
				      0xfffffc00) | currentcmd->para2);

				rtl_set_rfreg(hw, (enum radio_path)rfpath,
					      currentcmd->para1,
					      RFREG_OFFSET_MASK,
					      rtlphy->rfreg_chnlval[rfpath]);
			}
			_rtl8723ae_phy_sw_rf_seting(hw, channel);
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 "switch case not process\n");
			break;
		}

		break;
	} while (true);

	(*delay) = currentcmd->msdelay;
	(*step)++;
	return false;
}
1241
1242static bool _phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
1243 u32 cmdtableidx, u32 cmdtablesz,
1244 enum swchnlcmd_id cmdid, u32 para1,
1245 u32 para2, u32 msdelay)
1246{
1247 struct swchnlcmd *pcmd;
1248
1249 if (cmdtable == NULL) {
1250 RT_ASSERT(false, "cmdtable cannot be NULL.\n");
1251 return false;
1252 }
1253
1254 if (cmdtableidx >= cmdtablesz)
1255 return false;
1256
1257 pcmd = cmdtable + cmdtableidx;
1258 pcmd->cmdid = cmdid;
1259 pcmd->para1 = para1;
1260 pcmd->para2 = para2;
1261 pcmd->msdelay = msdelay;
1262 return true;
1263}
1264
/*
 * Run IQ calibration for RF path A.  Programs the IQK tone/config
 * registers (vendor magic values), triggers one-shot IQK via 0xe48, waits,
 * then validates the TX (0xe94/0xe9c) and RX (0xea4/0xeac) results.
 *
 * Returns a bitmask: bit0 = path-A TX IQK passed, bit1 = path-A RX IQK
 * passed (only checked if TX passed).
 */
static u8 _rtl8723ae_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
{
	u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
	u8 result = 0x00;

	/* Path-A IQK tone and TX/RX configuration */
	rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
	rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
	rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
		      config_pathb ? 0x28160202 : 0x28160502);

	/* Path-B settings, only when B is being calibrated too */
	if (config_pathb) {
		rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
		rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
		rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
	}

	/* Trigger one-shot IQK: 0xf9000000 then 0xf8000000 */
	rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
	rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);

	mdelay(IQK_DELAY_TIME);

	/* Read back calibration status and results */
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
	reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
	reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);

	/* TX IQK OK: fail bit 28 clear and results not at the known-bad
	 * default values (0x142 / 0x42) */
	if (!(reg_eac & BIT(28)) &&
	    (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;

	/* RX IQK OK: fail bit 27 clear, results not 0x132 / 0x36 */
	if (!(reg_eac & BIT(27)) &&
	    (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_eac & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1307
/*
 * Run one path-B IQ-calibration (IQK) pass.
 *
 * Same return convention as _rtl8723ae_phy_path_a_iqk(): bit0 for the
 * first (0xeb4/0xebc) check, bit1 for the second (0xec4/0xecc) check,
 * where the second is only attempted after the first passes.
 */
static u8 _rtl8723ae_phy_path_b_iqk(struct ieee80211_hw *hw)
{
	u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
	u8 result = 0x00;

	/* Trigger the path-B one-shot (0xe60 pulsed). */
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
	rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
	mdelay(IQK_DELAY_TIME);
	/* Read back result registers. */
	reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
	reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
	reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
	reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
	reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);

	/* First stage check (fail flag in BIT(31)). */
	if (!(reg_eac & BIT(31)) &&
	    (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
	    (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
		result |= 0x01;
	else
		return result;
	/* Second stage check (fail flag in BIT(30)). */
	if (!(reg_eac & BIT(30)) &&
	    (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
	    (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
		result |= 0x02;
	return result;
}
1334
/*
 * Program the path-A TX (and optionally RX) IQ-imbalance correction
 * registers from the chosen calibration result row.
 *
 * @iqk_ok:		whether the calibration succeeded; nothing is
 *			written on failure or when no candidate was chosen
 * @result:		table of calibration readings (10-bit fields)
 * @final_candidate:	row index to apply, 0xFF meaning "none"
 * @btxonly:		skip the RX correction writes
 */
static void phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, bool iqk_ok,
				       long result[][8], u8 final_candidate,
				       bool btxonly)
{
	u32 oldval_0, x, tx0_a, reg;
	long y, tx0_c;

	if (final_candidate == 0xFF) {
		return;
	} else if (iqk_ok) {
		/* Current 10-bit TX gain term (bits 31:22). */
		oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
					  MASKDWORD) >> 22) & 0x3FF;
		x = result[final_candidate][0];
		/* Sign-extend the 10-bit value to 32 bits. */
		if ((x & 0x00000200) != 0)
			x = x | 0xFFFFFC00;
		tx0_a = (x * oldval_0) >> 8;
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
			      ((x * oldval_0 >> 7) & 0x1));
		y = result[final_candidate][1];
		/* Same sign extension for the second TX term. */
		if ((y & 0x00000200) != 0)
			y = y | 0xFFFFFC00;
		tx0_c = (y * oldval_0) >> 8;
		/* Split tx0_c across two registers: high nibble and low 6 bits. */
		rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
			      ((tx0_c & 0x3C0) >> 6));
		rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
			      (tx0_c & 0x3F));
		rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
			      ((y * oldval_0 >> 7) & 0x1));
		if (btxonly)
			return;
		/* RX correction terms. */
		reg = result[final_candidate][2];
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
		reg = result[final_candidate][3] & 0x3F;
		rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
		reg = (result[final_candidate][3] >> 6) & 0xF;
		rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
	}
}
1374
1375static void phy_save_adda_regs(struct ieee80211_hw *hw,
1376 u32 *addareg, u32 *addabackup,
1377 u32 registernum)
1378{
1379 u32 i;
1380
1381 for (i = 0; i < registernum; i++)
1382 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1383}
1384
1385static void phy_save_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
1386 u32 *macbackup)
1387{
1388 struct rtl_priv *rtlpriv = rtl_priv(hw);
1389 u32 i;
1390
1391 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1392 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1393 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1394}
1395
1396static void phy_reload_adda_regs(struct ieee80211_hw *hw, u32 *addareg,
1397 u32 *addabackup, u32 regiesternum)
1398{
1399 u32 i;
1400
1401 for (i = 0; i < regiesternum; i++)
1402 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1403}
1404
1405static void phy_reload_mac_regs(struct ieee80211_hw *hw, u32 *macreg,
1406 u32 *macbackup)
1407{
1408 struct rtl_priv *rtlpriv = rtl_priv(hw);
1409 u32 i;
1410
1411 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1412 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1413 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1414}
1415
1416static void _rtl8723ae_phy_path_adda_on(struct ieee80211_hw *hw,
1417 u32 *addareg, bool is_patha_on,
1418 bool is2t)
1419{
1420 u32 pathOn;
1421 u32 i;
1422
1423 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
1424 if (false == is2t) {
1425 pathOn = 0x0bdb25a0;
1426 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1427 } else {
1428 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1429 }
1430
1431 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1432 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1433}
1434
/*
 * Quiesce the MAC for calibration: write 0x3F to the first register,
 * clear BIT(3) in the middle byte-wide registers, and clear BIT(5) in
 * the last one.  @macbackup holds the values captured beforehand by
 * phy_save_mac_regs().
 */
static void _rtl8723ae_phy_mac_setting_calibration(struct ieee80211_hw *hw,
						   u32 *macreg, u32 *macbackup)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 i = 0;

	rtl_write_byte(rtlpriv, macreg[i], 0x3F);

	for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
		rtl_write_byte(rtlpriv, macreg[i],
			       (u8) (macbackup[i] & (~BIT(3))));
	/* i == IQK_MAC_REG_NUM - 1 after the loop: last register gets
	 * BIT(5) cleared instead of BIT(3). */
	rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
}
1448
/* Put path A into standby around path-B calibration: IQK block off,
 * path-A RF forced to 0x00010000, IQK block back on.  The write order
 * matters — this is the vendor sequence. */
static void _rtl8723ae_phy_path_a_standby(struct ieee80211_hw *hw)
{
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
	rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
	rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
}
1455
1456static void _rtl8723ae_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1457{
1458 u32 mode;
1459
1460 mode = pi_mode ? 0x01000100 : 0x01000000;
1461 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1462 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1463}
1464
/*
 * Compare two IQ-calibration result rows (@c1, @c2) entry by entry.
 *
 * Entries differing by more than MAX_TOLERANCE set a bit in
 * simularity_bitmap, except that at entries 2 and 6 an all-zero pair in
 * one row can instead nominate the other row as the "final candidate"
 * for that half, copied into result row 3.
 *
 * Returns true when the rows agree everywhere; false otherwise (in which
 * case row 3 may have been partially filled in as a fallback).
 * NOTE(review): only entries 0..3 are compared (bound = 4), so the
 * i == 6 case above is unreachable here — presumably shared heritage
 * with a 2T variant where bound is 8; confirm before changing.
 */
static bool phy_simularity_comp(struct ieee80211_hw *hw, long result[][8],
				u8 c1, u8 c2)
{
	u32 i, j, diff, simularity_bitmap, bound;

	u8 final_candidate[2] = { 0xFF, 0xFF };
	bool bresult = true;

	bound = 4;

	simularity_bitmap = 0;

	for (i = 0; i < bound; i++) {
		/* Absolute difference of the two candidate readings. */
		diff = (result[c1][i] > result[c2][i]) ?
		    (result[c1][i] - result[c2][i]) :
		    (result[c2][i] - result[c1][i]);

		if (diff > MAX_TOLERANCE) {
			/* At the start of a half (i == 2 or 6) with no
			 * mismatch recorded yet, a zeroed pair means
			 * that row failed — prefer the other row. */
			if ((i == 2 || i == 6) && !simularity_bitmap) {
				if (result[c1][i] + result[c1][i + 1] == 0)
					final_candidate[(i / 4)] = c2;
				else if (result[c2][i] + result[c2][i + 1] == 0)
					final_candidate[(i / 4)] = c1;
				else
					simularity_bitmap = simularity_bitmap |
					    (1 << i);
			} else
				simularity_bitmap =
				    simularity_bitmap | (1 << i);
		}
	}

	if (simularity_bitmap == 0) {
		/* Rows agree (or a candidate was nominated): copy the
		 * nominated row's first two entries of each half into
		 * the fallback row 3. */
		for (i = 0; i < (bound / 4); i++) {
			if (final_candidate[i] != 0xFF) {
				for (j = i * 4; j < (i + 1) * 4 - 2; j++)
					result[3][j] =
					    result[final_candidate[i]][j];
				bresult = false;
			}
		}
		return bresult;
	} else if (!(simularity_bitmap & 0x0F)) {
		/* Mismatches only outside entries 0..3: take row c1. */
		for (i = 0; i < 4; i++)
			result[3][i] = result[c1][i];
		return false;
	} else {
		return false;
	}

}
1516
1517static void _rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw,
1518 long result[][8], u8 t, bool is2t)
1519{
1520 struct rtl_priv *rtlpriv = rtl_priv(hw);
1521 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1522 u32 i;
1523 u8 patha_ok, pathb_ok;
1524 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1525 0x85c, 0xe6c, 0xe70, 0xe74,
1526 0xe78, 0xe7c, 0xe80, 0xe84,
1527 0xe88, 0xe8c, 0xed0, 0xed4,
1528 0xed8, 0xedc, 0xee0, 0xeec
1529 };
1530 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1531 0x522, 0x550, 0x551, 0x040
1532 };
1533 const u32 retrycount = 2;
1534 u32 bbvalue;
1535
1536 if (t == 0) {
1537 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1538
1539 phy_save_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
1540 phy_save_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
1541 }
1542 _rtl8723ae_phy_path_adda_on(hw, adda_reg, true, is2t);
1543 if (t == 0) {
1544 rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
1545 RFPGA0_XA_HSSIPARAMETER1,
1546 BIT(8));
1547 }
1548
1549 if (!rtlphy->rfpi_enable)
1550 _rtl8723ae_phy_pi_mode_switch(hw, true);
1551 if (t == 0) {
1552 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1553 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1554 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1555 }
1556 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1557 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1558 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1559 if (is2t) {
1560 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1561 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1562 }
1563 _rtl8723ae_phy_mac_setting_calibration(hw, iqk_mac_reg,
1564 rtlphy->iqk_mac_backup);
1565 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1566 if (is2t)
1567 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1568 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1569 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1570 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1571 for (i = 0; i < retrycount; i++) {
1572 patha_ok = _rtl8723ae_phy_path_a_iqk(hw, is2t);
1573 if (patha_ok == 0x03) {
1574 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1575 0x3FF0000) >> 16;
1576 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1577 0x3FF0000) >> 16;
1578 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1579 0x3FF0000) >> 16;
1580 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1581 0x3FF0000) >> 16;
1582 break;
1583 } else if (i == (retrycount - 1) && patha_ok == 0x01)
1584
1585 result[t][0] = (rtl_get_bbreg(hw, 0xe94,
1586 MASKDWORD) & 0x3FF0000) >> 16;
1587 result[t][1] =
1588 (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
1589
1590 }
1591
1592 if (is2t) {
1593 _rtl8723ae_phy_path_a_standby(hw);
1594 _rtl8723ae_phy_path_adda_on(hw, adda_reg, false, is2t);
1595 for (i = 0; i < retrycount; i++) {
1596 pathb_ok = _rtl8723ae_phy_path_b_iqk(hw);
1597 if (pathb_ok == 0x03) {
1598 result[t][4] =
1599 (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
1600 0x3FF0000) >> 16;
1601 result[t][5] =
1602 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1603 0x3FF0000) >> 16;
1604 result[t][6] =
1605 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1606 0x3FF0000) >> 16;
1607 result[t][7] =
1608 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1609 0x3FF0000) >> 16;
1610 break;
1611 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1612 result[t][4] =
1613 (rtl_get_bbreg(hw, 0xeb4, MASKDWORD) &
1614 0x3FF0000) >> 16;
1615 }
1616 result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1617 0x3FF0000) >> 16;
1618 }
1619 }
1620 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1621 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1622 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1623 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1624 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1625 if (is2t)
1626 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1627 if (t != 0) {
1628 if (!rtlphy->rfpi_enable)
1629 _rtl8723ae_phy_pi_mode_switch(hw, false);
1630 phy_reload_adda_regs(hw, adda_reg, rtlphy->adda_backup, 16);
1631 phy_reload_mac_regs(hw, iqk_mac_reg, rtlphy->iqk_mac_backup);
1632 }
1633}
1634
/*
 * Run LC calibration on path A (and path B when @is2t).
 *
 * TX is paused first (either by masking 0xd03 bits or via REG_TXPAUSE),
 * the RF mode registers are parked, the calibration is triggered by
 * setting bit 0x08000 of RF register 0x18, and after 100 ms everything
 * is restored.
 */
static void _rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 tmpreg;
	u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;

	tmpreg = rtl_read_byte(rtlpriv, 0xd03);

	/* If 0xd03 is in use, pause via its bits; otherwise via
	 * REG_TXPAUSE. */
	if ((tmpreg & 0x70) != 0)
		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
	else
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);

	if ((tmpreg & 0x70) != 0) {
		/* Save the current RF mode(s)... */
		rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);

		if (is2t)
			rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
						  MASK12BITS);

		/* ...and park them for the calibration. */
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
			      (rf_a_mode & 0x8FFFF) | 0x10000);

		if (is2t)
			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
				      (rf_b_mode & 0x8FFFF) | 0x10000);
	}
	lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);

	/* Start the LC calibration. */
	rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);

	/* Let the hardware finish. */
	mdelay(100);

	/* Restore the pre-calibration state. */
	if ((tmpreg & 0x70) != 0) {
		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);

		if (is2t)
			rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
				      rf_b_mode);
	} else {
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
	}
}
1679
1680static void _rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw,
1681 bool bmain, bool is2t)
1682{
1683 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1684
1685 if (is_hal_stop(rtlhal)) {
1686 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
1687 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
1688 }
1689 if (is2t) {
1690 if (bmain)
1691 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1692 BIT(5) | BIT(6), 0x1);
1693 else
1694 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
1695 BIT(5) | BIT(6), 0x2);
1696 } else {
1697 if (bmain)
1698 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
1699 else
1700 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
1701
1702 }
1703}
1704
1705#undef IQK_ADDA_REG_NUM
1706#undef IQK_DELAY_TIME
1707
/*
 * Entry point for IQ calibration.
 *
 * With @recovery, only restores the previously backed-up BB registers
 * and returns.  Otherwise runs up to three calibration iterations,
 * picks a final candidate by similarity comparison, programs the path-A
 * correction matrix, and saves the BB registers for later recovery.
 */
void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	long result[4][8];
	u8 i, final_candidate;
	bool patha_ok, pathb_ok;
	long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
	    reg_ecc, reg_tmp = 0;
	bool is12simular, is13simular, is23simular;
	/* Both flags are constant false here, so the guard below never
	 * fires — presumably kept as placeholders for test modes. */
	bool start_conttx = false, singletone = false;
	u32 iqk_bb_reg[10] = {
		ROFDM0_XARXIQIMBALANCE,
		ROFDM0_XBRXIQIMBALANCE,
		ROFDM0_ECCATHRESHOLD,
		ROFDM0_AGCRSSITABLE,
		ROFDM0_XATXIQIMBALANCE,
		ROFDM0_XBTXIQIMBALANCE,
		ROFDM0_XCTXIQIMBALANCE,
		ROFDM0_XCTXAFE,
		ROFDM0_XDTXAFE,
		ROFDM0_RXIQEXTANTA
	};

	if (recovery) {
		/* Recovery mode: just restore the saved BB registers. */
		phy_reload_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
		return;
	}
	if (start_conttx || singletone)
		return;
	for (i = 0; i < 8; i++) {
		result[0][i] = 0;
		result[1][i] = 0;
		result[2][i] = 0;
		result[3][i] = 0;
	}
	final_candidate = 0xff;
	patha_ok = false;
	pathb_ok = false;
	is12simular = false;
	is23simular = false;
	is13simular = false;
	/* Up to three iterations; stop early once two agree. */
	for (i = 0; i < 3; i++) {
		_rtl8723ae_phy_iq_calibrate(hw, result, i, false);
		if (i == 1) {
			is12simular = phy_simularity_comp(hw, result, 0, 1);
			if (is12simular) {
				final_candidate = 0;
				break;
			}
		}
		if (i == 2) {
			is13simular = phy_simularity_comp(hw, result, 0, 2);
			if (is13simular) {
				final_candidate = 0;
				break;
			}
			is23simular = phy_simularity_comp(hw, result, 1, 2);
			if (is23simular) {
				final_candidate = 1;
			} else {
				/* No pair agrees: fall back to row 3 if
				 * the similarity pass filled it in. */
				for (i = 0; i < 8; i++)
					reg_tmp += result[3][i];

				if (reg_tmp != 0)
					final_candidate = 3;
				else
					final_candidate = 0xFF;
			}
		}
	}
	/* NOTE(review): this loop overwrites the reg_* locals each pass,
	 * so only row 3's values survive; looks like it only seeds
	 * defaults before the final_candidate check below — confirm
	 * before changing. */
	for (i = 0; i < 4; i++) {
		reg_e94 = result[i][0];
		reg_e9c = result[i][1];
		reg_ea4 = result[i][2];
		reg_eac = result[i][3];
		reg_eb4 = result[i][4];
		reg_ebc = result[i][5];
		reg_ec4 = result[i][6];
		reg_ecc = result[i][7];
	}
	if (final_candidate != 0xff) {
		/* Publish the winning row and cache the TX terms. */
		rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
		rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
		reg_ea4 = result[final_candidate][2];
		reg_eac = result[final_candidate][3];
		rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
		rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
		reg_ec4 = result[final_candidate][6];
		reg_ecc = result[final_candidate][7];
		patha_ok = pathb_ok = true;
	} else {
		/* No usable result: fall back to unity gain / zero phase. */
		rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
		rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
	}
	if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
		phy_path_a_fill_iqk_matrix(hw, patha_ok, result,
					   final_candidate, (reg_ea4 == 0));
	/* Save the BB registers so a later recovery call can restore them. */
	phy_save_adda_regs(hw, iqk_bb_reg, rtlphy->iqk_bb_backup, 10);
}
1808
/* Public LC-calibration entry point (1T only on this chip).  The two
 * flags are constant false, so the guard never fires — presumably
 * placeholders for continuous-TX / single-tone test modes. */
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw)
{
	bool start_conttx = false, singletone = false;

	if (start_conttx || singletone)
		return;
	_rtl8723ae_phy_lc_calibrate(hw, false);
}
1817
/* Public antenna-path selector; this chip is driven as 1T (is2t = false). */
void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
{
	_rtl8723ae_phy_set_rfpath_switch(hw, bmain, false);
}
1822
1823bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
1824{
1825 struct rtl_priv *rtlpriv = rtl_priv(hw);
1826 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1827 bool postprocessing = false;
1828
1829 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1830 "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
1831 iotype, rtlphy->set_io_inprogress);
1832 do {
1833 switch (iotype) {
1834 case IO_CMD_RESUME_DM_BY_SCAN:
1835 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1836 "[IO CMD] Resume DM after scan.\n");
1837 postprocessing = true;
1838 break;
1839 case IO_CMD_PAUSE_DM_BY_SCAN:
1840 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1841 "[IO CMD] Pause DM before scan.\n");
1842 postprocessing = true;
1843 break;
1844 default:
1845 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1846 "switch case not process\n");
1847 break;
1848 }
1849 } while (false);
1850 if (postprocessing && !rtlphy->set_io_inprogress) {
1851 rtlphy->set_io_inprogress = true;
1852 rtlphy->current_io_type = iotype;
1853 } else {
1854 return false;
1855 }
1856 rtl8723ae_phy_set_io(hw);
1857 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
1858 return true;
1859}
1860
/*
 * Execute the IO command stored in rtlphy->current_io_type: pause saves
 * the current initial-gain value and forces 0x17; resume restores the
 * saved gain and re-applies TX power for the current channel.  Clears
 * set_io_inprogress when done.
 */
static void rtl8723ae_phy_set_io(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct dig_t *dm_digtable = &rtlpriv->dm_digtable;

	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 "--->Cmd(%#x), set_io_inprogress(%d)\n",
		 rtlphy->current_io_type, rtlphy->set_io_inprogress);
	switch (rtlphy->current_io_type) {
	case IO_CMD_RESUME_DM_BY_SCAN:
		/* Restore the gain saved by the pause command. */
		dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
		rtl8723ae_dm_write_dig(hw);
		rtl8723ae_phy_set_txpower_level(hw, rtlphy->current_channel);
		break;
	case IO_CMD_PAUSE_DM_BY_SCAN:
		/* Save the gain and force a fixed scan value. */
		rtlphy->initgain_backup.xaagccore1 = dm_digtable->cur_igvalue;
		dm_digtable->cur_igvalue = 0x17;
		rtl8723ae_dm_write_dig(hw);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not process\n");
		break;
	}
	rtlphy->set_io_inprogress = false;
	RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
		 "<---(%#x)\n", rtlphy->current_io_type);
}
1890
/* Power the RF front end back on.  The SPS0/SYS_FUNC_EN toggle sequence
 * is the vendor power-up ordering; the final write unpauses TX. */
static void rtl8723ae_phy_set_rf_on(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
1902
/*
 * Put the RF front end to sleep: pause TX, zero the path-A RF register,
 * then poll (up to 5 tries) for RF register 0 to read back 0 before
 * gating the clocks.  On timeout the chip is powered back into a sane
 * state instead.
 * NOTE(review): if the final poll reads 0 exactly as delay reaches 0,
 * this still takes the timeout branch — confirm whether that edge case
 * matters before changing.
 */
static void _rtl8723ae_phy_set_rf_sleep(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 u4b_tmp;
	u8 delay = 5;

	rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
	rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
	rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
	u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
	while (u4b_tmp != 0 && delay > 0) {
		/* Re-issue the sleep sequence and poll again. */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
		rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
		u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
		delay--;
	}
	if (delay == 0) {
		/* RF never reported idle: undo and leave the radio on. */
		rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
		rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "Switch RF timeout !!!.\n");
		return;
	}
	/* RF idle: gate the BB/RF clocks. */
	rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
	rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
}
1932
/*
 * Transition the RF power state to @rfpwr_state.
 *
 * ERFON:    re-enable the NIC when it was halted for IPS (retried up to
 *           10 times), otherwise just wake the RF; update the LED.
 * ERFOFF:   disable the NIC when halt-NIC is configured, otherwise only
 *           update the LED according to the off reason.
 * ERFSLEEP: wait for the TX queues to drain (bounded by
 *           MAX_DOZE_WAITING_TIMES_9x polls of 10 us), then put the RF
 *           to sleep.
 *
 * Returns true and records the new state in ppsc->rfpwr_state on
 * success; false for an unknown state.
 */
static bool _rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
					      enum rf_pwrstate rfpwr_state)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	bool bresult = true;
	u8 i, queue_id;

	switch (rfpwr_state) {
	case ERFON:
		if ((ppsc->rfpwr_state == ERFOFF) &&
		    RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
			/* NIC was fully halted: bring it back up,
			 * retrying a bounded number of times. */
			bool rtstatus;
			u32 InitializeCount = 0;
			do {
				InitializeCount++;
				RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
					 "IPS Set eRf nic enable\n");
				rtstatus = rtl_ps_enable_nic(hw);
			} while ((rtstatus != true) && (InitializeCount < 10));
			RT_CLEAR_PS_LEVEL(ppsc,
					  RT_RF_OFF_LEVL_HALT_NIC);
		} else {
			/* Light sleep: just wake the RF front end. */
			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
				 "Set ERFON sleeped:%d ms\n",
				 jiffies_to_msecs(jiffies -
						  ppsc->last_sleep_jiffies));
			ppsc->last_awake_jiffies = jiffies;
			rtl8723ae_phy_set_rf_on(hw);
		}
		/* LED reflects the link state. */
		if (mac->link_state == MAC80211_LINKED) {
			rtlpriv->cfg->ops->led_control(hw,
						       LED_CTL_LINK);
		} else {
			rtlpriv->cfg->ops->led_control(hw,
						       LED_CTL_NO_LINK);
		}
		break;
	case ERFOFF:
		if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
			RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
				 "IPS Set eRf nic disable\n");
			rtl_ps_disable_nic(hw);
			RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
		} else {
			if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
				rtlpriv->cfg->ops->led_control(hw,
							       LED_CTL_NO_LINK);
			} else {
				rtlpriv->cfg->ops->led_control(hw,
							LED_CTL_POWER_OFF);
			}
		}
		break;
	case ERFSLEEP:
		if (ppsc->rfpwr_state == ERFOFF)
			break;
		/* Wait for every TX queue to drain before dozing. */
		for (queue_id = 0, i = 0;
		     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
			ring = &pcipriv->dev.tx_ring[queue_id];
			if (skb_queue_len(&ring->queue) == 0) {
				queue_id++;
				continue;
			} else {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
					 (i + 1), queue_id,
					 skb_queue_len(&ring->queue));

				udelay(10);
				i++;
			}
			/* Give up waiting after the bounded poll count. */
			if (i >= MAX_DOZE_WAITING_TIMES_9x) {
				RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
					 "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
					 MAX_DOZE_WAITING_TIMES_9x,
					 queue_id,
					 skb_queue_len(&ring->queue));
				break;
			}
		}
		RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
			 "Set ERFSLEEP awaked:%d ms\n",
			 jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
		ppsc->last_sleep_jiffies = jiffies;
		_rtl8723ae_phy_set_rf_sleep(hw);
		break;
	default:
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "switch case not processed\n");
		bresult = false;
		break;
	}
	if (bresult)
		ppsc->rfpwr_state = rfpwr_state;
	return bresult;
}
2033
2034bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
2035 enum rf_pwrstate rfpwr_state)
2036{
2037 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2038 bool bresult = false;
2039
2040 if (rfpwr_state == ppsc->rfpwr_state)
2041 return bresult;
2042 bresult = _rtl8723ae_phy_set_rf_power_state(hw, rfpwr_state);
2043 return bresult;
2044}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
new file mode 100644
index 000000000000..e7a59eba351a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
@@ -0,0 +1,224 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_PHY_H__
31#define __RTL92C_PHY_H__
32
/* Channel-switch command table sizes. */
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
#define MAX_POSTCMD_CNT 16

/* Polls (10 us each) to wait for TX queues to drain before RF sleep. */
#define MAX_DOZE_WAITING_TIMES_9x 64

#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22

/* IQ-calibration tuning: result-comparison tolerance and settle delay (ms). */
#define MAX_TOLERANCE 5
#define IQK_DELAY_TIME 1

#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
#define APK_CURVE_REG_NUM 4
#define PATH_NUM 2

#define LOOP_LIMIT 5
#define MAX_STALL_TIME 50
#define AntennaDiversityValue 0x80
#define MAX_TXPWR_IDX_NMODE_92S 63
#define Reset_Cnt_Limit 3

/* Number of MAC registers saved/restored around IQ calibration. */
#define IQK_MAC_REG_NUM 4

#define RF6052_MAX_PATH 2

/* EFUSE content offsets (bytes). */
#define CT_OFFSET_MAC_ADDR 0X16

#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
#define CT_OFFSET_HT402S_TX_PWR_IDX_DIFF 0x66
#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C

#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72

#define CT_OFFSET_CHANNEL_PLAH 0x75
#define CT_OFFSET_THERMAL_METER 0x78
#define CT_OFFSET_RF_OPTION 0x79
#define CT_OFFSET_VERSION 0x7E
#define CT_OFFSET_CUSTOMER_ID 0x7F

#define RTL92C_MAX_PATH_NUM 2
78
/* Command identifiers for the software channel-switch tables built with
 * _phy_set_sw_chnl_cmdarray(). */
enum swchnlcmd_id {
	CMDID_END,
	CMDID_SET_TXPOWEROWER_LEVEL,
	CMDID_BBREGWRITE10,
	CMDID_WRITEPORT_ULONG,
	CMDID_WRITEPORT_USHORT,
	CMDID_WRITEPORT_UCHAR,
	CMDID_RF_WRITEREG,
};
88
/* One step of a software channel-switch sequence. */
struct swchnlcmd {
	enum swchnlcmd_id cmdid;	/* action to perform */
	u32 para1;			/* command-specific parameter */
	u32 para2;			/* command-specific parameter */
	u32 msdelay;			/* delay after this step, in ms */
};
95
/* Hardware block selector (MAC / PHY pages / RF). */
enum hw90_block_e {
	HW90_BLOCK_MAC = 0,
	HW90_BLOCK_PHY0 = 1,
	HW90_BLOCK_PHY1 = 2,
	HW90_BLOCK_RF = 3,
	HW90_BLOCK_MAXIMUM = 4,	/* number of blocks, not a real block */
};
103
/* Which baseband configuration table to load. */
enum baseband_config_type {
	BASEBAND_CONFIG_PHY_REG = 0,	/* PHY register table */
	BASEBAND_CONFIG_AGC_TAB = 1,	/* AGC gain table */
};
108
/* Rate-adaptive power-offset areas (legacy OFDM, HT OFDM groups, CCK). */
enum ra_offset_area {
	RA_OFFSET_LEGACY_OFDM1,
	RA_OFFSET_LEGACY_OFDM2,
	RA_OFFSET_HT_OFDM1,
	RA_OFFSET_HT_OFDM2,
	RA_OFFSET_HT_OFDM3,
	RA_OFFSET_HT_OFDM4,
	RA_OFFSET_HT_CCK,
};
118
/* Antenna-path combinations; each letter names an enabled antenna
 * (the value is a 4-bit mask: A=8, B=4, C=2, D=1). */
enum antenna_path {
	ANTENNA_NONE,
	ANTENNA_D,
	ANTENNA_C,
	ANTENNA_CD,
	ANTENNA_B,
	ANTENNA_BD,
	ANTENNA_BC,
	ANTENNA_BCD,
	ANTENNA_A,
	ANTENNA_AD,
	ANTENNA_AC,
	ANTENNA_ACD,
	ANTENNA_AB,
	ANTENNA_ABD,
	ANTENNA_ABC,
	ANTENNA_ABCD
};
137
/* OFDM antenna-selection register layout (bitfields map onto a 32-bit
 * hardware register; field semantics follow the vendor register map). */
struct r_antenna_select_ofdm {
	u32 r_tx_antenna:4;
	u32 r_ant_l:4;
	u32 r_ant_non_ht:4;
	u32 r_ant_ht1:4;
	u32 r_ant_ht2:4;
	u32 r_ant_ht_s1:4;
	u32 r_ant_non_ht_s1:4;
	u32 ofdm_txsc:2;
	u32 reserved:2;
};
149
/* CCK antenna-selection register layout (8-bit register). */
struct r_antenna_select_cck {
	u8 r_cckrx_enable_2:2;
	u8 r_cckrx_enable:2;
	u8 r_ccktx_enable:4;
};
155
/* Decoded EFUSE calibration data; fields correspond to the CT_OFFSET_*
 * byte offsets defined above. */
struct efuse_contents {
	u8 mac_addr[ETH_ALEN];			/* permanent MAC address */
	u8 cck_tx_power_idx[6];
	u8 ht40_1s_tx_power_idx[6];
	u8 ht40_2s_tx_power_idx_diff[3];
	u8 ht20_tx_power_idx_diff[3];
	u8 ofdm_tx_power_idx_diff[3];
	u8 ht40_max_power_offset[3];
	u8 ht20_max_power_offset[3];
	u8 channel_plan;
	u8 thermal_meter;
	u8 rf_option[5];
	u8 version;
	u8 oem_id;
	u8 regulatory;
};
172
/* Per-path, per-channel TX power tables (indices into the power LUT). */
struct tx_power_struct {
	u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 legacy_ht_txpowerdiff;
	u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
	u8 pwrgroup_cnt;
	u32 mcs_original_offset[4][16];
};
185
/* Public PHY API implemented in phy.c.
 * NOTE(review): rtl92c_phy_config_rf_with_feaderfile carries a
 * "feaderfile" (sic) typo and an rtl92c prefix; renaming would break
 * existing callers, so it is left as declared — verify which definition
 * it binds to. */
extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
				      u32 regaddr, u32 bitmask);
extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
				     u32 regaddr, u32 bitmask, u32 data);
extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
				      enum radio_path rfpath, u32 regaddr,
				      u32 bitmask);
extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
				     enum radio_path rfpath, u32 regaddr,
				     u32 bitmask, u32 data);
extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
						 enum radio_path rfpath);
extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
					    long *powerlevel);
extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
					    u8 channel);
extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
					     long power_indbm);
extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
						u8 operation);
extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
				      enum nl80211_channel_type ch_type);
extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
					     enum radio_path rfpath);
bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
					     enum rf_pwrstate rfpwr_state);
223
224#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
new file mode 100644
index 000000000000..df6ca9a57f7f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.c
@@ -0,0 +1,109 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "pwrseqcmd.h"
31#include "pwrseq.h"
32
/* drivers should parse arrays below and do the corresponding actions */
/* Each array is a flat list of wlan_pwr_cfg steps built by concatenating the
 * RTL8723A_TRANS_* macros from pwrseq.h; every array ends with
 * RTL8723A_TRANS_END (PWR_CMD_END), which is what terminates the parser in
 * rtl_hal_pwrseqcmdparsing(). Array sizes are the sum of the per-transition
 * *_STPS upper bounds, so the arrays may be larger than the used entries. */

/*3 Power on Array*/
/* CARDEMU -> ACT: bring the NIC from card emulation to active. */
struct wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS
					   + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_CARDEMU_TO_ACT,
	RTL8723A_TRANS_END
};

/*3Radio off GPIO Array */
/* ACT -> CARDEMU: radio off, back to card emulation. */
struct wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					    + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_ACT_TO_CARDEMU,
	RTL8723A_TRANS_END
};

/*3Card Disable Array*/
/* ACT -> CARDEMU -> CARDDIS: full card-disable sequence. */
struct wlan_pwr_cfg
rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
			   + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
			   + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_ACT_TO_CARDEMU,
	RTL8723A_TRANS_CARDEMU_TO_CARDDIS,
	RTL8723A_TRANS_END
};

/*3 Card Enable Array*/
/* CARDDIS -> CARDEMU -> ACT: reverse of the disable flow. */
struct wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					      + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
					      + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_CARDDIS_TO_CARDEMU,
	RTL8723A_TRANS_CARDEMU_TO_ACT,
	RTL8723A_TRANS_END
};

/*3Suspend Array*/
/* ACT -> CARDEMU -> SUS: host suspend entry. */
struct wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					  + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
					  + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_ACT_TO_CARDEMU,
	RTL8723A_TRANS_CARDEMU_TO_SUS,
	RTL8723A_TRANS_END
};

/*3 Resume Array*/
/* SUS -> CARDEMU -> ACT: host resume. */
struct wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					 + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
					 + RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_SUS_TO_CARDEMU,
	RTL8723A_TRANS_CARDEMU_TO_ACT,
	RTL8723A_TRANS_END
};

/*3HWPDN Array*/
/* ACT -> CARDEMU -> PDN: hardware power-down. */
struct wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					+ RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
					+ RTL8723A_TRANS_END_STPS] = {
	RTL8723A_TRANS_ACT_TO_CARDEMU,
	RTL8723A_TRANS_CARDEMU_TO_PDN,
	RTL8723A_TRANS_END
};

/*3 Enter LPS */
/* ACT -> LPS: enter low power state (leisure power save). */
struct wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS
					    + RTL8723A_TRANS_END_STPS] = {
	/*FW behavior*/
	RTL8723A_TRANS_ACT_TO_LPS,
	RTL8723A_TRANS_END
};

/*3 Leave LPS */
/* LPS -> ACT: leave low power state. */
struct wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS
					    + RTL8723A_TRANS_END_STPS] = {
	/*FW behavior*/
	RTL8723A_TRANS_LPS_TO_ACT,
	RTL8723A_TRANS_END
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
new file mode 100644
index 000000000000..7a46f9fdf558
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseq.h
@@ -0,0 +1,322 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_PWRSEQ_H__
31#define __RTL8723E_PWRSEQ_H__
32
33#include "pwrseqcmd.h"
34/*
35 Check document WM-20110607-Paul-RTL8723A_Power_Architecture-R02.vsd
36 There are 6 HW Power States:
37 0: POFF--Power Off
38 1: PDN--Power Down
39 2: CARDEMU--Card Emulation
40 3: ACT--Active Mode
41 4: LPS--Low Power State
42 5: SUS--Suspend
43
44 The transision from different states are defined below
45 TRANS_CARDEMU_TO_ACT
46 TRANS_ACT_TO_CARDEMU
47 TRANS_CARDEMU_TO_SUS
48 TRANS_SUS_TO_CARDEMU
49 TRANS_CARDEMU_TO_PDN
50 TRANS_ACT_TO_LPS
51 TRANS_LPS_TO_ACT
52
53 TRANS_END
54*/
55
/* Upper bounds (in steps) for each transition; used only to size the flow
 * arrays in pwrseq.c -- the arrays are terminated by PWR_CMD_END, not by
 * these counts. */
#define RTL8723A_TRANS_CARDEMU_TO_ACT_STPS	10
#define RTL8723A_TRANS_ACT_TO_CARDEMU_STPS	10
#define RTL8723A_TRANS_CARDEMU_TO_SUS_STPS	10
#define RTL8723A_TRANS_SUS_TO_CARDEMU_STPS	10
#define RTL8723A_TRANS_CARDEMU_TO_PDN_STPS	10
#define RTL8723A_TRANS_PDN_TO_CARDEMU_STPS	10
#define RTL8723A_TRANS_ACT_TO_LPS_STPS		15
#define RTL8723A_TRANS_LPS_TO_ACT_STPS		15
#define RTL8723A_TRANS_END_STPS			1

/* Each macro below expands to a comma-separated list of wlan_pwr_cfg
 * initializers (see pwrseqcmd.h for the field layout). The register
 * offsets and bit meanings come from the Realtek power-architecture
 * document referenced at the top of this file. */

/* CARDEMU -> ACT: power the MAC up. */
#define RTL8723A_TRANS_CARDEMU_TO_ACT					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
	 * comments here*/						\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), 0},			\
	/* disable SW LPS 0x04[10]=0*/					\
	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},		\
	/* wait till 0x04[17] = 1 power ready*/				\
	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},		\
	/* release WLON reset 0x04[16]=1*/				\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},			\
	/* disable HWPDN 0x04[15]=0*/					\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, (BIT(4)|BIT(3)), 0},		\
	/* disable WL suspend*/						\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)},		\
	/* polling until return 0*/					\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}

/* ACT -> CARDEMU: turn the RF off and hand control back to firmware. */
#define RTL8723A_TRANS_ACT_TO_CARDEMU					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
	 * comments here*/						\
	{0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},			\
	/*0x1F[7:0] = 0 turn off RF*/					\
	{0x004E, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},			\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},		\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}

/* CARDEMU -> SUS: enter suspend; interface-specific steps are selected via
 * the PWR_INTF_*_MSK field at parse time. */
#define RTL8723A_TRANS_CARDEMU_TO_SUS					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, \
	 * comments here*/						\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3),		\
	 (BIT(4)|BIT(3))},						\
	/*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/		\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK |	\
	 PWR_INTF_SDIO_MSK,						\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},	\
	/*0x04[12:11] = 2b'01 enable WL suspend*/			\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
	 PWR_BASEADDR_MAC,						\
	 PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)},			\
	/*0x04[12:11] = 2b'11 enable WL suspend for PCIe*/		\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO,						\
	 PWR_CMD_WRITE, BIT(0), BIT(0)},				\
	/*Set SDIO suspend local register*/				\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO,						\
	 PWR_CMD_POLLING, BIT(1), 0}					\
	/*wait power state to suspend*/

/* SUS -> CARDEMU: undo the suspend settings. */
#define RTL8723A_TRANS_SUS_TO_CARDEMU					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},			\
	/*Set SDIO suspend local register*/				\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},		\
	/*wait power state to suspend*/					\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}		\
	/*0x04[12:11] = 2b'01 enable WL suspend*/

/* CARDEMU -> CARDDIS: card-disable (deeper than suspend). */
#define RTL8723A_TRANS_CARDEMU_TO_CARDDIS				\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,			\
	 PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK,				\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)},	\
	/*0x04[12:11] = 2b'01 enable WL suspend*/			\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(2), BIT(2)},		\
	/*0x04[10] = 1, enable SW LPS*/					\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)},		\
	/*Set SDIO suspend local register*/				\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}			\
	/*wait power state to suspend*/

/* CARDDIS -> CARDEMU: re-enable the card after card-disable. */
#define RTL8723A_TRANS_CARDDIS_TO_CARDEMU				\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0},			\
	/*Set SDIO suspend local register*/				\
	{0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)},		\
	/*wait power state to suspend*/					\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0},		\
	/*0x04[12:11] = 2b'00 enable WL suspend*/			\
	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}			\
	/*PCIe DMA start*/

/* CARDEMU -> PDN: hardware power-down. */
#define RTL8723A_TRANS_CARDEMU_TO_PDN					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},			\
	/* 0x04[16] = 0*/						\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}		\
	/* 0x04[15] = 1*/

/* PDN -> CARDEMU: leave hardware power-down. */
#define RTL8723A_TRANS_PDN_TO_CARDEMU					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}			\
	/* 0x04[15] = 0*/

/* ACT -> LPS: stop DMA/TX, wait for the TX path to drain, then gate the
 * BB/MAC clocks. */
#define RTL8723A_TRANS_ACT_TO_LPS					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0301, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},			\
	/*PCIe DMA stop*/						\
	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},			\
	/*Tx Pause*/							\
	{0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},			\
	/*Should be zero if no packet is transmitting*/			\
	{0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},			\
	/*Should be zero if no packet is transmitting*/			\
	{0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},			\
	/*Should be zero if no packet is transmitting*/			\
	{0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},			\
	/*Should be zero if no packet is transmitting*/			\
	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},			\
	/*CCK and OFDM are disabled,and clock are gated*/		\
	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},		\
	/*Delay 1us*/							\
	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},			\
	/*Whole BB is reset*/						\
	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},			\
	/*Reset MAC TRX*/						\
	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},			\
	/*check if removed later*/					\
	{0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)}		\
	/*Respond TxOK to scheduler*/

/* LPS -> ACT: kick RPWM for the active interface, restore clocks, and
 * re-enable the MAC/BB TRX path. */
#define RTL8723A_TRANS_LPS_TO_ACT					\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK,	\
	 PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84},			\
	/*SDIO RPWM*/							\
	{0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},			\
	/*USB RPWM*/							\
	{0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84},			\
	/*PCIe RPWM*/							\
	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS},		\
	/*Delay*/							\
	{0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0},			\
	/* 0x08[4] = 0 switch TSF to 40M*/				\
	{0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0},			\
	/*Polling 0x109[7]=0 TSF in 40M*/				\
	{0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0},		\
	/*. 0x29[7:6] = 2b'00 enable BB clock*/				\
	{0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)},		\
	/*. 0x101[1] = 1*/						\
	{0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF},			\
	/* 0x100[7:0] = 0xFF enable WMAC TRX*/				\
	{0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0),		\
	 BIT(1)|BIT(0)},						\
	/* 0x02[1:0] = 2b'11 enable BB macro*/				\
	{0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}			\
	/*. 0x522 = 0*/

/* Terminator entry: PWR_CMD_END stops rtl_hal_pwrseqcmdparsing(). */
#define RTL8723A_TRANS_END						\
	/* format */							\
	/* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, */\
	{0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,	\
	 0, PWR_CMD_END, 0, 0}
277
/* Flow arrays defined in pwrseq.c; sizes must match the definitions there. */
extern struct
wlan_pwr_cfg rtl8723A_power_on_flow[RTL8723A_TRANS_CARDEMU_TO_ACT_STPS
				    + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_radio_off_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
				     + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_card_disable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
					+ RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
					+ RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_card_enable_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
				       + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
				       + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_suspend_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
				   + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
				   + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_resume_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
				  + RTL8723A_TRANS_CARDEMU_TO_SUS_STPS
				  + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_hwpdn_flow[RTL8723A_TRANS_ACT_TO_CARDEMU_STPS
				 + RTL8723A_TRANS_CARDEMU_TO_PDN_STPS
				 + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_enter_lps_flow[RTL8723A_TRANS_ACT_TO_LPS_STPS
				     + RTL8723A_TRANS_END_STPS];
extern struct
wlan_pwr_cfg rtl8723A_leave_lps_flow[RTL8723A_TRANS_LPS_TO_ACT_STPS
				     + RTL8723A_TRANS_END_STPS];

/* RTL8723 Power Configuration CMDs for PCIe interface */
/* Aliases used by the HW init code to name the flows generically. */
#define Rtl8723_NIC_PWR_ON_FLOW		rtl8723A_power_on_flow
#define Rtl8723_NIC_RF_OFF_FLOW		rtl8723A_radio_off_flow
#define Rtl8723_NIC_DISABLE_FLOW	rtl8723A_card_disable_flow
#define Rtl8723_NIC_ENABLE_FLOW		rtl8723A_card_enable_flow
#define Rtl8723_NIC_SUSPEND_FLOW	rtl8723A_suspend_flow
#define Rtl8723_NIC_RESUME_FLOW		rtl8723A_resume_flow
#define Rtl8723_NIC_PDN_FLOW		rtl8723A_hwpdn_flow
#define Rtl8723_NIC_LPS_ENTER_FLOW	rtl8723A_enter_lps_flow
#define Rtl8723_NIC_LPS_LEAVE_FLOW	rtl8723A_leave_lps_flow
321
322#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c
new file mode 100644
index 000000000000..2044b5936b7f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.c
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "pwrseq.h"
31
32/* Description:
33 * This routine deals with the Power Configuration CMD
34 * parsing for RTL8723/RTL8188E Series IC.
35 * Assumption:
36 * We should follow specific format that was released from HW SD.
37 */
38bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
39 u8 faversion, u8 interface_type,
40 struct wlan_pwr_cfg pwrcfgcmd[])
41{
42 struct wlan_pwr_cfg cfg_cmd = {0};
43 bool polling_bit = false;
44 u32 ary_idx = 0;
45 u8 value = 0;
46 u32 offset = 0;
47 u32 polling_count = 0;
48 u32 max_polling_cnt = 5000;
49
50 do {
51 cfg_cmd = pwrcfgcmd[ary_idx];
52 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
53 "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x),"
54 "interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
55 GET_PWR_CFG_OFFSET(cfg_cmd),
56 GET_PWR_CFG_CUT_MASK(cfg_cmd),
57 GET_PWR_CFG_FAB_MASK(cfg_cmd),
58 GET_PWR_CFG_INTF_MASK(cfg_cmd),
59 GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
60 GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
61
62 if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
63 (GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
64 (GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
65 switch (GET_PWR_CFG_CMD(cfg_cmd)) {
66 case PWR_CMD_READ:
67 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
68 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
69 break;
70 case PWR_CMD_WRITE:
71 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
72 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_WRITE\n");
73 offset = GET_PWR_CFG_OFFSET(cfg_cmd);
74
75 /*Read the value from system register*/
76 value = rtl_read_byte(rtlpriv, offset);
77 value &= (~(GET_PWR_CFG_MASK(cfg_cmd)));
78 value |= (GET_PWR_CFG_VALUE(cfg_cmd) &
79 GET_PWR_CFG_MASK(cfg_cmd));
80
81 /*Write the value back to sytem register*/
82 rtl_write_byte(rtlpriv, offset, value);
83 break;
84 case PWR_CMD_POLLING:
85 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
86 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
87 polling_bit = false;
88 offset = GET_PWR_CFG_OFFSET(cfg_cmd);
89
90 do {
91 value = rtl_read_byte(rtlpriv, offset);
92
93 value &= GET_PWR_CFG_MASK(cfg_cmd);
94 if (value ==
95 (GET_PWR_CFG_VALUE(cfg_cmd)
96 & GET_PWR_CFG_MASK(cfg_cmd)))
97 polling_bit = true;
98 else
99 udelay(10);
100
101 if (polling_count++ > max_polling_cnt)
102 return false;
103 } while (!polling_bit);
104 break;
105 case PWR_CMD_DELAY:
106 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
107 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
108 if (GET_PWR_CFG_VALUE(cfg_cmd) ==
109 PWRSEQ_DELAY_US)
110 udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
111 else
112 mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
113 break;
114 case PWR_CMD_END:
115 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
116 "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
117 return true;
118 default:
119 RT_ASSERT(false,
120 "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
121 break;
122 }
123
124 }
125 ary_idx++;
126 } while (1);
127
128 return true;
129}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h
new file mode 100644
index 000000000000..6e0f3ea37ec0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/pwrseqcmd.h
@@ -0,0 +1,98 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL8723E_PWRSEQCMD_H__
#define __RTL8723E_PWRSEQCMD_H__

#include "../wifi.h"
/*---------------------------------------------
 * 3 The value of cmd: 4 bits
 *---------------------------------------------
 */
#define PWR_CMD_READ		0x00
#define PWR_CMD_WRITE		0x01
#define PWR_CMD_POLLING		0x02
#define PWR_CMD_DELAY		0x03
#define PWR_CMD_END		0x04

/* define the base address of each block */
#define PWR_BASEADDR_MAC	0x00
#define PWR_BASEADDR_USB	0x01
#define PWR_BASEADDR_PCIE	0x02
#define PWR_BASEADDR_SDIO	0x03

/* Host-interface selector bits matched against wlan_pwr_cfg.interface_msk. */
#define PWR_INTF_SDIO_MSK	BIT(0)
#define PWR_INTF_USB_MSK	BIT(1)
#define PWR_INTF_PCI_MSK	BIT(2)
/* NOTE(review): ALL_MSK includes BIT(3) although only three interfaces are
 * defined -- presumably a reserved slot; confirm before relying on it. */
#define PWR_INTF_ALL_MSK	(BIT(0)|BIT(1)|BIT(2)|BIT(3))

/* Fabrication-vendor selector bits matched against wlan_pwr_cfg.fab_msk. */
#define PWR_FAB_TSMC_MSK	BIT(0)
#define PWR_FAB_UMC_MSK		BIT(1)
/* NOTE(review): likewise spans four bits for a 4-bit field with two vendors
 * defined -- TODO confirm the extra bits are reserved. */
#define PWR_FAB_ALL_MSK		(BIT(0)|BIT(1)|BIT(2)|BIT(3))

/* Chip-cut selector bits matched against wlan_pwr_cfg.cut_msk. */
#define PWR_CUT_TESTCHIP_MSK	BIT(0)
#define PWR_CUT_A_MSK		BIT(1)
#define PWR_CUT_B_MSK		BIT(2)
#define PWR_CUT_C_MSK		BIT(3)
#define PWR_CUT_D_MSK		BIT(4)
#define PWR_CUT_E_MSK		BIT(5)
#define PWR_CUT_F_MSK		BIT(6)
#define PWR_CUT_G_MSK		BIT(7)
#define PWR_CUT_ALL_MSK		0xFF

/* Unit for PWR_CMD_DELAY: stored in the entry's value field, while the
 * duration itself is carried in the offset field. */
enum pwrseq_delay_unit {
	PWRSEQ_DELAY_US,
	PWRSEQ_DELAY_MS,
};

/* One power-sequence step. For WRITE/POLLING, offset is the register
 * address and msk/value define the read-modify-write or poll target;
 * for DELAY, offset is the duration and value the unit (see above). */
struct wlan_pwr_cfg {
	u16 offset;
	u8 cut_msk;
	u8 fab_msk:4;
	u8 interface_msk:4;
	u8 base:4;
	u8 cmd:4;
	u8 msk;
	u8 value;
};

/* Field accessors; the argument is a struct wlan_pwr_cfg by value. */
#define	GET_PWR_CFG_OFFSET(__PWR_CMD)	(__PWR_CMD.offset)
#define	GET_PWR_CFG_CUT_MASK(__PWR_CMD)	(__PWR_CMD.cut_msk)
#define	GET_PWR_CFG_FAB_MASK(__PWR_CMD)	(__PWR_CMD.fab_msk)
#define	GET_PWR_CFG_INTF_MASK(__PWR_CMD)	(__PWR_CMD.interface_msk)
#define	GET_PWR_CFG_BASE(__PWR_CMD)	(__PWR_CMD.base)
#define	GET_PWR_CFG_CMD(__PWR_CMD)	(__PWR_CMD.cmd)
#define	GET_PWR_CFG_MASK(__PWR_CMD)	(__PWR_CMD.msk)
#define	GET_PWR_CFG_VALUE(__PWR_CMD)	(__PWR_CMD.value)

/* NOTE(review): the definition in pwrseqcmd.c names the second-to-last
 * parameter "faversion" rather than "fab_version" -- same type/position,
 * cosmetic mismatch only. */
bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
			      u8 fab_version, u8 interface_type,
			      struct wlan_pwr_cfg pwrcfgcmd[]);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
new file mode 100644
index 000000000000..199da366c6da
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/reg.h
@@ -0,0 +1,2097 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_REG_H__
31#define __RTL8723E_REG_H__
32
33#define REG_SYS_ISO_CTRL 0x0000
34#define REG_SYS_FUNC_EN 0x0002
35#define REG_APS_FSMCO 0x0004
36#define REG_SYS_CLKR 0x0008
37#define REG_9346CR 0x000A
38#define REG_EE_VPD 0x000C
39#define REG_AFE_MISC 0x0010
40#define REG_SPS0_CTRL 0x0011
41#define REG_SPS_OCP_CFG 0x0018
42#define REG_RSV_CTRL 0x001C
43#define REG_RF_CTRL 0x001F
44#define REG_LDOA15_CTRL 0x0020
45#define REG_LDOV12D_CTRL 0x0021
46#define REG_LDOHCI12_CTRL 0x0022
47#define REG_LPLDO_CTRL 0x0023
48#define REG_AFE_XTAL_CTRL 0x0024
49#define REG_AFE_PLL_CTRL 0x0028
50#define REG_EFUSE_CTRL 0x0030
51#define REG_EFUSE_TEST 0x0034
52#define REG_PWR_DATA 0x0038
53#define REG_CAL_TIMER 0x003C
54#define REG_ACLK_MON 0x003E
55#define REG_GPIO_MUXCFG 0x0040
56#define REG_GPIO_IO_SEL 0x0042
57#define REG_MAC_PINMUX_CFG 0x0043
58#define REG_GPIO_PIN_CTRL 0x0044
59#define REG_GPIO_INTM 0x0048
60#define REG_LEDCFG0 0x004C
61#define REG_LEDCFG1 0x004D
62#define REG_LEDCFG2 0x004E
63#define REG_LEDCFG3 0x004F
64#define REG_FSIMR 0x0050
65#define REG_FSISR 0x0054
66#define REG_GPIO_PIN_CTRL_2 0x0060
67#define REG_GPIO_IO_SEL_2 0x0062
68#define REG_MULTI_FUNC_CTRL 0x0068
69
70#define REG_MCUFWDL 0x0080
71
72#define REG_HMEBOX_EXT_0 0x0088
73#define REG_HMEBOX_EXT_1 0x008A
74#define REG_HMEBOX_EXT_2 0x008C
75#define REG_HMEBOX_EXT_3 0x008E
76
77#define REG_BIST_SCAN 0x00D0
78#define REG_BIST_RPT 0x00D4
79#define REG_BIST_ROM_RPT 0x00D8
80#define REG_USB_SIE_INTF 0x00E0
81#define REG_PCIE_MIO_INTF 0x00E4
82#define REG_PCIE_MIO_INTD 0x00E8
83#define REG_SYS_CFG 0x00F0
84#define REG_GPIO_OUTSTS 0x00F4
85
86#define REG_CR 0x0100
87#define REG_PBP 0x0104
88#define REG_TRXDMA_CTRL 0x010C
89#define REG_TRXFF_BNDY 0x0114
90#define REG_TRXFF_STATUS 0x0118
91#define REG_RXFF_PTR 0x011C
92#define REG_HIMR 0x0120
93#define REG_HISR 0x0124
94#define REG_HIMRE 0x0128
95#define REG_HISRE 0x012C
96#define REG_CPWM 0x012F
97#define REG_FWIMR 0x0130
98#define REG_FWISR 0x0134
99#define REG_PKTBUF_DBG_CTRL 0x0140
100#define REG_PKTBUF_DBG_DATA_L 0x0144
101#define REG_PKTBUF_DBG_DATA_H 0x0148
102
103#define REG_TC0_CTRL 0x0150
104#define REG_TC1_CTRL 0x0154
105#define REG_TC2_CTRL 0x0158
106#define REG_TC3_CTRL 0x015C
107#define REG_TC4_CTRL 0x0160
108#define REG_TCUNIT_BASE 0x0164
109#define REG_MBIST_START 0x0174
110#define REG_MBIST_DONE 0x0178
111#define REG_MBIST_FAIL 0x017C
112#define REG_C2HEVT_MSG_NORMAL 0x01A0
113#define REG_C2HEVT_MSG_TEST 0x01B8
114#define REG_MCUTST_1 0x01c0
115#define REG_FMETHR 0x01C8
116#define REG_HMETFR 0x01CC
117#define REG_HMEBOX_0 0x01D0
/* Host-to-MCU mailbox registers (continuation of the HMEBOX group). */
#define REG_HMEBOX_1 0x01D4
#define REG_HMEBOX_2 0x01D8
#define REG_HMEBOX_3 0x01DC

/* Link List Table init and BB indirect-access registers. */
#define REG_LLT_INIT 0x01E0
/* NOTE(review): "ACCEESS" is a long-standing misspelling of "ACCESS";
 * the name is kept as-is because existing callers reference it. */
#define REG_BB_ACCEESS_CTRL 0x01E8
#define REG_BB_ACCESS_DATA 0x01EC

/* TX buffer page allocation / TXDMA status (page 2). */
#define REG_RQPN 0x0200
#define REG_FIFOPAGE 0x0204
#define REG_TDECTRL 0x0208
#define REG_TXDMA_OFFSET_CHK 0x020C
#define REG_TXDMA_STATUS 0x0210
#define REG_RQPN_NPQ 0x0214

/* RX DMA aggregation and status. */
#define REG_RXDMA_AGG_PG_TH 0x0280
#define REG_RXPKT_NUM 0x0284
#define REG_RXDMA_STATUS 0x0288

/* PCIe host interface: control, interrupt migration, and per-queue
 * descriptor base addresses (page 3). */
#define REG_PCIE_CTRL_REG 0x0300
#define REG_INT_MIG 0x0304
#define REG_BCNQ_DESA 0x0308
#define REG_HQ_DESA 0x0310
#define REG_MGQ_DESA 0x0318
#define REG_VOQ_DESA 0x0320
#define REG_VIQ_DESA 0x0328
#define REG_BEQ_DESA 0x0330
#define REG_BKQ_DESA 0x0338
#define REG_RX_DESA 0x0340
#define REG_DBI 0x0348
#define REG_MDIO 0x0354
#define REG_DBG_SEL 0x0360
#define REG_PCIE_HRPWM 0x0361
#define REG_PCIE_HCPWM 0x0363
#define REG_UART_CTRL 0x0364
#define REG_UART_TX_DESA 0x0370
#define REG_UART_RX_DESA 0x0378

/* These queues have no descriptor base address register on this chip. */
#define REG_HDAQ_DESA_NODEF 0x0000
#define REG_CMDQ_DESA_NODEF 0x0000

/* Per-queue information registers (page 4). */
#define REG_VOQ_INFORMATION 0x0400
#define REG_VIQ_INFORMATION 0x0404
#define REG_BEQ_INFORMATION 0x0408
#define REG_BKQ_INFORMATION 0x040C
#define REG_MGQ_INFORMATION 0x0410
#define REG_HGQ_INFORMATION 0x0414
#define REG_BCNQ_INFORMATION 0x0418

/* FW/HW TX queue control, rate-adaptive and protection settings. */
#define REG_CPU_MGQ_INFORMATION 0x041C
#define REG_FWHW_TXQ_CTRL 0x0420
#define REG_HWSEQ_CTRL 0x0423
#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
#define REG_TXPKTBUF_MGQ_BDNY 0x0425
#define REG_MULTI_BCNQ_EN 0x0426
#define REG_MULTI_BCNQ_OFFSET 0x0427
#define REG_SPEC_SIFS 0x0428
#define REG_RL 0x042A
#define REG_DARFRC 0x0430
#define REG_RARFRC 0x0438
#define REG_RRSR 0x0440
#define REG_ARFR0 0x0444
#define REG_ARFR1 0x0448
#define REG_ARFR2 0x044C
#define REG_ARFR3 0x0450
#define REG_AGGLEN_LMT 0x0458
#define REG_AMPDU_MIN_SPACE 0x045C
#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
#define REG_FAST_EDCA_CTRL 0x0460
#define REG_RD_RESP_PKT_TH 0x0463
#define REG_INIRTS_RATE_SEL 0x0480
#define REG_INIDATA_RATE_SEL 0x0484
#define REG_POWER_STATUS 0x04A4
#define REG_POWER_STAGE1 0x04B4
#define REG_POWER_STAGE2 0x04B8
#define REG_PKT_LIFE_TIME 0x04C0
#define REG_STBC_SETTING 0x04C4
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_BAR_MODE_CTRL 0x04CC
#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
#define REG_NQOS_SEQ 0x04DC
#define REG_QOS_SEQ 0x04DE
#define REG_NEED_CPU_HANDLE 0x04E0
#define REG_PKT_LOSE_RPT 0x04E1
#define REG_PTCL_ERR_STATUS 0x04E2
#define REG_DUMMY 0x04FC

/* EDCA, timing, beacon and TSF registers (page 5). */
#define REG_EDCA_VO_PARAM 0x0500
#define REG_EDCA_VI_PARAM 0x0504
#define REG_EDCA_BE_PARAM 0x0508
#define REG_EDCA_BK_PARAM 0x050C
#define REG_BCNTCFG 0x0510
#define REG_PIFS 0x0512
#define REG_RDG_PIFS 0x0513
#define REG_SIFS_CTX 0x0514
#define REG_SIFS_TRX 0x0516
#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
#define REG_TXPAUSE 0x0522
#define REG_DIS_TXREQ_CLR 0x0523
#define REG_RD_CTRL 0x0524
#define REG_TBTT_PROHIBIT 0x0540
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
#define REG_USTIME_TSF 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
/* 0x0554 is shared: beacon interval and the multi-BSSID beacon space
 * fields overlay the same register. */
#define REG_BCN_INTERVAL 0x0554
#define REG_MBSSID_BCN_SPACE 0x0554
#define REG_DRVERLYINT 0x0558
#define REG_BCNDMATIM 0x0559
#define REG_ATIMWND 0x055A
#define REG_BCN_MAX_ERR 0x055D
#define REG_RXTSF_OFFSET_CCK 0x055E
#define REG_RXTSF_OFFSET_OFDM 0x055F
#define REG_TSFTR 0x0560
#define REG_INIT_TSFTR 0x0564
#define REG_PSTIMER 0x0580
#define REG_TIMER0 0x0584
#define REG_TIMER1 0x0588
#define REG_ACMHWCTRL 0x05C0
#define REG_ACMRSTCTRL 0x05C1
#define REG_ACMAVG 0x05C2
#define REG_VO_ADMTIME 0x05C4
#define REG_VI_ADMTIME 0x05C6
#define REG_BE_ADMTIME 0x05C8
#define REG_EDCA_RANDOM_GEN 0x05CC
#define REG_SCH_TXCMD 0x05D0

/* WMAC configuration, RX filtering and security (page 6). */
#define REG_APSD_CTRL 0x0600
#define REG_BWOPMODE 0x0603
#define REG_TCR 0x0604
#define REG_RCR 0x0608
#define REG_RX_PKT_LIMIT 0x060C
#define REG_RX_DLK_TIME 0x060D
#define REG_RX_DRVINFO_SZ 0x060F

#define REG_MACID 0x0610
#define REG_BSSID 0x0618
#define REG_MAR 0x0620
#define REG_MBIDCAMCFG 0x0628

#define REG_USTIME_EDCA 0x0638
#define REG_MAC_SPEC_SIFS 0x063A
#define REG_RESP_SIFS_CCK 0x063C
#define REG_RESP_SIFS_OFDM 0x063E
#define REG_ACKTO 0x0640
#define REG_CTS2TO 0x0641
#define REG_EIFS 0x0642

#define REG_NAV_CTRL 0x0650
#define REG_BACAMCMD 0x0654
#define REG_BACAMCONTENT 0x0658
#define REG_LBDLY 0x0660
#define REG_FWDLY 0x0661
#define REG_RXERR_RPT 0x0664
#define REG_WMAC_TRXPTCL_CTL 0x0668

#define REG_CAMCMD 0x0670
#define REG_CAMWRITE 0x0674
#define REG_CAMREAD 0x0678
#define REG_CAMDBG 0x067C
#define REG_SECCFG 0x0680

#define REG_WOW_CTRL 0x0690
#define REG_PSSTATUS 0x0691
#define REG_PS_RX_INFO 0x0692
#define REG_LPNAV_CTRL 0x0694
#define REG_WKFMCAM_CMD 0x0698
#define REG_WKFMCAM_RWD 0x069C
#define REG_RXFLTMAP0 0x06A0
#define REG_RXFLTMAP1 0x06A2
#define REG_RXFLTMAP2 0x06A4
#define REG_BCN_PSR_RPT 0x06A8
#define REG_CALB32K_CTRL 0x06AC
#define REG_PKT_MON_CTRL 0x06B4
#define REG_BT_COEX_TABLE 0x06C0
#define REG_WMAC_RESP_TXINFO 0x06D8

/* USB configuration space registers (0xFExx range). */
#define REG_USB_INFO 0xFE17
#define REG_USB_SPECIAL_OPTION 0xFE55
#define REG_USB_DMA_AGG_TO 0xFE5B
#define REG_USB_AGG_TO 0xFE5C
#define REG_USB_AGG_TH 0xFE5D

/* Test-chip SIE (serial interface engine) registers. */
#define REG_TEST_USB_TXQS 0xFE48
#define REG_TEST_SIE_VID 0xFE60
#define REG_TEST_SIE_PID 0xFE62
#define REG_TEST_SIE_OPTIONAL 0xFE64
#define REG_TEST_SIE_CHIRP_K 0xFE65
#define REG_TEST_SIE_PHY 0xFE66
#define REG_TEST_SIE_MAC_ADDR 0xFE70
#define REG_TEST_SIE_STRING 0xFE80

/* Normal-chip SIE registers (same offsets as the test-chip set,
 * with EP/PHY layout differences). */
#define REG_NORMAL_SIE_VID 0xFE60
#define REG_NORMAL_SIE_PID 0xFE62
#define REG_NORMAL_SIE_OPTIONAL 0xFE64
#define REG_NORMAL_SIE_EP 0xFE65
#define REG_NORMAL_SIE_PHY 0xFE68
#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
#define REG_NORMAL_SIE_STRING 0xFE80
/* Legacy short names mapped onto the register map; the REG_* names on the
 * right-hand side are defined elsewhere in this header. */
#define CR9346 REG_9346CR
#define MSR (REG_CR + 2)
#define ISR REG_HISR
#define TSFR REG_TSFTR

/* MAC address register, split into low dword (R0) and high word (R4). */
#define MACIDR0 REG_MACID
#define MACIDR4 (REG_MACID + 4)

#define PBP REG_PBP

#define IDR0 MACIDR0
#define IDR4 MACIDR4

/* Registers that existed on older chips but are unused here; all aliases
 * point at a single dummy offset. */
#define UNUSED_REGISTER 0x1BF
#define DCAM UNUSED_REGISTER
#define PSR UNUSED_REGISTER
#define BBADDR UNUSED_REGISTER
#define PHYDATAR UNUSED_REGISTER

/* Sentinel returned when a BB/RF register read fails. */
#define INVALID_BBRF_VALUE 0x12345678

/* A-MPDU minimum MPDU start spacing for 2- and 1-transmit-stream setups. */
#define MAX_MSS_DENSITY_2T 0x13
#define MAX_MSS_DENSITY_1T 0x0A
345
/* 9346CR bits: autoload source selection (EEPROM vs efuse). */
#define CMDEEPROM_EN BIT(5)
#define CMDEEPROM_SEL BIT(4)
#define CMD9346CR_9356SEL BIT(4)
#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
#define AUTOLOAD_EFUSE CMDEEPROM_EN

/* GPIO mux selection. */
#define GPIOSEL_GPIO 0
#define GPIOSEL_ENBT BIT(5)

/* Byte lanes of the GPIO pin control register. */
#define GPIO_IN REG_GPIO_PIN_CTRL
#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)

/* Media Status Register network-type values (2-bit field). */
#define MSR_NOLINK 0x00
#define MSR_ADHOC 0x01
#define MSR_INFRA 0x02
#define MSR_AP 0x03

/* RRSR (response rate set) field offsets and rate bits. */
#define RRSR_RSC_OFFSET 21
#define RRSR_SHORT_OFFSET 23
#define RRSR_RSC_BW_40M 0x600000
#define RRSR_RSC_UPSUBCHNL 0x400000
#define RRSR_RSC_LOWSUBCHNL 0x200000
#define RRSR_SHORT 0x800000
#define RRSR_1M BIT(0)
#define RRSR_2M BIT(1)
#define RRSR_5_5M BIT(2)
#define RRSR_11M BIT(3)
#define RRSR_6M BIT(4)
#define RRSR_9M BIT(5)
#define RRSR_12M BIT(6)
#define RRSR_18M BIT(7)
#define RRSR_24M BIT(8)
#define RRSR_36M BIT(9)
#define RRSR_48M BIT(10)
#define RRSR_54M BIT(11)
#define RRSR_MCS0 BIT(12)
#define RRSR_MCS1 BIT(13)
#define RRSR_MCS2 BIT(14)
#define RRSR_MCS3 BIT(15)
#define RRSR_MCS4 BIT(16)
#define RRSR_MCS5 BIT(17)
#define RRSR_MCS6 BIT(18)
#define RRSR_MCS7 BIT(19)
#define BRSR_ACKSHORTPMB BIT(23)
392
/* RATR (rate adaptive table) rate bits: CCK, legacy OFDM, and HT MCS0-15. */
#define RATR_1M 0x00000001
#define RATR_2M 0x00000002
#define RATR_55M 0x00000004
#define RATR_11M 0x00000008
#define RATR_6M 0x00000010
#define RATR_9M 0x00000020
#define RATR_12M 0x00000040
#define RATR_18M 0x00000080
#define RATR_24M 0x00000100
#define RATR_36M 0x00000200
#define RATR_48M 0x00000400
#define RATR_54M 0x00000800
#define RATR_MCS0 0x00001000
#define RATR_MCS1 0x00002000
#define RATR_MCS2 0x00004000
#define RATR_MCS3 0x00008000
#define RATR_MCS4 0x00010000
#define RATR_MCS5 0x00020000
#define RATR_MCS6 0x00040000
#define RATR_MCS7 0x00080000
#define RATR_MCS8 0x00100000
#define RATR_MCS9 0x00200000
#define RATR_MCS10 0x00400000
#define RATR_MCS11 0x00800000
#define RATR_MCS12 0x01000000
#define RATR_MCS13 0x02000000
#define RATR_MCS14 0x04000000
#define RATR_MCS15 0x08000000

/* Convenience masks: all CCK rates, all legacy OFDM rates, and the HT
 * one-/two-spatial-stream MCS groups. */
#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M |\
			RATR_24M | RATR_36M | RATR_48M | RATR_54M)
#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 |\
			RATR_MCS3 | RATR_MCS4 | RATR_MCS5 |\
			RATR_MCS6 | RATR_MCS7)
#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 |\
			RATR_MCS11 | RATR_MCS12 | RATR_MCS13 |\
			RATR_MCS14 | RATR_MCS15)
431
/* REG_BWOPMODE bandwidth/operation-mode bits. */
#define BW_OPMODE_20MHZ BIT(2)
#define BW_OPMODE_5G BIT(1)
#define BW_OPMODE_11J BIT(0)

/* Security CAM entry flag bits. */
#define CAM_VALID BIT(15)
#define CAM_NOTVALID 0x0000
#define CAM_USEDK BIT(5)

/* CAM cipher-suite encodings. */
#define CAM_NONE 0x0
#define CAM_WEP40 0x01
#define CAM_TKIP 0x02
#define CAM_AES 0x04
#define CAM_WEP104 0x05

#define TOTAL_CAM_ENTRY 32
#define HALF_CAM_ENTRY 16

/* CAM command register bits.
 * NOTE(review): "POLLINIG" is a misspelling of "POLLING"; kept because
 * callers use this name. */
#define CAM_WRITE BIT(16)
#define CAM_READ 0x00000000
#define CAM_POLLINIG BIT(31)

/* Security configuration register bits (legacy short forms). */
#define SCR_USEDK 0x01
#define SCR_TXSEC_ENABLE 0x02
#define SCR_RXSEC_ENABLE 0x04

/* Wake-on-WLAN control bits. */
#define WOW_PMEN BIT(0)
#define WOW_WOMEN BIT(1)
#define WOW_MAGIC BIT(2)
#define WOW_UWF BIT(3)

/* Host interrupt mask register (IMR/HIMR) bits, DW0. */
#define IMR8190_DISABLED 0x0
#define IMR_BCNDMAINT6 BIT(31)
#define IMR_BCNDMAINT5 BIT(30)
#define IMR_BCNDMAINT4 BIT(29)
#define IMR_BCNDMAINT3 BIT(28)
#define IMR_BCNDMAINT2 BIT(27)
#define IMR_BCNDMAINT1 BIT(26)
#define IMR_BCNDOK8 BIT(25)
#define IMR_BCNDOK7 BIT(24)
#define IMR_BCNDOK6 BIT(23)
#define IMR_BCNDOK5 BIT(22)
#define IMR_BCNDOK4 BIT(21)
#define IMR_BCNDOK3 BIT(20)
#define IMR_BCNDOK2 BIT(19)
#define IMR_BCNDOK1 BIT(18)
#define IMR_TIMEOUT2 BIT(17)
#define IMR_TIMEOUT1 BIT(16)
#define IMR_TXFOVW BIT(15)
#define IMR_PSTIMEOUT BIT(14)
#define IMR_BCNINT BIT(13)
#define IMR_RXFOVW BIT(12)
#define IMR_RDU BIT(11)
#define IMR_ATIMEND BIT(10)
#define IMR_BDOK BIT(9)
#define IMR_HIGHDOK BIT(8)
#define IMR_TBDOK BIT(7)
#define IMR_MGNTDOK BIT(6)
#define IMR_TBDER BIT(5)
#define IMR_BKDOK BIT(4)
#define IMR_BEDOK BIT(3)
#define IMR_VIDOK BIT(2)
#define IMR_VODOK BIT(1)
#define IMR_ROK BIT(0)

/* Extended interrupt mask bits (second IMR dword). */
#define IMR_TXERR BIT(11)
#define IMR_RXERR BIT(10)
#define IMR_CPWM BIT(8)
#define IMR_OCPINT BIT(1)
#define IMR_WLANOFF BIT(0)
501
/* 8723E series PCIE Host IMR/ISR bit */
/* IMR DW0 Bit 0-31 */
#define PHIMR_TIMEOUT2 BIT(31)
#define PHIMR_TIMEOUT1 BIT(30)
#define PHIMR_PSTIMEOUT BIT(29)
#define PHIMR_GTINT4 BIT(28)
#define PHIMR_GTINT3 BIT(27)
#define PHIMR_TXBCNERR BIT(26)
#define PHIMR_TXBCNOK BIT(25)
#define PHIMR_TSF_BIT32_TOGGLE BIT(24)
#define PHIMR_BCNDMAINT3 BIT(23)
#define PHIMR_BCNDMAINT2 BIT(22)
#define PHIMR_BCNDMAINT1 BIT(21)
#define PHIMR_BCNDMAINT0 BIT(20)
#define PHIMR_BCNDOK3 BIT(19)
#define PHIMR_BCNDOK2 BIT(18)
#define PHIMR_BCNDOK1 BIT(17)
#define PHIMR_BCNDOK0 BIT(16)
#define PHIMR_HSISR_IND_ON BIT(15)
#define PHIMR_BCNDMAINT_E BIT(14)
#define PHIMR_ATIMEND_E BIT(13)
#define PHIMR_ATIM_CTW_END BIT(12)
#define PHIMR_HISRE_IND BIT(11)
#define PHIMR_C2HCMD BIT(10)
#define PHIMR_CPWM2 BIT(9)
#define PHIMR_CPWM BIT(8)
#define PHIMR_HIGHDOK BIT(7)
#define PHIMR_MGNTDOK BIT(6)
#define PHIMR_BKDOK BIT(5)
#define PHIMR_BEDOK BIT(4)
#define PHIMR_VIDOK BIT(3)
#define PHIMR_VODOK BIT(2)
#define PHIMR_RDU BIT(1)
#define PHIMR_ROK BIT(0)

/* PCIE Host Interrupt Status Extension bit */
#define PHIMR_BCNDMAINT7 BIT(23)
#define PHIMR_BCNDMAINT6 BIT(22)
#define PHIMR_BCNDMAINT5 BIT(21)
#define PHIMR_BCNDMAINT4 BIT(20)
#define PHIMR_BCNDOK7 BIT(19)
#define PHIMR_BCNDOK6 BIT(18)
#define PHIMR_BCNDOK5 BIT(17)
#define PHIMR_BCNDOK4 BIT(16)
/* bit12-15: RSVD */
#define PHIMR_TXERR BIT(11)
#define PHIMR_RXERR BIT(10)
#define PHIMR_TXFOVW BIT(9)
#define PHIMR_RXFOVW BIT(8)
/* bit2-7: RSV */
#define PHIMR_OCPINT BIT(1)

/* Efuse/EEPROM geometry constants. */
#define HWSET_MAX_SIZE 256
#define EFUSE_MAX_SECTION 32
#define EFUSE_REAL_CONTENT_LEN 512
#define EFUSE_OOB_PROTECT_BYTES 15
558
/* Default values used when the EEPROM/efuse content is absent or invalid.
 * Fix: EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF was defined twice with the same
 * value (0x3); the redundant second definition has been removed. */
#define EEPROM_DEFAULT_TSSI 0x0
#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
#define EEPROM_DEFAULT_CRYSTALCAP 0x5
#define EEPROM_DEFAULT_BOARDTYPE 0x02
#define EEPROM_DEFAULT_TXPOWER 0x1010
#define EEPROM_DEFAULT_HT2T_TXPWR 0x10

#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
#define EEPROM_DEFAULT_THERMALMETER 0x12
#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
#define EEPROM_DEFAULT_HT20_DIFF 2
#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0

/* Default USB/PCI identification when the image carries none. */
#define EEPROM_DEFAULT_PID 0x1234
#define EEPROM_DEFAULT_VID 0x5678
#define EEPROM_DEFAULT_CUSTOMERID 0xAB
#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
#define EEPROM_DEFAULT_VERSION 0
583
/* Channel-plan codes stored in the EEPROM. */
#define EEPROM_CHANNEL_PLAN_FCC 0x0
#define EEPROM_CHANNEL_PLAN_IC 0x1
#define EEPROM_CHANNEL_PLAN_ETSI 0x2
#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
#define EEPROM_CHANNEL_PLAN_MKK 0x5
#define EEPROM_CHANNEL_PLAN_MKK1 0x6
#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
#define EEPROM_CHANNEL_PLAN_TELEC 0x8
#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
#define EEPROM_CHANNEL_PLAN_NCC 0xB
#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80

/* Customer ID codes. */
#define EEPROM_CID_DEFAULT 0x0
#define EEPROM_CID_TOSHIBA 0x4
#define EEPROM_CID_CCX 0x10
#define EEPROM_CID_QMI 0x0D
#define EEPROM_CID_WHQL 0xFE

/* Signature word expected at the start of a valid EEPROM image. */
#define RTL8192_EEPROM_ID 0x8129

#define RTL8190_EEPROM_ID 0x8129
#define EEPROM_HPON 0x02
#define EEPROM_CLK 0x06
#define EEPROM_TESTR 0x08

/* PCI identification offsets within the EEPROM image. */
#define EEPROM_VID 0x49
#define EEPROM_DID 0x4B
#define EEPROM_SVID 0x4D
#define EEPROM_SMID 0x4F

#define EEPROM_MAC_ADDR 0x67

/* TX power index tables and per-mode power-diff offsets. */
#define EEPROM_CCK_TX_PWR_INX 0x5A
#define EEPROM_HT40_1S_TX_PWR_INX 0x60
#define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66
#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69
#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C
#define EEPROM_HT40_MAX_PWR_OFFSET 0x25
#define EEPROM_HT20_MAX_PWR_OFFSET 0x22

/* Calibration and option byte offsets. */
#define EEPROM_THERMAL_METER 0x2a
#define EEPROM_XTAL_K 0x78
#define EEPROM_RF_OPT1 0x79
#define EEPROM_RF_OPT2 0x7A
#define EEPROM_RF_OPT3 0x7B
#define EEPROM_RF_OPT4 0x7C
#define EEPROM_CHANNEL_PLAN 0x28
#define EEPROM_VERSION 0x30
#define EEPROM_CUSTOMER_ID 0x31

#define EEPROM_PWRDIFF 0x54

#define EEPROM_TXPOWERCCK 0x10
#define EEPROM_TXPOWERHT40_1S 0x16
#define EEPROM_TXPOWERHT40_2SDIFF 0x66
#define EEPROM_TXPOWERHT20DIFF 0x1C
#define EEPROM_TXPOWER_OFDMDIFF 0x1F

#define EEPROM_TXPWR_GROUP 0x22

#define EEPROM_TSSI_A 0x29
#define EEPROM_TSSI_B 0x77

/* Same offset as EEPROM_CHANNEL_PLAN above; alternate spelling used by
 * some callers. */
#define EEPROM_CHANNELPLAN 0x28

#define RF_OPTION1 0x2B
#define RF_OPTION2 0x2C
#define RF_OPTION3 0x2D
#define RF_OPTION4 0x2E

/* REG_TXPAUSE per-queue stop bits. */
#define STOPBECON BIT(6)
#define STOPHIGHT BIT(5)
#define STOPMGT BIT(4)
#define STOPVO BIT(3)
#define STOPVI BIT(2)
#define STOPBE BIT(1)
#define STOPBK BIT(0)
663
/* REG_RCR (receive configuration register) bits. */
#define RCR_APPFCS BIT(31)
#define RCR_APP_MIC BIT(30)
#define RCR_APP_ICV BIT(29)
#define RCR_APP_PHYST_RXFF BIT(28)
#define RCR_APP_BA_SSN BIT(27)
#define RCR_ENMBID BIT(24)
#define RCR_LSIGEN BIT(23)
#define RCR_MFBEN BIT(22)
#define RCR_HTC_LOC_CTRL BIT(14)
#define RCR_AMF BIT(13)
#define RCR_ACF BIT(12)
#define RCR_ADF BIT(11)
#define RCR_AICV BIT(9)
#define RCR_ACRC32 BIT(8)
#define RCR_CBSSID_BCN BIT(7)
#define RCR_CBSSID_DATA BIT(6)
#define RCR_CBSSID RCR_CBSSID_DATA
#define RCR_APWRMGT BIT(5)
#define RCR_ADD3 BIT(4)
#define RCR_AB BIT(3)
#define RCR_AM BIT(2)
#define RCR_APM BIT(1)
#define RCR_AAP BIT(0)
#define RCR_MXDMA_OFFSET 8
#define RCR_FIFO_OFFSET 13

#define RSV_CTRL 0x001C
#define RD_CTRL 0x0524

/* Fix: the REG_USB_INFO..REG_USB_AGG_TH group that followed here was an
 * exact duplicate of the definitions earlier in this header and has been
 * removed. */

/* USB configuration registers (0xFExx range). */
#define REG_USB_VID 0xFE60
#define REG_USB_PID 0xFE62
#define REG_USB_OPTIONAL 0xFE64
#define REG_USB_CHIRP_K 0xFE65
#define REG_USB_PHY 0xFE66
#define REG_USB_MAC_ADDR 0xFE70
#define REG_USB_HRPWM 0xFE58
#define REG_USB_HCPWM 0xFE57
707
#define SW18_FPWM BIT(3)

/* Power isolation control bits. */
#define ISO_MD2PP BIT(0)
#define ISO_UA2USB BIT(1)
#define ISO_UD2CORE BIT(2)
#define ISO_PA2PCIE BIT(3)
#define ISO_PD2CORE BIT(4)
#define ISO_IP2MAC BIT(5)
#define ISO_DIOP BIT(6)
#define ISO_DIOE BIT(7)
#define ISO_EB2CORE BIT(8)
#define ISO_DIOR BIT(9)

#define PWC_EV25V BIT(14)
#define PWC_EV12V BIT(15)

/* Function-enable bits for the chip's internal blocks. */
#define FEN_BBRSTB BIT(0)
#define FEN_BB_GLB_RSTn BIT(1)
#define FEN_USBA BIT(2)
#define FEN_UPLL BIT(3)
#define FEN_USBD BIT(4)
#define FEN_DIO_PCIE BIT(5)
#define FEN_PCIEA BIT(6)
#define FEN_PPLL BIT(7)
#define FEN_PCIED BIT(8)
#define FEN_DIOE BIT(9)
#define FEN_CPUEN BIT(10)
#define FEN_DCORE BIT(11)
#define FEN_ELDR BIT(12)
#define FEN_DIO_RF BIT(13)
#define FEN_HWPDN BIT(14)
#define FEN_MREGEN BIT(15)

/* Power/firmware state-machine control and status bits. */
#define PFM_LDALL BIT(0)
#define PFM_ALDN BIT(1)
#define PFM_LDKP BIT(2)
#define PFM_WOWL BIT(3)
#define EnPDN BIT(4)
#define PDN_PL BIT(5)
#define APFM_ONMAC BIT(8)
#define APFM_OFF BIT(9)
#define APFM_RSM BIT(10)
#define AFSM_HSUS BIT(11)
#define AFSM_PCIE BIT(12)
#define APDM_MAC BIT(13)
#define APDM_HOST BIT(14)
#define APDM_HPDN BIT(15)
#define RDY_MACON BIT(16)
#define SUS_HOST BIT(17)
#define ROP_ALD BIT(20)
#define ROP_PWR BIT(21)
#define ROP_SPS BIT(22)
#define SOP_MRST BIT(25)
#define SOP_FUSE BIT(26)
#define SOP_ABG BIT(27)
#define SOP_AMB BIT(28)
#define SOP_RCK BIT(29)
#define SOP_A8M BIT(30)
#define XOP_BTCK BIT(31)

/* Clock-gating and analog enable bits. */
#define ANAD16V_EN BIT(0)
#define ANA8M BIT(1)
#define MACSLP BIT(4)
#define LOADER_CLK_EN BIT(5)
#define _80M_SSC_DIS BIT(7)
#define _80M_SSC_EN_HO BIT(8)
#define PHY_SSC_RSTB BIT(9)
#define SEC_CLK_EN BIT(10)
#define MAC_CLK_EN BIT(11)
#define SYS_CLK_EN BIT(12)
#define RING_CLK_EN BIT(13)
779
/* Autoload-source selection bits. */
#define BOOT_FROM_EEPROM BIT(4)
#define EEPROM_EN BIT(5)

/* Analog front-end enable bits. */
#define AFE_BGEN BIT(0)
#define AFE_MBEN BIT(1)
#define MAC_ID_EN BIT(7)

/* Register write-lock bits. */
#define WLOCK_ALL BIT(0)
#define WLOCK_00 BIT(1)
#define WLOCK_04 BIT(2)
#define WLOCK_08 BIT(3)
#define WLOCK_40 BIT(4)
#define R_DIS_PRST_0 BIT(5)
#define R_DIS_PRST_1 BIT(6)
#define LOCK_ALL_EN BIT(7)

/* RF block enable/reset bits. */
#define RF_EN BIT(0)
#define RF_RSTB BIT(1)
#define RF_SDMRSTB BIT(2)

/* LDO (1.5 V analog) control bits and voltage-adjust field. */
#define LDA15_EN BIT(0)
#define LDA15_STBY BIT(1)
#define LDA15_OBUF BIT(2)
#define LDA15_REG_VOS BIT(3)
#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)

/* LDO (1.2 V digital) control bits and voltage-adjust field. */
#define LDV12_EN BIT(0)
#define LDV12_SDBY BIT(1)
#define LPLDO_HSM BIT(2)
#define LPLDO_LSM_DIS BIT(3)
#define _LDV12_VADJ(x) (((x) & 0xF) << 4)

/* Crystal oscillator control bits and drive-strength fields. */
#define XTAL_EN BIT(0)
#define XTAL_BSEL BIT(1)
#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
#define XTAL_GATE_USB BIT(8)
#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
#define XTAL_GATE_AFE BIT(11)
#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
#define XTAL_RF_GATE BIT(14)
#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
#define XTAL_GATE_DIG BIT(17)
#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
#define XTAL_BT_GATE BIT(20)
#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
#define _XTAL_GPIO(x) (((x) & 0x7) << 23)

/* Clock delay selection per destination block. */
#define CKDLY_AFE BIT(26)
#define CKDLY_USB BIT(27)
#define CKDLY_DIG BIT(28)
#define CKDLY_BT BIT(29)

/* Analog PLL control bits. */
#define APLL_EN BIT(0)
#define APLL_320_EN BIT(1)
#define APLL_FREF_SEL BIT(2)
#define APLL_EDGE_SEL BIT(3)
#define APLL_WDOGB BIT(4)
#define APLL_LPFEN BIT(5)

/* APLL reference-clock frequency selection codes. */
#define APLL_REF_CLK_13MHZ 0x1
#define APLL_REF_CLK_19_2MHZ 0x2
#define APLL_REF_CLK_20MHZ 0x3
#define APLL_REF_CLK_25MHZ 0x4
#define APLL_REF_CLK_26MHZ 0x5
#define APLL_REF_CLK_38_4MHZ 0x6
#define APLL_REF_CLK_40MHZ 0x7

#define APLL_320EN BIT(14)
#define APLL_80EN BIT(15)
#define APLL_1MEN BIT(24)

/* Efuse access control bits. */
#define ALD_EN BIT(18)
#define EF_PD BIT(19)
#define EF_FLAG BIT(31)

#define EF_TRPT BIT(7)
#define LDOE25_EN BIT(31)
858
#define RSM_EN BIT(0)
#define Timer_EN BIT(4)

/* Pin-mux / interface enable bits (TRSW, UART, SIC, PMAC). */
#define TRSW0EN BIT(2)
#define TRSW1EN BIT(3)
#define EROM_EN BIT(4)
#define EnBT BIT(5)
#define EnUart BIT(8)
#define Uart_910 BIT(9)
#define EnPMAC BIT(10)
#define SIC_SWRST BIT(11)
#define EnSIC BIT(12)
#define SIC_23 BIT(13)
#define EnHDP BIT(14)
#define SIC_LBK BIT(15)

/* LED control bits. */
#define LED0PL BIT(4)
#define LED1PL BIT(12)
#define LED0DIS BIT(7)

/* MCU firmware-download (REG_MCUFWDL) status bits. */
#define MCUFWDL_EN BIT(0)
#define MCUFWDL_RDY BIT(1)
#define FWDL_ChkSum_rpt BIT(2)
#define MACINI_RDY BIT(3)
#define BBINI_RDY BIT(4)
#define RFINI_RDY BIT(5)
#define WINTINI_RDY BIT(6)
#define CPRST BIT(23)

/* System configuration / strap status bits. */
#define XCLK_VLD BIT(0)
#define ACLK_VLD BIT(1)
#define UCLK_VLD BIT(2)
#define PCLK_VLD BIT(3)
#define PCIRSTB BIT(4)
#define V15_VLD BIT(5)
#define TRP_B15V_EN BIT(7)
#define SIC_IDLE BIT(8)
#define BD_MAC2 BIT(9)
#define BD_MAC1 BIT(10)
#define IC_MACPHY_MODE BIT(11)
#define BT_FUNC BIT(16)
#define VENDOR_ID BIT(19)
#define PAD_HWPD_IDN BIT(22)
#define TRP_VAUX_EN BIT(23)
#define TRP_BT_EN BIT(24)
#define BD_PKG_SEL BIT(25)
#define BD_HCI_SEL BIT(26)
#define TYPE_ID BIT(27)
907
/* Chip version field within the system configuration register. */
#define CHIP_VER_RTL_MASK 0xF000
#define CHIP_VER_RTL_SHIFT 12

/* Loopback-mode byte of REG_CR (defined elsewhere in this header). */
#define REG_LBMODE (REG_CR + 3)

/* REG_CR function-enable bits. */
#define HCI_TXDMA_EN BIT(0)
#define HCI_RXDMA_EN BIT(1)
#define TXDMA_EN BIT(2)
#define RXDMA_EN BIT(3)
#define PROTOCOL_EN BIT(4)
#define SCHEDULE_EN BIT(5)
#define MACTXEN BIT(6)
#define MACRXEN BIT(7)
#define ENSWBCN BIT(8)
#define ENSEC BIT(9)

/* 2-bit network-type field within REG_CR and its values. */
#define _NETTYPE(x) (((x) & 0x3) << 16)
#define MASK_NETTYPE 0x30000
#define NT_NO_LINK 0x0
#define NT_LINK_AD_HOC 0x1
#define NT_LINK_AP 0x2
#define NT_AS_AP 0x3

/* 4-bit loopback-mode field and its values. */
#define _LBMODE(x) (((x) & 0xF) << 24)
#define MASK_LBMODE 0xF000000
#define LOOPBACK_NORMAL 0x0
#define LOOPBACK_IMMEDIATELY 0xB
#define LOOPBACK_MAC_DELAY 0x3
#define LOOPBACK_PHY 0x1
#define LOOPBACK_DMA 0x7

/* RX/TX packet-buffer page-size fields of REG_PBP. */
#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
#define _PSRX_MASK 0xF
#define _PSTX_MASK 0xF0
#define _PSRX(x) (x)
#define _PSTX(x) ((x) << 4)
945
/* Packet-buffer page-size codes for the _PSRX/_PSTX fields. */
#define PBP_64 0x0
#define PBP_128 0x1
#define PBP_256 0x2
#define PBP_512 0x3
#define PBP_1024 0x4

/* RX DMA control and per-queue shortcut-status bits. */
#define RXDMA_ARBBW_EN BIT(0)
#define RXSHFT_EN BIT(1)
#define RXDMA_AGG_EN BIT(2)
#define QS_VO_QUEUE BIT(8)
#define QS_VI_QUEUE BIT(9)
#define QS_BE_QUEUE BIT(10)
#define QS_BK_QUEUE BIT(11)
#define QS_MANAGER_QUEUE BIT(12)
#define QS_HIGH_QUEUE BIT(13)

/* High-queue selection bits per AC queue. */
#define HQSEL_VOQ BIT(0)
#define HQSEL_VIQ BIT(1)
#define HQSEL_BEQ BIT(2)
#define HQSEL_BKQ BIT(3)
#define HQSEL_MGTQ BIT(4)
#define HQSEL_HIQ BIT(5)

/* 2-bit TX-DMA queue-mapping fields, one per hardware queue. */
#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)

/* Values for the queue-mapping fields above. */
#define QUEUE_LOW 1
#define QUEUE_NORMAL 2
#define QUEUE_HIGH 3

/* Link List Table (REG_LLT_INIT) operation codes and field builders. */
#define _LLT_NO_ACTIVE 0x0
#define _LLT_WRITE_ACCESS 0x1
#define _LLT_READ_ACCESS 0x2

#define _LLT_INIT_DATA(x) ((x) & 0xFF)
#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
#define _LLT_OP(x) (((x) & 0x3) << 30)
#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)

/* BB indirect-access (REG_BB_ACCEESS_CTRL) direction bits. */
#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
#define BB_WRITE_EN BIT(30)
#define BB_READ_EN BIT(31)

/* RQPN page-count field builders (high/low/public/normal queues). */
#define _HPQ(x) ((x) & 0xFF)
#define _LPQ(x) (((x) & 0xFF) << 8)
#define _PUBQ(x) (((x) & 0xFF) << 16)
#define _NPQ(x) ((x) & 0xFF)

#define HPQ_PUBLIC_DIS BIT(24)
#define LPQ_PUBLIC_DIS BIT(25)
#define LD_RQPN BIT(31)
1001
/* REG_TDECTRL beacon-valid flag and beacon head-page field. */
#define BCN_VALID BIT(16)
#define BCN_HEAD(x) (((x) & 0xFF) << 8)
#define BCN_HEAD_MASK 0xFF00

#define BLK_DESC_NUM_SHIFT 4
#define BLK_DESC_NUM_MASK 0xF

#define DROP_DATA_EN BIT(9)

#define EN_AMPDU_RTY_NEW BIT(7)

/* Initial RTS MCS rate selection (6-bit field). */
#define _INIRTSMCS_SEL(x) ((x) & 0x3F)

/* REG_SPEC_SIFS CCK/OFDM byte fields. */
#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)

#define RATE_REG_BITMAP_ALL 0xFFFFF

#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)

/* RRSR response-subchannel field and its values. */
#define _RRSR_RSC(x) (((x) & 0x3) << 21)
#define RRSR_RSC_RESERVED 0x0
#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
#define RRSR_RSC_DUPLICATE_MODE 0x3

#define USE_SHORT_G1 BIT(20)

/* Per-MCS A-MPDU aggregation length limits (4 bits each). */
#define _AGGLMT_MCS0(x) ((x) & 0xF)
#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)

/* REG_RL retry-limit field positions. */
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0

/* Data auto-rate fallback rate codes (REG_DARFRC, two dwords). */
#define _DARF_RC1(x) ((x) & 0x1F)
#define _DARF_RC2(x) (((x) & 0x1F) << 8)
#define _DARF_RC3(x) (((x) & 0x1F) << 16)
#define _DARF_RC4(x) (((x) & 0x1F) << 24)
#define _DARF_RC5(x) ((x) & 0x1F)
#define _DARF_RC6(x) (((x) & 0x1F) << 8)
#define _DARF_RC7(x) (((x) & 0x1F) << 16)
#define _DARF_RC8(x) (((x) & 0x1F) << 24)

/* Response auto-rate fallback rate codes (REG_RARFRC, two dwords). */
#define _RARF_RC1(x) ((x) & 0x1F)
#define _RARF_RC2(x) (((x) & 0x1F) << 8)
#define _RARF_RC3(x) (((x) & 0x1F) << 16)
#define _RARF_RC4(x) (((x) & 0x1F) << 24)
#define _RARF_RC5(x) ((x) & 0x1F)
#define _RARF_RC6(x) (((x) & 0x1F) << 8)
#define _RARF_RC7(x) (((x) & 0x1F) << 16)
#define _RARF_RC8(x) (((x) & 0x1F) << 24)

/* EDCA parameter-set register field layout. */
#define AC_PARAM_TXOP_LIMIT_OFFSET 16
#define AC_PARAM_ECW_MAX_OFFSET 12
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0

#define _AIFS(x) (x)
#define _ECW_MAX_MIN(x) ((x) << 8)
#define _TXOP_LIMIT(x) ((x) << 16)

#define _BCNIFS(x) ((x) & 0xFF)
#define _BCNECW(x) ((((x) & 0xF)) << 8)

/* Long/short retry-limit fields of REG_RL. */
#define _LRL(x) ((x) & 0x3F)
#define _SRL(x) (((x) & 0x3F) << 8)
1074
/* SIFS timing fields: CCK/OFDM context (low byte) and TRX (high byte).
 * Fix: the two _TRX macros carried a stray trailing semicolon in their
 * replacement lists, which broke any expression use such as
 * (_SIFS_CCK_CTX(a) | _SIFS_CCK_TRX(b)); the semicolons are removed. */
#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)

#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
1080
/* TBTT prohibit hold-time field (high byte). */
#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)

#define DIS_EDCA_CNT_DWN BIT(11)

/* REG_BCN_CTRL beacon-function bits. */
#define EN_MBSSID BIT(1)
#define EN_TXBCN_RPT BIT(2)
#define EN_BCN_FUNCTION BIT(3)

/* REG_DUAL_TSF_RST bits. */
#define TSFTR_RST BIT(0)
#define TSFTR1_RST BIT(1)

#define STOP_BCNQ BIT(6)

/* Disable TSF update on beacon reception (normal vs test chip). */
#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
#define DIS_TSF_UDT0_TEST_CHIP BIT(5)

/* REG_ACMHWCTRL hardware-ACM enable and status bits. */
#define AcmHw_HwEn BIT(0)
#define AcmHw_BeqEn BIT(1)
#define AcmHw_ViqEn BIT(2)
#define AcmHw_VoqEn BIT(3)
#define AcmHw_BeqStatus BIT(4)
#define AcmHw_ViqStatus BIT(5)
#define AcmHw_VoqStatus BIT(6)

/* REG_APSD_CTRL bits. */
#define APSDOFF BIT(6)
#define APSDOFF_STATUS BIT(7)

#define BW_20MHZ BIT(2)

#define RATE_BITMAP_ALL 0xFFFFF

#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1

/* REG_TCR (transmit configuration) bits. */
#define TSFRST BIT(0)
#define DIS_GCLK BIT(1)
#define PAD_SEL BIT(2)
#define PWR_ST BIT(6)
#define PWRBIT_OW_EN BIT(7)
#define ACRC BIT(8)
#define CFENDFORM BIT(9)
#define ICV BIT(10)

/* RCR bit short names (same positions as the RCR_* set defined earlier
 * in this header). */
#define AAP BIT(0)
#define APM BIT(1)
#define AM BIT(2)
#define AB BIT(3)
#define ADD3 BIT(4)
#define APWRMGT BIT(5)
#define CBSSID BIT(6)
#define CBSSID_DATA BIT(6)
#define CBSSID_BCN BIT(7)
#define ACRC32 BIT(8)
#define AICV BIT(9)
#define ADF BIT(11)
#define ACF BIT(12)
#define AMF BIT(13)
#define HTC_LOC_CTRL BIT(14)
#define UC_DATA_EN BIT(16)
#define BM_DATA_EN BIT(17)
#define MFBEN BIT(22)
#define LSIGEN BIT(23)
#define EnMBID BIT(24)
#define APP_BASSN BIT(27)
#define APP_PHYSTS BIT(28)
#define APP_ICV BIT(29)
#define APP_MIC BIT(30)
#define APP_FCS BIT(31)
1148
/* A-MPDU minimum spacing and short-GI padding fields. */
#define _MIN_SPACE(x) ((x) & 0x7)
#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)

/* RX error-report counter type selectors (REG_RXERR_RPT). */
#define RXERR_TYPE_OFDM_PPDU 0
#define RXERR_TYPE_OFDM_FALSE_ALARM 1
#define RXERR_TYPE_OFDM_MPDU_OK 2
#define RXERR_TYPE_OFDM_MPDU_FAIL 3
#define RXERR_TYPE_CCK_PPDU 4
#define RXERR_TYPE_CCK_FALSE_ALARM 5
#define RXERR_TYPE_CCK_MPDU_OK 6
#define RXERR_TYPE_CCK_MPDU_FAIL 7
#define RXERR_TYPE_HT_PPDU 8
#define RXERR_TYPE_HT_FALSE_ALARM 9
#define RXERR_TYPE_HT_MPDU_TOTAL 10
#define RXERR_TYPE_HT_MPDU_OK 11
#define RXERR_TYPE_HT_MPDU_FAIL 12
#define RXERR_TYPE_RX_FULL_DROP 15

#define RXERR_COUNTER_MASK 0xFFFFF
#define RXERR_RPT_RST BIT(27)
#define _RXERR_RPT_SEL(type) ((type) << 28)

/* REG_SECCFG security configuration bits. */
#define SCR_TxUseDK BIT(0)
#define SCR_RxUseDK BIT(1)
#define SCR_TxEncEnable BIT(2)
#define SCR_RxDecEnable BIT(3)
#define SCR_SKByA2 BIT(4)
#define SCR_NoSKMC BIT(5)
#define SCR_TXBCUSEDK BIT(6)
#define SCR_RXBCUSEDK BIT(7)

/* USB speed and endpoint-configuration fields. */
#define USB_IS_HIGH_SPEED 0
#define USB_IS_FULL_SPEED 1
#define USB_SPEED_MASK BIT(5)

#define USB_NORMAL_SIE_EP_MASK 0xF
#define USB_NORMAL_SIE_EP_SHIFT 4

#define USB_TEST_EP_MASK 0x30
#define USB_TEST_EP_SHIFT 4
1189
#define USB_AGG_EN BIT(3)

/* Misc MAC constants.
 * Fix: MAX_MSS_DENSITY_2T/_1T were redefined here with the same values as
 * the definitions earlier in this header; the duplicates are removed. */
#define MAC_ADDR_LEN 6
#define LAST_ENTRY_OF_TX_PKT_BUFFER 255

#define POLLING_LLT_THRESHOLD 20
#define POLLING_READY_TIMEOUT_COUNT 1000

/* EEPROM command-register operating-mode field and values. */
#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_LOAD 1

#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE

#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1208
/* Pseudo-MAC (PMAC) test-mode registers (0x100 range). */
#define RPMAC_RESET 0x100
#define RPMAC_TXSTART 0x104
#define RPMAC_TXLEGACYSIG 0x108
#define RPMAC_TXHTSIG1 0x10c
#define RPMAC_TXHTSIG2 0x110
#define RPMAC_PHYDEBUG 0x114
#define RPMAC_TXPACKETNUM 0x118
#define RPMAC_TXIDLE 0x11c
#define RPMAC_TXMACHEADER0 0x120
#define RPMAC_TXMACHEADER1 0x124
#define RPMAC_TXMACHEADER2 0x128
#define RPMAC_TXMACHEADER3 0x12c
#define RPMAC_TXMACHEADER4 0x130
#define RPMAC_TXMACHEADER5 0x134
#define RPMAC_TXDADATYPE 0x138
#define RPMAC_TXRANDOMSEED 0x13c
#define RPMAC_CCKPLCPPREAMBLE 0x140
#define RPMAC_CCKPLCPHEADER 0x144
#define RPMAC_CCKCRC16 0x148
#define RPMAC_OFDMRXCRC32OK 0x170
#define RPMAC_OFDMRXCRC32Er 0x174
#define RPMAC_OFDMRXPARITYER 0x178
#define RPMAC_OFDMRXCRC8ER 0x17c
#define RPMAC_CCKCRXRC16ER 0x180
#define RPMAC_CCKCRXRC32ER 0x184
#define RPMAC_CCKCRXRC32OK 0x188
#define RPMAC_TXSTATUS 0x18c

/* Baseband FPGA0 block registers (0x800 range): RF mode, timing, and
 * per-path (A/B/C/D) HSSI/LSSI serial-interface control. */
#define RFPGA0_RFMOD 0x800

#define RFPGA0_TXINFO 0x804
#define RFPGA0_PSDFUNCTION 0x808

#define RFPGA0_TXGAINSTAGE 0x80c

#define RFPGA0_RFTIMING1 0x810
#define RFPGA0_RFTIMING2 0x814

#define RFPGA0_XA_HSSIPARAMETER1 0x820
#define RFPGA0_XA_HSSIPARAMETER2 0x824
#define RFPGA0_XB_HSSIPARAMETER1 0x828
#define RFPGA0_XB_HSSIPARAMETER2 0x82c

#define RFPGA0_XA_LSSIPARAMETER 0x840
#define RFPGA0_XB_LSSIPARAMETER 0x844

#define RFPGA0_RFWAKEUPPARAMETER 0x850
#define RFPGA0_RFSLEEPUPPARAMETER 0x854

#define RFPGA0_XAB_SWITCHCONTROL 0x858
#define RFPGA0_XCD_SWITCHCONTROL 0x85c

#define RFPGA0_XA_RFINTERFACEOE 0x860
#define RFPGA0_XB_RFINTERFACEOE 0x864

#define RFPGA0_XAB_RFINTERFACESW 0x870
#define RFPGA0_XCD_RFINTERFACESW 0x874

/* NOTE(review): lowercase "rFPGA0" prefix is inconsistent with the rest
 * of the block but kept because callers reference these names. */
#define rFPGA0_XAB_RFPARAMETER 0x878
#define rFPGA0_XCD_RFPARAMETER 0x87c

#define RFPGA0_ANALOGPARAMETER1 0x880
#define RFPGA0_ANALOGPARAMETER2 0x884
#define RFPGA0_ANALOGPARAMETER3 0x888
#define RFPGA0_ANALOGPARAMETER4 0x88c

#define RFPGA0_XA_LSSIREADBACK 0x8a0
#define RFPGA0_XB_LSSIREADBACK 0x8a4
#define RFPGA0_XC_LSSIREADBACK 0x8a8
#define RFPGA0_XD_LSSIREADBACK 0x8ac

#define RFPGA0_PSDREPORT 0x8b4
#define TRANSCEIVEA_HSPI_READBACK 0x8b8
#define TRANSCEIVEB_HSPI_READBACK 0x8bc
#define RFPGA0_XAB_RFINTERFACERB 0x8e0
#define RFPGA0_XCD_RFINTERFACERB 0x8e4

/* Baseband FPGA1 block registers (0x900 range). */
#define RFPGA1_RFMOD 0x900

#define RFPGA1_TXBLOCK 0x904
#define RFPGA1_DEBUGSELECT 0x908
#define RFPGA1_TXINFO 0x90c

/* CCK PHY block registers (0xa00 range). */
#define RCCK0_SYSTEM 0xa00

#define RCCK0_AFESETTING 0xa04
#define RCCK0_CCA 0xa08

#define RCCK0_RXAGC1 0xa0c
#define RCCK0_RXAGC2 0xa10

#define RCCK0_RXHP 0xa14

#define RCCK0_DSPPARAMETER1 0xa18
#define RCCK0_DSPPARAMETER2 0xa1c

#define RCCK0_TXFILTER1 0xa20
#define RCCK0_TXFILTER2 0xa24
#define RCCK0_DEBUGPORT 0xa28
#define RCCK0_FALSEALARMREPORT 0xa2c
#define RCCK0_TRSSIREPORT 0xa50
#define RCCK0_RXREPORT 0xa54
#define RCCK0_FACOUNTERLOWER 0xa5c
#define RCCK0_FACOUNTERUPPER 0xa58

/* OFDM PHY block registers (0xc00 range): RX front-end, IQ imbalance,
 * detectors, DSP and per-path AGC cores. */
#define ROFDM0_LSTF 0xc00

#define ROFDM0_TRXPATHENABLE 0xc04
#define ROFDM0_TRMUXPAR 0xc08
#define ROFDM0_TRSWISOLATION 0xc0c

#define ROFDM0_XARXAFE 0xc10
#define ROFDM0_XARXIQIMBALANCE 0xc14
#define ROFDM0_XBRXAFE 0xc18
#define ROFDM0_XBRXIQIMBALANCE 0xc1c
#define ROFDM0_XCRXAFE 0xc20
#define ROFDM0_XCRXIQIMBANLANCE 0xc24
#define ROFDM0_XDRXAFE 0xc28
#define ROFDM0_XDRXIQIMBALANCE 0xc2c

#define ROFDM0_RXDETECTOR1 0xc30
#define ROFDM0_RXDETECTOR2 0xc34
#define ROFDM0_RXDETECTOR3 0xc38
#define ROFDM0_RXDETECTOR4 0xc3c

#define ROFDM0_RXDSP 0xc40
#define ROFDM0_CFOANDDAGC 0xc44
#define ROFDM0_CCADROPTHRESHOLD 0xc48
#define ROFDM0_ECCATHRESHOLD 0xc4c

#define ROFDM0_XAAGCCORE1 0xc50
#define ROFDM0_XAAGCCORE2 0xc54
#define ROFDM0_XBAGCCORE1 0xc58
#define ROFDM0_XBAGCCORE2 0xc5c
#define ROFDM0_XCAGCCORE1 0xc60
#define ROFDM0_XCAGCCORE2 0xc64
#define ROFDM0_XDAGCCORE1 0xc68
#define ROFDM0_XDAGCCORE2 0xc6c

#define ROFDM0_AGCPARAMETER1 0xc70
#define ROFDM0_AGCPARAMETER2 0xc74
#define ROFDM0_AGCRSSITABLE 0xc78
#define ROFDM0_HTSTFAGC 0xc7c
1352
1353#define ROFDM0_XATXIQIMBALANCE 0xc80
1354#define ROFDM0_XATXAFE 0xc84
1355#define ROFDM0_XBTXIQIMBALANCE 0xc88
1356#define ROFDM0_XBTXAFE 0xc8c
1357#define ROFDM0_XCTXIQIMBALANCE 0xc90
1358#define ROFDM0_XCTXAFE 0xc94
1359#define ROFDM0_XDTXIQIMBALANCE 0xc98
1360#define ROFDM0_XDTXAFE 0xc9c
1361
1362#define ROFDM0_RXIQEXTANTA 0xca0
1363
1364#define ROFDM0_RXHPPARAMETER 0xce0
1365#define ROFDM0_TXPSEUDONOISEWGT 0xce4
1366#define ROFDM0_FRAMESYNC 0xcf0
1367#define ROFDM0_DFSREPORT 0xcf4
1368#define ROFDM0_TXCOEFF1 0xca4
1369#define ROFDM0_TXCOEFF2 0xca8
1370#define ROFDM0_TXCOEFF3 0xcac
1371#define ROFDM0_TXCOEFF4 0xcb0
1372#define ROFDM0_TXCOEFF5 0xcb4
1373#define ROFDM0_TXCOEFF6 0xcb8
1374
1375#define ROFDM1_LSTF 0xd00
1376#define ROFDM1_TRXPATHENABLE 0xd04
1377
1378#define ROFDM1_CF0 0xd08
1379#define ROFDM1_CSI1 0xd10
1380#define ROFDM1_SBD 0xd14
1381#define ROFDM1_CSI2 0xd18
1382#define ROFDM1_CFOTRACKING 0xd2c
1383#define ROFDM1_TRXMESAURE1 0xd34
1384#define ROFDM1_INTFDET 0xd3c
1385#define ROFDM1_PSEUDONOISESTATEAB 0xd50
1386#define ROFDM1_PSEUDONOISESTATECD 0xd54
1387#define ROFDM1_RXPSEUDONOISEWGT 0xd58
1388
1389#define ROFDM_PHYCOUNTER1 0xda0
1390#define ROFDM_PHYCOUNTER2 0xda4
1391#define ROFDM_PHYCOUNTER3 0xda8
1392
1393#define ROFDM_SHORTCFOAB 0xdac
1394#define ROFDM_SHORTCFOCD 0xdb0
1395#define ROFDM_LONGCFOAB 0xdb4
1396#define ROFDM_LONGCFOCD 0xdb8
1397#define ROFDM_TAILCF0AB 0xdbc
1398#define ROFDM_TAILCF0CD 0xdc0
1399#define ROFDM_PWMEASURE1 0xdc4
1400#define ROFDM_PWMEASURE2 0xdc8
1401#define ROFDM_BWREPORT 0xdcc
1402#define ROFDM_AGCREPORT 0xdd0
1403#define ROFDM_RXSNR 0xdd4
1404#define ROFDM_RXEVMCSI 0xdd8
1405#define ROFDM_SIGREPORT 0xddc
1406
1407#define RTXAGC_A_RATE18_06 0xe00
1408#define RTXAGC_A_RATE54_24 0xe04
1409#define RTXAGC_A_CCK1_MCS32 0xe08
1410#define RTXAGC_A_MCS03_MCS00 0xe10
1411#define RTXAGC_A_MCS07_MCS04 0xe14
1412#define RTXAGC_A_MCS11_MCS08 0xe18
1413#define RTXAGC_A_MCS15_MCS12 0xe1c
1414
1415#define RTXAGC_B_RATE18_06 0x830
1416#define RTXAGC_B_RATE54_24 0x834
1417#define RTXAGC_B_CCK1_55_MCS32 0x838
1418#define RTXAGC_B_MCS03_MCS00 0x83c
1419#define RTXAGC_B_MCS07_MCS04 0x848
1420#define RTXAGC_B_MCS11_MCS08 0x84c
1421#define RTXAGC_B_MCS15_MCS12 0x868
1422#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
1423
1424#define RZEBRA1_HSSIENABLE 0x0
1425#define RZEBRA1_TRXENABLE1 0x1
1426#define RZEBRA1_TRXENABLE2 0x2
1427#define RZEBRA1_AGC 0x4
1428#define RZEBRA1_CHARGEPUMP 0x5
1429#define RZEBRA1_CHANNEL 0x7
1430
1431#define RZEBRA1_TXGAIN 0x8
1432#define RZEBRA1_TXLPF 0x9
1433#define RZEBRA1_RXLPF 0xb
1434#define RZEBRA1_RXHPFCORNER 0xc
1435
1436#define RGLOBALCTRL 0
1437#define RRTL8256_TXLPF 19
1438#define RRTL8256_RXLPF 11
1439#define RRTL8258_TXLPF 0x11
1440#define RRTL8258_RXLPF 0x13
1441#define RRTL8258_RSSILPF 0xa
1442
1443#define RF_AC 0x00
1444
1445#define RF_IQADJ_G1 0x01
1446#define RF_IQADJ_G2 0x02
1447#define RF_POW_TRSW 0x05
1448
1449#define RF_GAIN_RX 0x06
1450#define RF_GAIN_TX 0x07
1451
1452#define RF_TXM_IDAC 0x08
1453#define RF_BS_IQGEN 0x0F
1454
1455#define RF_MODE1 0x10
1456#define RF_MODE2 0x11
1457
1458#define RF_RX_AGC_HP 0x12
1459#define RF_TX_AGC 0x13
1460#define RF_BIAS 0x14
1461#define RF_IPA 0x15
1462#define RF_POW_ABILITY 0x17
1463#define RF_MODE_AG 0x18
1464#define RRFCHANNEL 0x18
1465#define RF_CHNLBW 0x18
1466#define RF_TOP 0x19
1467
1468#define RF_RX_G1 0x1A
1469#define RF_RX_G2 0x1B
1470
1471#define RF_RX_BB2 0x1C
1472#define RF_RX_BB1 0x1D
1473
1474#define RF_RCK1 0x1E
1475#define RF_RCK2 0x1F
1476
1477#define RF_TX_G1 0x20
1478#define RF_TX_G2 0x21
1479#define RF_TX_G3 0x22
1480
1481#define RF_TX_BB1 0x23
1482#define RF_T_METER 0x24
1483
1484#define RF_SYN_G1 0x25
1485#define RF_SYN_G2 0x26
1486#define RF_SYN_G3 0x27
1487#define RF_SYN_G4 0x28
1488#define RF_SYN_G5 0x29
1489#define RF_SYN_G6 0x2A
1490#define RF_SYN_G7 0x2B
1491#define RF_SYN_G8 0x2C
1492
1493#define RF_RCK_OS 0x30
1494#define RF_TXPA_G1 0x31
1495#define RF_TXPA_G2 0x32
1496#define RF_TXPA_G3 0x33
1497
1498#define BBBRESETB 0x100
1499#define BGLOBALRESETB 0x200
1500#define BOFDMTXSTART 0x4
1501#define BCCKTXSTART 0x8
1502#define BCRC32DEBUG 0x100
1503#define BPMACLOOPBACK 0x10
1504#define BTXLSIG 0xffffff
1505#define BOFDMTXRATE 0xf
1506#define BOFDMTXRESERVED 0x10
1507#define BOFDMTXLENGTH 0x1ffe0
1508#define BOFDMTXPARITY 0x20000
1509#define BTXHTSIG1 0xffffff
1510#define BTXHTMCSRATE 0x7f
1511#define BTXHTBW 0x80
1512#define BTXHTLENGTH 0xffff00
1513#define BTXHTSIG2 0xffffff
1514#define BTXHTSMOOTHING 0x1
1515#define BTXHTSOUNDING 0x2
1516#define BTXHTRESERVED 0x4
1517#define BTXHTAGGREATION 0x8
1518#define BTXHTSTBC 0x30
1519#define BTXHTADVANCECODING 0x40
1520#define BTXHTSHORTGI 0x80
1521#define BTXHTNUMBERHT_LTF 0x300
1522#define BTXHTCRC8 0x3fc00
1523#define BCOUNTERRESET 0x10000
1524#define BNUMOFOFDMTX 0xffff
1525#define BNUMOFCCKTX 0xffff0000
1526#define BTXIDLEINTERVAL 0xffff
1527#define BOFDMSERVICE 0xffff0000
1528#define BTXMACHEADER 0xffffffff
1529#define BTXDATAINIT 0xff
1530#define BTXHTMODE 0x100
1531#define BTXDATATYPE 0x30000
1532#define BTXRANDOMSEED 0xffffffff
1533#define BCCKTXPREAMBLE 0x1
1534#define BCCKTXSFD 0xffff0000
1535#define BCCKTXSIG 0xff
1536#define BCCKTXSERVICE 0xff00
1537#define BCCKLENGTHEXT 0x8000
1538#define BCCKTXLENGHT 0xffff0000
1539#define BCCKTXCRC16 0xffff
1540#define BCCKTXSTATUS 0x1
1541#define BOFDMTXSTATUS 0x2
/* True when the register offset lies in the 8192S baseband block
 * (0x800..0xfff).  Argument is fully parenthesized so low-precedence
 * expressions (e.g. "addr & mask") evaluate correctly, and the reserved
 * identifier "_Offset" (underscore + uppercase) is avoided.
 */
#define IS_BB_REG_OFFSET_92S(_offset) \
	(((_offset) >= 0x800) && ((_offset) <= 0xfff))
1544
1545#define BRFMOD 0x1
1546#define BJAPANMODE 0x2
1547#define BCCKTXSC 0x30
1548#define BCCKEN 0x1000000
1549#define BOFDMEN 0x2000000
1550
1551#define BOFDMRXADCPHASE 0x10000
1552#define BOFDMTXDACPHASE 0x40000
1553#define BXATXAGC 0x3f
1554
1555#define BXBTXAGC 0xf00
1556#define BXCTXAGC 0xf000
1557#define BXDTXAGC 0xf0000
1558
1559#define BPASTART 0xf0000000
1560#define BTRSTART 0x00f00000
1561#define BRFSTART 0x0000f000
1562#define BBBSTART 0x000000f0
1563#define BBBCCKSTART 0x0000000f
1564#define BPAEND 0xf
1565#define BTREND 0x0f000000
1566#define BRFEND 0x000f0000
1567#define BCCAMASK 0x000000f0
1568#define BR2RCCAMASK 0x00000f00
1569#define BHSSI_R2TDELAY 0xf8000000
1570#define BHSSI_T2RDELAY 0xf80000
1571#define BCONTXHSSI 0x400
1572#define BIGFROMCCK 0x200
1573#define BAGCADDRESS 0x3f
1574#define BRXHPTX 0x7000
1575#define BRXHP2RX 0x38000
1576#define BRXHPCCKINI 0xc0000
1577#define BAGCTXCODE 0xc00000
1578#define BAGCRXCODE 0x300000
1579
1580#define B3WIREDATALENGTH 0x800
1581#define B3WIREADDREAALENGTH 0x400
1582
1583#define B3WIRERFPOWERDOWN 0x1
1584#define B5GPAPEPOLARITY 0x40000000
1585#define B2GPAPEPOLARITY 0x80000000
1586#define BRFSW_TXDEFAULTANT 0x3
1587#define BRFSW_TXOPTIONANT 0x30
1588#define BRFSW_RXDEFAULTANT 0x300
1589#define BRFSW_RXOPTIONANT 0x3000
1590#define BRFSI_3WIREDATA 0x1
1591#define BRFSI_3WIRECLOCK 0x2
1592#define BRFSI_3WIRELOAD 0x4
1593#define BRFSI_3WIRERW 0x8
1594#define BRFSI_3WIRE 0xf
1595
1596#define BRFSI_RFENV 0x10
1597
1598#define BRFSI_TRSW 0x20
1599#define BRFSI_TRSWB 0x40
1600#define BRFSI_ANTSW 0x100
1601#define BRFSI_ANTSWB 0x200
1602#define BRFSI_PAPE 0x400
1603#define BRFSI_PAPE5G 0x800
1604#define BBANDSELECT 0x1
1605#define BHTSIG2_GI 0x80
1606#define BHTSIG2_SMOOTHING 0x01
1607#define BHTSIG2_SOUNDING 0x02
1608#define BHTSIG2_AGGREATON 0x08
1609#define BHTSIG2_STBC 0x30
1610#define BHTSIG2_ADVCODING 0x40
1611#define BHTSIG2_NUMOFHTLTF 0x300
1612#define BHTSIG2_CRC8 0x3fc
1613#define BHTSIG1_MCS 0x7f
1614#define BHTSIG1_BANDWIDTH 0x80
1615#define BHTSIG1_HTLENGTH 0xffff
1616#define BLSIG_RATE 0xf
1617#define BLSIG_RESERVED 0x10
1618#define BLSIG_LENGTH 0x1fffe
1619#define BLSIG_PARITY 0x20
1620#define BCCKRXPHASE 0x4
1621
1622#define BLSSIREADADDRESS 0x7f800000
1623#define BLSSIREADEDGE 0x80000000
1624
1625#define BLSSIREADBACKDATA 0xfffff
1626
1627#define BLSSIREADOKFLAG 0x1000
1628#define BCCKSAMPLERATE 0x8
1629#define BREGULATOR0STANDBY 0x1
1630#define BREGULATORPLLSTANDBY 0x2
1631#define BREGULATOR1STANDBY 0x4
1632#define BPLLPOWERUP 0x8
1633#define BDPLLPOWERUP 0x10
1634#define BDA10POWERUP 0x20
1635#define BAD7POWERUP 0x200
1636#define BDA6POWERUP 0x2000
1637#define BXTALPOWERUP 0x4000
1638#define B40MDCLKPOWERUP 0x8000
1639#define BDA6DEBUGMODE 0x20000
1640#define BDA6SWING 0x380000
1641
1642#define BADCLKPHASE 0x4000000
1643#define B80MCLKDELAY 0x18000000
1644#define BAFEWATCHDOGENABLE 0x20000000
1645
1646#define BXTALCAP01 0xc0000000
1647#define BXTALCAP23 0x3
1648#define BXTALCAP92X 0x0f000000
1649#define BXTALCAP 0x0f000000
1650
1651#define BINTDIFCLKENABLE 0x400
1652#define BEXTSIGCLKENABLE 0x800
1653#define BBANDGAP_MBIAS_POWERUP 0x10000
1654#define BAD11SH_GAIN 0xc0000
1655#define BAD11NPUT_RANGE 0x700000
1656#define BAD110P_CURRENT 0x3800000
1657#define BLPATH_LOOPBACK 0x4000000
1658#define BQPATH_LOOPBACK 0x8000000
1659#define BAFE_LOOPBACK 0x10000000
1660#define BDA10_SWING 0x7e0
1661#define BDA10_REVERSE 0x800
1662#define BDA_CLK_SOURCE 0x1000
1663#define BDA7INPUT_RANGE 0x6000
1664#define BDA7_GAIN 0x38000
1665#define BDA7OUTPUT_CM_MODE 0x40000
1666#define BDA7INPUT_CM_MODE 0x380000
1667#define BDA7CURRENT 0xc00000
1668#define BREGULATOR_ADJUST 0x7000000
1669#define BAD11POWERUP_ATTX 0x1
1670#define BDA10PS_ATTX 0x10
1671#define BAD11POWERUP_ATRX 0x100
1672#define BDA10PS_ATRX 0x1000
1673#define BCCKRX_AGC_FORMAT 0x200
1674#define BPSDFFT_SAMPLE_POINT 0xc000
1675#define BPSD_AVERAGE_NUM 0x3000
1676#define BIQPATH_CONTROL 0xc00
1677#define BPSD_FREQ 0x3ff
1678#define BPSD_ANTENNA_PATH 0x30
1679#define BPSD_IQ_SWITCH 0x40
1680#define BPSD_RX_TRIGGER 0x400000
1681#define BPSD_TX_TRIGGER 0x80000000
1682#define BPSD_SINE_TONE_SCALE 0x7f000000
1683#define BPSD_REPORT 0xffff
1684
1685#define BOFDM_TXSC 0x30000000
1686#define BCCK_TXON 0x1
1687#define BOFDM_TXON 0x2
1688#define BDEBUG_PAGE 0xfff
1689#define BDEBUG_ITEM 0xff
1690#define BANTL 0x10
1691#define BANT_NONHT 0x100
1692#define BANT_HT1 0x1000
1693#define BANT_HT2 0x10000
1694#define BANT_HT1S1 0x100000
1695#define BANT_NONHTS1 0x1000000
1696
1697#define BCCK_BBMODE 0x3
1698#define BCCK_TXPOWERSAVING 0x80
1699#define BCCK_RXPOWERSAVING 0x40
1700
1701#define BCCK_SIDEBAND 0x10
1702
1703#define BCCK_SCRAMBLE 0x8
1704#define BCCK_ANTDIVERSITY 0x8000
1705#define BCCK_CARRIER_RECOVERY 0x4000
1706#define BCCK_TXRATE 0x3000
1707#define BCCK_DCCANCEL 0x0800
1708#define BCCK_ISICANCEL 0x0400
1709#define BCCK_MATCH_FILTER 0x0200
1710#define BCCK_EQUALIZER 0x0100
1711#define BCCK_PREAMBLE_DETECT 0x800000
1712#define BCCK_FAST_FALSECCAi 0x400000
1713#define BCCK_CH_ESTSTARTi 0x300000
1714#define BCCK_CCA_COUNTi 0x080000
1715#define BCCK_CS_LIM 0x070000
1716#define BCCK_BIST_MODEi 0x80000000
1717#define BCCK_CCAMASK 0x40000000
1718#define BCCK_TX_DAC_PHASE 0x4
1719#define BCCK_RX_ADC_PHASE 0x20000000
1720#define BCCKR_CP_MODE 0x0100
1721#define BCCK_TXDC_OFFSET 0xf0
1722#define BCCK_RXDC_OFFSET 0xf
1723#define BCCK_CCA_MODE 0xc000
1724#define BCCK_FALSECS_LIM 0x3f00
1725#define BCCK_CS_RATIO 0xc00000
1726#define BCCK_CORGBIT_SEL 0x300000
1727#define BCCK_PD_LIM 0x0f0000
1728#define BCCK_NEWCCA 0x80000000
1729#define BCCK_RXHP_OF_IG 0x8000
1730#define BCCK_RXIG 0x7f00
1731#define BCCK_LNA_POLARITY 0x800000
1732#define BCCK_RX1ST_BAIN 0x7f0000
1733#define BCCK_RF_EXTEND 0x20000000
1734#define BCCK_RXAGC_SATLEVEL 0x1f000000
1735#define BCCK_RXAGC_SATCOUNT 0xe0
1736#define bCCKRxRFSettle 0x1f
1737#define BCCK_FIXED_RXAGC 0x8000
1738#define BCCK_ANTENNA_POLARITY 0x2000
1739#define BCCK_TXFILTER_TYPE 0x0c00
1740#define BCCK_RXAGC_REPORTTYPE 0x0300
1741#define BCCK_RXDAGC_EN 0x80000000
1742#define BCCK_RXDAGC_PERIOD 0x20000000
1743#define BCCK_RXDAGC_SATLEVEL 0x1f000000
1744#define BCCK_TIMING_RECOVERY 0x800000
1745#define BCCK_TXC0 0x3f0000
1746#define BCCK_TXC1 0x3f000000
1747#define BCCK_TXC2 0x3f
1748#define BCCK_TXC3 0x3f00
1749#define BCCK_TXC4 0x3f0000
1750#define BCCK_TXC5 0x3f000000
1751#define BCCK_TXC6 0x3f
1752#define BCCK_TXC7 0x3f00
1753#define BCCK_DEBUGPORT 0xff0000
1754#define BCCK_DAC_DEBUG 0x0f000000
1755#define BCCK_FALSEALARM_ENABLE 0x8000
1756#define BCCK_FALSEALARM_READ 0x4000
1757#define BCCK_TRSSI 0x7f
1758#define BCCK_RXAGC_REPORT 0xfe
1759#define BCCK_RXREPORT_ANTSEL 0x80000000
1760#define BCCK_RXREPORT_MFOFF 0x40000000
1761#define BCCK_RXREPORT_SQLOSS 0x20000000
1762#define BCCK_RXREPORT_PKTLOSS 0x10000000
1763#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
1764#define BCCK_RXREPORT_RATEERROR 0x04000000
1765#define BCCK_RXREPORT_RXRATE 0x03000000
1766#define BCCK_RXFA_COUNTER_LOWER 0xff
1767#define BCCK_RXFA_COUNTER_UPPER 0xff000000
1768#define BCCK_RXHPAGC_START 0xe000
1769#define BCCK_RXHPAGC_FINAL 0x1c00
1770#define BCCK_RXFALSEALARM_ENABLE 0x8000
1771#define BCCK_FACOUNTER_FREEZE 0x4000
1772#define BCCK_TXPATH_SEL 0x10000000
1773#define BCCK_DEFAULT_RXPATH 0xc000000
1774#define BCCK_OPTION_RXPATH 0x3000000
1775
1776#define BNUM_OFSTF 0x3
1777#define BSHIFT_L 0xc0
1778#define BGI_TH 0xc
1779#define BRXPATH_A 0x1
1780#define BRXPATH_B 0x2
1781#define BRXPATH_C 0x4
1782#define BRXPATH_D 0x8
1783#define BTXPATH_A 0x1
1784#define BTXPATH_B 0x2
1785#define BTXPATH_C 0x4
1786#define BTXPATH_D 0x8
1787#define BTRSSI_FREQ 0x200
1788#define BADC_BACKOFF 0x3000
1789#define BDFIR_BACKOFF 0xc000
1790#define BTRSSI_LATCH_PHASE 0x10000
1791#define BRX_LDC_OFFSET 0xff
1792#define BRX_QDC_OFFSET 0xff00
1793#define BRX_DFIR_MODE 0x1800000
1794#define BRX_DCNF_TYPE 0xe000000
1795#define BRXIQIMB_A 0x3ff
1796#define BRXIQIMB_B 0xfc00
1797#define BRXIQIMB_C 0x3f0000
1798#define BRXIQIMB_D 0xffc00000
1799#define BDC_DC_NOTCH 0x60000
1800#define BRXNB_NOTCH 0x1f000000
1801#define BPD_TH 0xf
1802#define BPD_TH_OPT2 0xc000
1803#define BPWED_TH 0x700
1804#define BIFMF_WIN_L 0x800
1805#define BPD_OPTION 0x1000
1806#define BMF_WIN_L 0xe000
1807#define BBW_SEARCH_L 0x30000
1808#define BWIN_ENH_L 0xc0000
1809#define BBW_TH 0x700000
1810#define BED_TH2 0x3800000
1811#define BBW_OPTION 0x4000000
1812#define BRADIO_TH 0x18000000
1813#define BWINDOW_L 0xe0000000
1814#define BSBD_OPTION 0x1
1815#define BFRAME_TH 0x1c
1816#define BFS_OPTION 0x60
1817#define BDC_SLOPE_CHECK 0x80
1818#define BFGUARD_COUNTER_DC_L 0xe00
1819#define BFRAME_WEIGHT_SHORT 0x7000
1820#define BSUB_TUNE 0xe00000
1821#define BFRAME_DC_LENGTH 0xe000000
1822#define BSBD_START_OFFSET 0x30000000
1823#define BFRAME_TH_2 0x7
1824#define BFRAME_GI2_TH 0x38
1825#define BGI2_SYNC_EN 0x40
1826#define BSARCH_SHORT_EARLY 0x300
1827#define BSARCH_SHORT_LATE 0xc00
1828#define BSARCH_GI2_LATE 0x70000
1829#define BCFOANTSUM 0x1
1830#define BCFOACC 0x2
1831#define BCFOSTARTOFFSET 0xc
1832#define BCFOLOOPBACK 0x70
1833#define BCFOSUMWEIGHT 0x80
1834#define BDAGCENABLE 0x10000
1835#define BTXIQIMB_A 0x3ff
1836#define BTXIQIMB_b 0xfc00
1837#define BTXIQIMB_C 0x3f0000
1838#define BTXIQIMB_D 0xffc00000
1839#define BTXIDCOFFSET 0xff
1840#define BTXIQDCOFFSET 0xff00
1841#define BTXDFIRMODE 0x10000
1842#define BTXPESUDO_NOISEON 0x4000000
1843#define BTXPESUDO_NOISE_A 0xff
1844#define BTXPESUDO_NOISE_B 0xff00
1845#define BTXPESUDO_NOISE_C 0xff0000
1846#define BTXPESUDO_NOISE_D 0xff000000
1847#define BCCA_DROPOPTION 0x20000
1848#define BCCA_DROPTHRES 0xfff00000
1849#define BEDCCA_H 0xf
1850#define BEDCCA_L 0xf0
1851#define BLAMBDA_ED 0x300
1852#define BRX_INITIALGAIN 0x7f
1853#define BRX_ANTDIV_EN 0x80
1854#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
1855#define BRX_HIGHPOWER_FLOW 0x8000
1856#define BRX_AGC_FREEZE_THRES 0xc0000
1857#define BRX_FREEZESTEP_AGC1 0x300000
1858#define BRX_FREEZESTEP_AGC2 0xc00000
1859#define BRX_FREEZESTEP_AGC3 0x3000000
1860#define BRX_FREEZESTEP_AGC0 0xc000000
1861#define BRXRSSI_CMP_EN 0x10000000
1862#define BRXQUICK_AGCEN 0x20000000
1863#define BRXAGC_FREEZE_THRES_MODE 0x40000000
1864#define BRX_OVERFLOW_CHECKTYPE 0x80000000
1865#define BRX_AGCSHIFT 0x7f
1866#define BTRSW_TRI_ONLY 0x80
1867#define BPOWER_THRES 0x300
1868#define BRXAGC_EN 0x1
1869#define BRXAGC_TOGETHER_EN 0x2
1870#define BRXAGC_MIN 0x4
1871#define BRXHP_INI 0x7
1872#define BRXHP_TRLNA 0x70
1873#define BRXHP_RSSI 0x700
1874#define BRXHP_BBP1 0x7000
1875#define BRXHP_BBP2 0x70000
1876#define BRXHP_BBP3 0x700000
1877#define BRSSI_H 0x7f0000
1878#define BRSSI_GEN 0x7f000000
1879#define BRXSETTLE_TRSW 0x7
1880#define BRXSETTLE_LNA 0x38
1881#define BRXSETTLE_RSSI 0x1c0
1882#define BRXSETTLE_BBP 0xe00
1883#define BRXSETTLE_RXHP 0x7000
1884#define BRXSETTLE_ANTSW_RSSI 0x38000
1885#define BRXSETTLE_ANTSW 0xc0000
1886#define BRXPROCESS_TIME_DAGC 0x300000
1887#define BRXSETTLE_HSSI 0x400000
1888#define BRXPROCESS_TIME_BBPPW 0x800000
1889#define BRXANTENNA_POWER_SHIFT 0x3000000
1890#define BRSSI_TABLE_SELECT 0xc000000
1891#define BRXHP_FINAL 0x7000000
1892#define BRXHPSETTLE_BBP 0x7
1893#define BRXHTSETTLE_HSSI 0x8
1894#define BRXHTSETTLE_RXHP 0x70
1895#define BRXHTSETTLE_BBPPW 0x80
1896#define BRXHTSETTLE_IDLE 0x300
1897#define BRXHTSETTLE_RESERVED 0x1c00
1898#define BRXHT_RXHP_EN 0x8000
1899#define BRXAGC_FREEZE_THRES 0x30000
1900#define BRXAGC_TOGETHEREN 0x40000
1901#define BRXHTAGC_MIN 0x80000
1902#define BRXHTAGC_EN 0x100000
1903#define BRXHTDAGC_EN 0x200000
1904#define BRXHT_RXHP_BBP 0x1c00000
1905#define BRXHT_RXHP_FINAL 0xe0000000
1906#define BRXPW_RADIO_TH 0x3
1907#define BRXPW_RADIO_EN 0x4
1908#define BRXMF_HOLD 0x3800
1909#define BRXPD_DELAY_TH1 0x38
1910#define BRXPD_DELAY_TH2 0x1c0
1911#define BRXPD_DC_COUNT_MAX 0x600
1912#define BRXPD_DELAY_TH 0x8000
1913#define BRXPROCESS_DELAY 0xf0000
1914#define BRXSEARCHRANGE_GI2_EARLY 0x700000
1915#define BRXFRAME_FUARD_COUNTER_L 0x3800000
1916#define BRXSGI_GUARD_L 0xc000000
1917#define BRXSGI_SEARCH_L 0x30000000
1918#define BRXSGI_TH 0xc0000000
1919#define BDFSCNT0 0xff
1920#define BDFSCNT1 0xff00
1921#define BDFSFLAG 0xf0000
1922#define BMF_WEIGHT_SUM 0x300000
1923#define BMINIDX_TH 0x7f000000
1924#define BDAFORMAT 0x40000
1925#define BTXCH_EMU_ENABLE 0x01000000
1926#define BTRSW_ISOLATION_A 0x7f
1927#define BTRSW_ISOLATION_B 0x7f00
1928#define BTRSW_ISOLATION_C 0x7f0000
1929#define BTRSW_ISOLATION_D 0x7f000000
1930#define BEXT_LNA_GAIN 0x7c00
1931
1932#define BSTBC_EN 0x4
1933#define BANTENNA_MAPPING 0x10
1934#define BNSS 0x20
1935#define BCFO_ANTSUM_ID 0x200
1936#define BPHY_COUNTER_RESET 0x8000000
1937#define BCFO_REPORT_GET 0x4000000
1938#define BOFDM_CONTINUE_TX 0x10000000
1939#define BOFDM_SINGLE_CARRIER 0x20000000
1940#define BOFDM_SINGLE_TONE 0x40000000
1941#define BHT_DETECT 0x100
1942#define BCFOEN 0x10000
1943#define BCFOVALUE 0xfff00000
1944#define BSIGTONE_RE 0x3f
1945#define BSIGTONE_IM 0x7f00
1946#define BCOUNTER_CCA 0xffff
1947#define BCOUNTER_PARITYFAIL 0xffff0000
1948#define BCOUNTER_RATEILLEGAL 0xffff
1949#define BCOUNTER_CRC8FAIL 0xffff0000
1950#define BCOUNTER_MCSNOSUPPORT 0xffff
1951#define BCOUNTER_FASTSYNC 0xffff
1952#define BSHORTCFO 0xfff
1953#define BSHORTCFOT_LENGTH 12
1954#define BSHORTCFOF_LENGTH 11
1955#define BLONGCFO 0x7ff
1956#define BLONGCFOT_LENGTH 11
1957#define BLONGCFOF_LENGTH 11
1958#define BTAILCFO 0x1fff
1959#define BTAILCFOT_LENGTH 13
1960#define BTAILCFOF_LENGTH 12
1961#define BNOISE_EN_PWDB 0xffff
1962#define BCC_POWER_DB 0xffff0000
1963#define BMOISE_PWDB 0xffff
1964#define BPOWERMEAST_LENGTH 10
1965#define BPOWERMEASF_LENGTH 3
1966#define BRX_HT_BW 0x1
1967#define BRXSC 0x6
1968#define BRX_HT 0x8
1969#define BNB_INTF_DET_ON 0x1
1970#define BINTF_WIN_LEN_CFG 0x30
1971#define BNB_INTF_TH_CFG 0x1c0
1972#define BRFGAIN 0x3f
1973#define BTABLESEL 0x40
1974#define BTRSW 0x80
1975#define BRXSNR_A 0xff
1976#define BRXSNR_B 0xff00
1977#define BRXSNR_C 0xff0000
1978#define BRXSNR_D 0xff000000
1979#define BSNR_EVMT_LENGTH 8
1980#define BSNR_EVMF_LENGTH 1
1981#define BCSI1ST 0xff
1982#define BCSI2ND 0xff00
1983#define BRXEVM1ST 0xff0000
1984#define BRXEVM2ND 0xff000000
1985#define BSIGEVM 0xff
1986#define BPWDB 0xff00
1987#define BSGIEN 0x10000
1988
1989#define BSFACTOR_QMA1 0xf
1990#define BSFACTOR_QMA2 0xf0
1991#define BSFACTOR_QMA3 0xf00
1992#define BSFACTOR_QMA4 0xf000
1993#define BSFACTOR_QMA5 0xf0000
1994#define BSFACTOR_QMA6 0xf0000
1995#define BSFACTOR_QMA7 0xf00000
1996#define BSFACTOR_QMA8 0xf000000
1997#define BSFACTOR_QMA9 0xf0000000
1998#define BCSI_SCHEME 0x100000
1999
2000#define BNOISE_LVL_TOP_SET 0x3
2001#define BCHSMOOTH 0x4
2002#define BCHSMOOTH_CFG1 0x38
2003#define BCHSMOOTH_CFG2 0x1c0
2004#define BCHSMOOTH_CFG3 0xe00
2005#define BCHSMOOTH_CFG4 0x7000
2006#define BMRCMODE 0x800000
2007#define BTHEVMCFG 0x7000000
2008
2009#define BLOOP_FIT_TYPE 0x1
2010#define BUPD_CFO 0x40
2011#define BUPD_CFO_OFFDATA 0x80
2012#define BADV_UPD_CFO 0x100
2013#define BADV_TIME_CTRL 0x800
2014#define BUPD_CLKO 0x1000
2015#define BFC 0x6000
2016#define BTRACKING_MODE 0x8000
2017#define BPHCMP_ENABLE 0x10000
2018#define BUPD_CLKO_LTF 0x20000
2019#define BCOM_CH_CFO 0x40000
2020#define BCSI_ESTI_MODE 0x80000
2021#define BADV_UPD_EQZ 0x100000
2022#define BUCHCFG 0x7000000
2023#define BUPDEQZ 0x8000000
2024
2025#define BRX_PESUDO_NOISE_ON 0x20000000
2026#define BRX_PESUDO_NOISE_A 0xff
2027#define BRX_PESUDO_NOISE_B 0xff00
2028#define BRX_PESUDO_NOISE_C 0xff0000
2029#define BRX_PESUDO_NOISE_D 0xff000000
2030#define BRX_PESUDO_NOISESTATE_A 0xffff
2031#define BRX_PESUDO_NOISESTATE_B 0xffff0000
2032#define BRX_PESUDO_NOISESTATE_C 0xffff
2033#define BRX_PESUDO_NOISESTATE_D 0xffff0000
2034
2035#define BZEBRA1_HSSIENABLE 0x8
2036#define BZEBRA1_TRXCONTROL 0xc00
2037#define BZEBRA1_TRXGAINSETTING 0x07f
2038#define BZEBRA1_RXCOUNTER 0xc00
2039#define BZEBRA1_TXCHANGEPUMP 0x38
2040#define BZEBRA1_RXCHANGEPUMP 0x7
2041#define BZEBRA1_CHANNEL_NUM 0xf80
2042#define BZEBRA1_TXLPFBW 0x400
2043#define BZEBRA1_RXLPFBW 0x600
2044
2045#define BRTL8256REG_MODE_CTRL1 0x100
2046#define BRTL8256REG_MODE_CTRL0 0x40
2047#define BRTL8256REG_TXLPFBW 0x18
2048#define BRTL8256REG_RXLPFBW 0x600
2049
2050#define BRTL8258_TXLPFBW 0xc
2051#define BRTL8258_RXLPFBW 0xc00
2052#define BRTL8258_RSSILPFBW 0xc0
2053
2054#define BBYTE0 0x1
2055#define BBYTE1 0x2
2056#define BBYTE2 0x4
2057#define BBYTE3 0x8
2058#define BWORD0 0x3
2059#define BWORD1 0xc
2060#define BWORD 0xf
2061
2062#define MASKBYTE0 0xff
2063#define MASKBYTE1 0xff00
2064#define MASKBYTE2 0xff0000
2065#define MASKBYTE3 0xff000000
2066#define MASKHWORD 0xffff0000
2067#define MASKLWORD 0x0000ffff
2068#define MASKDWORD 0xffffffff
2069#define MASK12BITS 0xfff
2070#define MASKH4BITS 0xf0000000
2071#define MASKOFDM_D 0xffc00000
2072#define MASKCCK 0x3f3f3f3f
2073
2074#define MASK4BITS 0x0f
2075#define MASK20BITS 0xfffff
2076#define RFREG_OFFSET_MASK 0xfffff
2077
2078#define BENABLE 0x1
2079#define BDISABLE 0x0
2080
2081#define LEFT_ANTENNA 0x0
2082#define RIGHT_ANTENNA 0x1
2083
2084#define TCHECK_TXSTATUS 500
2085#define TUPDATE_RXCOUNTER 100
2086
2087/* 2 EFUSE_TEST (For RTL8723 partially) */
2088#define EFUSE_SEL(x) (((x) & 0x3) << 8)
2089#define EFUSE_SEL_MASK 0x300
2090#define EFUSE_WIFI_SEL_0 0x0
2091
2092/* Enable GPIO[9] as WiFi HW PDn source*/
2093#define WL_HWPDN_EN BIT(0)
2094/* WiFi HW PDn polarity control*/
2095#define WL_HWPDN_SL BIT(1)
2096
2097#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
new file mode 100644
index 000000000000..50dd2fb2c93d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.c
@@ -0,0 +1,505 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
38{
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_phy *rtlphy = &(rtlpriv->phy);
41
42 switch (bandwidth) {
43 case HT_CHANNEL_WIDTH_20:
44 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
45 0xfffff3ff) | 0x0400);
46 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
47 rtlphy->rfreg_chnlval[0]);
48 break;
49 case HT_CHANNEL_WIDTH_20_40:
50 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
51 0xfffff3ff));
52 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
53 rtlphy->rfreg_chnlval[0]);
54 break;
55 default:
56 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
57 "unknown bandwidth: %#X\n", bandwidth);
58 break;
59 }
60}
61
/* Write the CCK TX-AGC power values for both RF paths.
 *
 * @ppowerlevel: two-entry array of per-path base power indices
 *               (index RF90_PATH_A / RF90_PATH_B).
 *
 * Each path's index is replicated into all four bytes of a u32
 * (one byte per CCK rate), optionally biased by the channel-group-0
 * MCS offsets, clamped per byte to RF6052_MAX_TX_PWR, and then split
 * across the RTXAGC_* baseband registers.
 */
void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
					  u8 *ppowerlevel)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u32 tx_agc[2] = {0, 0}, tmpval;
	bool turbo_scanoff = false;
	u8 idx1, idx2;
	u8 *ptr;

	/* Any non-default regulatory setting disables the "full power
	 * while scanning" shortcut below. */
	if (rtlefuse->eeprom_regulatory != 0)
		turbo_scanoff = true;

	if (mac->act_scanning == true) {
		/* While scanning, default to maximum power (0x3f per rate
		 * byte) unless turbo-scan is off, in which case fall back
		 * to the caller-supplied levels. */
		tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
		tx_agc[RF90_PATH_B] = 0x3f3f3f3f;

		if (turbo_scanoff) {
			for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
				tx_agc[idx1] = ppowerlevel[idx1] |
				    (ppowerlevel[idx1] << 8) |
				    (ppowerlevel[idx1] << 16) |
				    (ppowerlevel[idx1] << 24);
			}
		}
	} else {
		/* Normal operation: replicate the base index into every
		 * rate byte of the per-path word. */
		for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
			tx_agc[idx1] = ppowerlevel[idx1] |
			    (ppowerlevel[idx1] << 8) |
			    (ppowerlevel[idx1] << 16) |
			    (ppowerlevel[idx1] << 24);
		}

		if (rtlefuse->eeprom_regulatory == 0) {
			/* Apply channel-group-0 MCS offsets. */
			tmpval = (rtlphy->mcs_offset[0][6]) +
				 (rtlphy->mcs_offset[0][7] << 8);
			tx_agc[RF90_PATH_A] += tmpval;

			/* NOTE(review): offset[15] is shifted by 24 while
			 * the path-A pair uses << 8; this matches the
			 * vendor lineage but looks asymmetric — confirm
			 * against the vendor driver before "fixing". */
			tmpval = (rtlphy->mcs_offset[0][14]) +
				 (rtlphy->mcs_offset[0][15] << 24);
			tx_agc[RF90_PATH_B] += tmpval;
		}
	}

	/* Clamp every rate byte to RF6052_MAX_TX_PWR; walking a u8
	 * pointer over the u32 clamps each byte regardless of host
	 * endianness. */
	for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
		ptr = (u8 *) (&(tx_agc[idx1]));
		for (idx2 = 0; idx2 < 4; idx2++) {
			if (*ptr > RF6052_MAX_TX_PWR)
				*ptr = RF6052_MAX_TX_PWR;
			ptr++;
		}
	}

	/* Path A, CCK 1M: byte 0 of tx_agc goes into byte 1 of
	 * RTXAGC_A_CCK1_MCS32. */
	tmpval = tx_agc[RF90_PATH_A] & 0xff;
	rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_A_CCK1_MCS32);

	/* Path A, CCK 2/5.5/11M: upper three bytes, masked so only the
	 * 2M/5.5M/11M fields are written. */
	tmpval = tx_agc[RF90_PATH_A] >> 8;

	tmpval = tmpval & 0xff00ffff;

	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK11_A_CCK2_11);

	/* Path B, CCK 11M: top byte of tx_agc into byte 0. */
	tmpval = tx_agc[RF90_PATH_B] >> 24;
	rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK11_A_CCK2_11);

	/* Path B, CCK 1/2/5.5M: low three bytes. */
	tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
	rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);

	RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
		"CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
		RTXAGC_B_CCK1_55_MCS32);
}
148
149static void rtl8723ae_phy_get_power_base(struct ieee80211_hw *hw,
150 u8 *ppowerlevel, u8 channel,
151 u32 *ofdmbase, u32 *mcsbase)
152{
153 struct rtl_priv *rtlpriv = rtl_priv(hw);
154 struct rtl_phy *rtlphy = &(rtlpriv->phy);
155 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
156 u32 powerBase0, powerBase1;
157 u8 legacy_pwrdiff, ht20_pwrdiff;
158 u8 i, powerlevel[2];
159
160 for (i = 0; i < 2; i++) {
161 powerlevel[i] = ppowerlevel[i];
162 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
163 powerBase0 = powerlevel[i] + legacy_pwrdiff;
164
165 powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
166 (powerBase0 << 8) | powerBase0;
167 *(ofdmbase + i) = powerBase0;
168 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
169 " [OFDM power base index rf(%c) = 0x%x]\n",
170 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i));
171 }
172
173 for (i = 0; i < 2; i++) {
174 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
175 ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
176 powerlevel[i] += ht20_pwrdiff;
177 }
178 powerBase1 = powerlevel[i];
179 powerBase1 = (powerBase1 << 24) |
180 (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
181
182 *(mcsbase + i) = powerBase1;
183
184 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
185 " [MCS power base index rf(%c) = 0x%x]\n",
186 ((i == 0) ? 'A' : 'B'), *(mcsbase + i));
187 }
188}
189
/*
 * Compute the packed 32-bit TX AGC value for one of the six OFDM/MCS power
 * registers, for both RF paths (rf 0 = A, rf 1 = B), honoring the regulatory
 * strategy stored in efuse (rtlefuse->eeprom_regulatory):
 *   0 / default - chip default MCS offsets plus the power base
 *   1           - Realtek regulatory: per-channel-group MCS offsets at 20 MHz,
 *                 bare power base at 40 MHz
 *   2           - power base only, no offsets
 *   3           - "customer limit": MCS offsets clamped byte-wise against the
 *                 efuse pwrgroup_ht20/ht40 limits, plus the power base
 * Finally a BT-coexistence high-power cut of 6 (BT1) or 12 (BT2) power steps
 * is subtracted from every rate byte.
 *
 * @channel:       1-based channel number (efuse tables indexed at channel-1)
 * @index:         which of the six TX AGC registers (0-5); index < 2 selects
 *                 the legacy-OFDM base, otherwise the MCS base
 * @powerBase0:    per-path packed base power for legacy OFDM rates
 * @powerBase1:    per-path packed base power for MCS rates
 * @p_outwriteval: out - one packed register value per RF path
 */
static void rtl8723ae_get_txpwr_val_by_reg(struct ieee80211_hw *hw,
					   u8 channel, u8 index,
					   u32 *powerBase0,
					   u32 *powerBase1,
					   u32 *p_outwriteval)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 i, chnlgroup = 0, pwr_diff_limit[4];
	u32 writeVal, customer_limit, rf;

	for (rf = 0; rf < 2; rf++) {
		switch (rtlefuse->eeprom_regulatory) {
		case 0:
			/* Default: chnlgroup-0 MCS offset + power base.
			 * Path B uses mcs_offset entries 8..13 (rf ? 8 : 0). */
			chnlgroup = 0;

			writeVal = rtlphy->mcs_offset[chnlgroup]
				   [index + (rf ? 8 : 0)] +
				   ((index < 2) ? powerBase0[rf] :
				   powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, "
				"writeVal(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeVal);
			break;
		case 1:
			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
				/* 40 MHz: no offset, base power only. */
				writeVal = ((index < 2) ? powerBase0[rf] :
					   powerBase1[rf]);

				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"Realtek regulatory, 40MHz, "
					"writeVal(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'), writeVal);
			} else {
				/* 20 MHz: map channel to a power group.
				 * NOTE(review): pwrgroup_cnt == 2 leaves
				 * chnlgroup at its previous value (0 on the
				 * first iteration) - confirm intended. */
				if (rtlphy->pwrgroup_cnt == 1)
					chnlgroup = 0;
				if (rtlphy->pwrgroup_cnt >= 3) {
					if (channel <= 3)
						chnlgroup = 0;
					else if (channel >= 4 && channel <= 9)
						chnlgroup = 1;
					else if (channel > 9)
						chnlgroup = 2;
					/* Bandwidth selects between the two
					 * halves of the group table. */
					if (rtlphy->current_chan_bw ==
					    HT_CHANNEL_WIDTH_20)
						chnlgroup++;
					else
						chnlgroup += 4;
				}

				writeVal = rtlphy->mcs_offset[chnlgroup]
					[index + (rf ? 8 : 0)] + ((index < 2) ?
					powerBase0[rf] :
					powerBase1[rf]);

				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'), writeVal);
			}
			break;
		case 2:
			/* "Better regulatory": base power only. */
			writeVal =
			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Better regulatory, writeVal(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeVal);
			break;
		case 3:
			chnlgroup = 0;

			if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 40MHz rf(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'),
					rtlefuse->pwrgroup_ht40[rf][channel-1]);
			} else {
				RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
					"customer's limit, 20MHz rf(%c) = 0x%x\n",
					((rf == 0) ? 'A' : 'B'),
					rtlefuse->pwrgroup_ht20[rf][channel-1]);
			}
			/* Clamp each per-rate offset byte (low 7 bits of each
			 * byte) to the efuse power-group limit for the current
			 * bandwidth. */
			for (i = 0; i < 4; i++) {
				pwr_diff_limit[i] =
				    (u8) ((rtlphy->mcs_offset
					  [chnlgroup][index + (rf ? 8 : 0)] &
					  (0x7f << (i * 8))) >> (i * 8));

				if (rtlphy->current_chan_bw ==
				    HT_CHANNEL_WIDTH_20_40) {
					if (pwr_diff_limit[i] >
					    rtlefuse->
					    pwrgroup_ht40[rf][channel - 1])
						pwr_diff_limit[i] =
						   rtlefuse->pwrgroup_ht40[rf]
						   [channel - 1];
				} else {
					if (pwr_diff_limit[i] >
					    rtlefuse->
					    pwrgroup_ht20[rf][channel - 1])
						pwr_diff_limit[i] =
						    rtlefuse->pwrgroup_ht20[rf]
						    [channel - 1];
				}
			}

			/* Re-pack the four clamped bytes. */
			customer_limit = (pwr_diff_limit[3] << 24) |
			    (pwr_diff_limit[2] << 16) |
			    (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer's limit rf(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), customer_limit);

			writeVal = customer_limit +
			    ((index < 2) ? powerBase0[rf] : powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"Customer, writeVal rf(%c)= 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeVal);
			break;
		default:
			/* Unknown regulatory value: same as case 0. */
			chnlgroup = 0;
			writeVal = rtlphy->mcs_offset[chnlgroup][index +
				   (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] :
				   powerBase1[rf]);

			RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
				"RTK better performance, writeVal rf(%c) = 0x%x\n",
				((rf == 0) ? 'A' : 'B'), writeVal);
			break;
		}

		/* BT coexistence high-power cut: 0x06/0x0c off each rate byte. */
		if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
			writeVal = writeVal - 0x06060606;
		else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
			 TXHIGHPWRLEVEL_BT2)
			writeVal = writeVal - 0x0c0c0c0c;
		*(p_outwriteval + rf) = writeVal;
	}
}
334
/*
 * Write one pre-computed per-path TX AGC value pair to the baseband.
 *
 * Each of the four rate bytes in pValue[rf] is clamped to RF6052_MAX_TX_PWR
 * before being written to the path-A or path-B register selected by @index.
 * For the highest MCS register of the configured RF type (MCS15_MCS12 on
 * 2T2R, MCS07_MCS04 otherwise), three extra bytes are written at 0xc90 (A)
 * or 0xc98 (B), each 6 steps below the previous value, floored at 0
 * (presumably TX power-training registers - confirm against vendor docs).
 *
 * @index:  which of the six TX AGC registers to program (0-5)
 * @pValue: two packed 32-bit values, one per RF path
 */
static void _rtl8723ae_write_ofdm_power_reg(struct ieee80211_hw *hw,
					    u8 index, u32 *pValue)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);

	/* Register offsets for path A, indexed by @index. */
	u16 regoffset_a[6] = {
		RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
		RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
		RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
	};
	/* Register offsets for path B, indexed by @index. */
	u16 regoffset_b[6] = {
		RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
		RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
		RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
	};
	u8 i, rf, pwr_val[4];
	u32 writeVal;
	u16 regoffset;

	for (rf = 0; rf < 2; rf++) {
		writeVal = pValue[rf];
		/* Clamp each rate byte (7-bit field) to the RF6052 maximum. */
		for (i = 0; i < 4; i++) {
			pwr_val[i] = (u8) ((writeVal & (0x7f <<
					   (i * 8))) >> (i * 8));

			if (pwr_val[i] > RF6052_MAX_TX_PWR)
				pwr_val[i] = RF6052_MAX_TX_PWR;
		}
		writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
			   (pwr_val[1] << 8) | pwr_val[0];

		if (rf == 0)
			regoffset = regoffset_a[index];
		else
			regoffset = regoffset_b[index];
		rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);

		RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
			"Set 0x%x = %08x\n", regoffset, writeVal);

		/* Extra programming only for the top MCS register of the
		 * active RF configuration. */
		if (((get_rf_type(rtlphy) == RF_2T2R) &&
		    (regoffset == RTXAGC_A_MCS15_MCS12 ||
		    regoffset == RTXAGC_B_MCS15_MCS12)) ||
		    ((get_rf_type(rtlphy) != RF_2T2R) &&
		    (regoffset == RTXAGC_A_MCS07_MCS04 ||
		    regoffset == RTXAGC_B_MCS07_MCS04))) {

			/* Start from the highest rate byte just written. */
			writeVal = pwr_val[3];
			/* regoffset is repurposed as the byte-write base. */
			if (regoffset == RTXAGC_A_MCS15_MCS12 ||
			    regoffset == RTXAGC_A_MCS07_MCS04)
				regoffset = 0xc90;
			if (regoffset == RTXAGC_B_MCS15_MCS12 ||
			    regoffset == RTXAGC_B_MCS07_MCS04)
				regoffset = 0xc98;

			/* Three descending values: -6 each step, min 0. */
			for (i = 0; i < 3; i++) {
				writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
				rtl_write_byte(rtlpriv, (u32) (regoffset + i),
					       (u8) writeVal);
			}
		}
	}
}
399
400void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
401 u8 *ppowerlevel, u8 channel)
402{
403 u32 writeVal[2], powerBase0[2], powerBase1[2];
404 u8 index;
405
406 rtl8723ae_phy_get_power_base(hw, ppowerlevel,
407 channel, &powerBase0[0], &powerBase1[0]);
408
409 for (index = 0; index < 6; index++) {
410 rtl8723ae_get_txpwr_val_by_reg(hw, channel, index,
411 &powerBase0[0],
412 &powerBase1[0],
413 &writeVal[0]);
414
415 _rtl8723ae_write_ofdm_power_reg(hw, index, &writeVal[0]);
416 }
417}
418
419static bool _rtl8723ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
420{
421 struct rtl_priv *rtlpriv = rtl_priv(hw);
422 struct rtl_phy *rtlphy = &(rtlpriv->phy);
423 u32 u4_regvalue = 0;
424 u8 rfpath;
425 bool rtstatus = true;
426 struct bb_reg_def *pphyreg;
427
428 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
429
430 pphyreg = &rtlphy->phyreg_def[rfpath];
431
432 switch (rfpath) {
433 case RF90_PATH_A:
434 case RF90_PATH_C:
435 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
436 BRFSI_RFENV);
437 break;
438 case RF90_PATH_B:
439 case RF90_PATH_D:
440 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
441 BRFSI_RFENV << 16);
442 break;
443 }
444
445 rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
446 udelay(1);
447
448 rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
449 udelay(1);
450
451 rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
452 B3WIREADDREAALENGTH, 0x0);
453 udelay(1);
454
455 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
456 udelay(1);
457
458 switch (rfpath) {
459 case RF90_PATH_A:
460 rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw,
461 (enum radio_path)rfpath);
462 break;
463 case RF90_PATH_B:
464 rtstatus = rtl8723ae_phy_config_rf_with_headerfile(hw,
465 (enum radio_path)rfpath);
466 break;
467 case RF90_PATH_C:
468 break;
469 case RF90_PATH_D:
470 break;
471 }
472 switch (rfpath) {
473 case RF90_PATH_A:
474 case RF90_PATH_C:
475 rtl_set_bbreg(hw, pphyreg->rfintfs,
476 BRFSI_RFENV, u4_regvalue);
477 break;
478 case RF90_PATH_B:
479 case RF90_PATH_D:
480 rtl_set_bbreg(hw, pphyreg->rfintfs,
481 BRFSI_RFENV << 16, u4_regvalue);
482 break;
483 }
484 if (rtstatus != true) {
485 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
486 "Radio[%d] Fail!!", rfpath);
487 return false;
488 }
489 }
490 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
491 return rtstatus;
492}
493
494bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw)
495{
496 struct rtl_priv *rtlpriv = rtl_priv(hw);
497 struct rtl_phy *rtlphy = &(rtlpriv->phy);
498
499 if (rtlphy->rf_type == RF_1T1R)
500 rtlphy->num_total_rfpath = 1;
501 else
502 rtlphy->num_total_rfpath = 2;
503
504 return _rtl8723ae_phy_rf6052_config_parafile(hw);
505}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
new file mode 100644
index 000000000000..d0f9dd79abea
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_RF_H__
31#define __RTL8723E_RF_H__
32
33#define RF6052_MAX_TX_PWR 0x3F
34
35extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
36 u8 bandwidth);
37extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
38 u8 *ppowerlevel);
39extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel, u8 channel);
41extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
42
43#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
new file mode 100644
index 000000000000..18b0bc51766b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -0,0 +1,380 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include <linux/vmalloc.h>
32#include <linux/module.h>
33
34#include "../core.h"
35#include "../pci.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "dm.h"
40#include "hw.h"
41#include "sw.h"
42#include "trx.h"
43#include "led.h"
44#include "table.h"
45#include "hal_btc.h"
46
47static void rtl8723ae_init_aspm_vars(struct ieee80211_hw *hw)
48{
49 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
50
51 /*close ASPM for AMD defaultly */
52 rtlpci->const_amdpci_aspm = 0;
53
54 /* ASPM PS mode.
55 * 0 - Disable ASPM,
56 * 1 - Enable ASPM without Clock Req,
57 * 2 - Enable ASPM with Clock Req,
58 * 3 - Alwyas Enable ASPM with Clock Req,
59 * 4 - Always Enable ASPM without Clock Req.
60 * set defult to RTL8192CE:3 RTL8192E:2
61 */
62 rtlpci->const_pci_aspm = 3;
63
64 /*Setting for PCI-E device */
65 rtlpci->const_devicepci_aspm_setting = 0x03;
66
67 /*Setting for PCI-E bridge */
68 rtlpci->const_hostpci_aspm_setting = 0x02;
69
70 /* In Hw/Sw Radio Off situation.
71 * 0 - Default,
72 * 1 - From ASPM setting without low Mac Pwr,
73 * 2 - From ASPM setting with low Mac Pwr,
74 * 3 - Bus D3
75 * set default to RTL8192CE:0 RTL8192SE:2
76 */
77 rtlpci->const_hwsw_rfoff_d3 = 0;
78
79 /* This setting works for those device with
80 * backdoor ASPM setting such as EPHY setting.
81 * 0 - Not support ASPM,
82 * 1 - Support ASPM,
83 * 2 - According to chipset.
84 */
85 rtlpci->const_support_pciaspm = 1;
86}
87
88int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
92 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
93 int err;
94
95 rtl8723ae_bt_reg_init(hw);
96 rtlpriv->dm.dm_initialgain_enable = 1;
97 rtlpriv->dm.dm_flag = 0;
98 rtlpriv->dm.disable_framebursting = 0;
99 rtlpriv->dm.thermalvalue = 0;
100 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
101
102 /* compatible 5G band 88ce just 2.4G band & smsp */
103 rtlpriv->rtlhal.current_bandtype = BAND_ON_2_4G;
104 rtlpriv->rtlhal.bandset = BAND_ON_2_4G;
105 rtlpriv->rtlhal.macphymode = SINGLEMAC_SINGLEPHY;
106
107 rtlpci->receive_config = (RCR_APPFCS |
108 RCR_APP_MIC |
109 RCR_APP_ICV |
110 RCR_APP_PHYST_RXFF |
111 RCR_HTC_LOC_CTRL |
112 RCR_AMF |
113 RCR_ACF |
114 RCR_ADF |
115 RCR_AICV |
116 RCR_AB |
117 RCR_AM |
118 RCR_APM |
119 0);
120
121 rtlpci->irq_mask[0] =
122 (u32) (PHIMR_ROK |
123 PHIMR_RDU |
124 PHIMR_VODOK |
125 PHIMR_VIDOK |
126 PHIMR_BEDOK |
127 PHIMR_BKDOK |
128 PHIMR_MGNTDOK |
129 PHIMR_HIGHDOK |
130 PHIMR_C2HCMD |
131 PHIMR_HISRE_IND |
132 PHIMR_TSF_BIT32_TOGGLE |
133 PHIMR_TXBCNOK |
134 PHIMR_PSTIMEOUT |
135 0);
136
137 rtlpci->irq_mask[1] = (u32)(PHIMR_RXFOVW | 0);
138
139 /* for debug level */
140 rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
141 /* for LPS & IPS */
142 rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
143 rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
144 rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
145 rtlpriv->psc.reg_fwctrl_lps = 3;
146 rtlpriv->psc.reg_max_lps_awakeintvl = 5;
147 /* for ASPM, you can close aspm through
148 * set const_support_pciaspm = 0
149 */
150 rtl8723ae_init_aspm_vars(hw);
151
152 if (rtlpriv->psc.reg_fwctrl_lps == 1)
153 rtlpriv->psc.fwctrl_psmode = FW_PS_MIN_MODE;
154 else if (rtlpriv->psc.reg_fwctrl_lps == 2)
155 rtlpriv->psc.fwctrl_psmode = FW_PS_MAX_MODE;
156 else if (rtlpriv->psc.reg_fwctrl_lps == 3)
157 rtlpriv->psc.fwctrl_psmode = FW_PS_DTIM_MODE;
158
159 /* for firmware buf */
160 rtlpriv->rtlhal.pfirmware = vmalloc(0x6000);
161 if (!rtlpriv->rtlhal.pfirmware) {
162 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
163 "Can't alloc buffer for fw.\n");
164 return 1;
165 }
166
167 if (IS_VENDOR_8723_A_CUT(rtlhal->version))
168 rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw.bin";
169 else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version))
170 rtlpriv->cfg->fw_name = "rtlwifi/rtl8723fw_B.bin";
171
172 rtlpriv->max_fw_size = 0x6000;
173 pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name);
174 err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
175 rtlpriv->io.dev, GFP_KERNEL, hw,
176 rtl_fw_cb);
177 if (err) {
178 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
179 "Failed to request firmware!\n");
180 return 1;
181 }
182 return 0;
183}
184
185void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw)
186{
187 struct rtl_priv *rtlpriv = rtl_priv(hw);
188
189 if (rtlpriv->rtlhal.pfirmware) {
190 vfree(rtlpriv->rtlhal.pfirmware);
191 rtlpriv->rtlhal.pfirmware = NULL;
192 }
193}
194
/*
 * HAL callback table wiring the common rtlwifi core to the rtl8723ae
 * chip-specific implementations (hw.c, phy.c, trx.c, led.c, dm.c, ...).
 */
static struct rtl_hal_ops rtl8723ae_hal_ops = {
	.init_sw_vars = rtl8723ae_init_sw_vars,
	.deinit_sw_vars = rtl8723ae_deinit_sw_vars,
	.read_eeprom_info = rtl8723ae_read_eeprom_info,
	.interrupt_recognized = rtl8723ae_interrupt_recognized,
	.hw_init = rtl8723ae_hw_init,
	.hw_disable = rtl8723ae_card_disable,
	.hw_suspend = rtl8723ae_suspend,
	.hw_resume = rtl8723ae_resume,
	.enable_interrupt = rtl8723ae_enable_interrupt,
	.disable_interrupt = rtl8723ae_disable_interrupt,
	.set_network_type = rtl8723ae_set_network_type,
	.set_chk_bssid = rtl8723ae_set_check_bssid,
	.set_qos = rtl8723ae_set_qos,
	.set_bcn_reg = rtl8723ae_set_beacon_related_registers,
	.set_bcn_intv = rtl8723ae_set_beacon_interval,
	.update_interrupt_mask = rtl8723ae_update_interrupt_mask,
	.get_hw_reg = rtl8723ae_get_hw_reg,
	.set_hw_reg = rtl8723ae_set_hw_reg,
	.update_rate_tbl = rtl8723ae_update_hal_rate_tbl,
	.fill_tx_desc = rtl8723ae_tx_fill_desc,
	.fill_tx_cmddesc = rtl8723ae_tx_fill_cmddesc,
	.query_rx_desc = rtl8723ae_rx_query_desc,
	.set_channel_access = rtl8723ae_update_channel_access_setting,
	.radio_onoff_checking = rtl8723ae_gpio_radio_on_off_checking,
	.set_bw_mode = rtl8723ae_phy_set_bw_mode,
	.switch_channel = rtl8723ae_phy_sw_chnl,
	.dm_watchdog = rtl8723ae_dm_watchdog,
	.scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
	.set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
	.led_control = rtl8723ae_led_control,
	.set_desc = rtl8723ae_set_desc,
	.get_desc = rtl8723ae_get_desc,
	.tx_polling = rtl8723ae_tx_polling,
	.enable_hw_sec = rtl8723ae_enable_hw_security_config,
	.set_key = rtl8723ae_set_key,
	.init_sw_leds = rtl8723ae_init_sw_leds,
	.allow_all_destaddr = rtl8723ae_allow_all_destaddr,
	.get_bbreg = rtl8723ae_phy_query_bb_reg,
	.set_bbreg = rtl8723ae_phy_set_bb_reg,
	.get_rfreg = rtl8723ae_phy_query_rf_reg,
	.set_rfreg = rtl8723ae_phy_set_rf_reg,
	/* BT-coexistence hooks */
	.c2h_command_handle = rtl_8723e_c2h_command_handle,
	.bt_wifi_media_status_notify = rtl_8723e_bt_wifi_media_status_notify,
	.bt_coex_off_before_lps = rtl8723ae_bt_coex_off_before_lps,
};
241
/* Defaults for the module parameters declared below (swenc/ips/swlps/fwlps/debug). */
static struct rtl_mod_params rtl8723ae_mod_params = {
	.sw_crypto = false,	/* use hardware crypto */
	.inactiveps = true,	/* inactive power save (IPS) on */
	.swctrl_lps = false,	/* software-controlled LPS off */
	.fwctrl_lps = true,	/* firmware-controlled LPS on */
	.debug = DBG_EMERG,	/* quietest debug level */
};
249
250static struct rtl_hal_cfg rtl8723ae_hal_cfg = {
251 .bar_id = 2,
252 .write_readback = true,
253 .name = "rtl8723ae_pci",
254 .fw_name = "rtlwifi/rtl8723aefw.bin",
255 .ops = &rtl8723ae_hal_ops,
256 .mod_params = &rtl8723ae_mod_params,
257 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
258 .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
259 .maps[SYS_CLK] = REG_SYS_CLKR,
260 .maps[MAC_RCR_AM] = AM,
261 .maps[MAC_RCR_AB] = AB,
262 .maps[MAC_RCR_ACRC32] = ACRC32,
263 .maps[MAC_RCR_ACF] = ACF,
264 .maps[MAC_RCR_AAP] = AAP,
265 .maps[EFUSE_TEST] = REG_EFUSE_TEST,
266 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
267 .maps[EFUSE_CLK] = 0,
268 .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
269 .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
270 .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
271 .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
272 .maps[EFUSE_ANA8M] = ANA8M,
273 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
274 .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
275 .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
276 .maps[EFUSE_OOB_PROTECT_BYTES_LEN] = EFUSE_OOB_PROTECT_BYTES,
277
278 .maps[RWCAM] = REG_CAMCMD,
279 .maps[WCAMI] = REG_CAMWRITE,
280 .maps[RCAMO] = REG_CAMREAD,
281 .maps[CAMDBG] = REG_CAMDBG,
282 .maps[SECR] = REG_SECCFG,
283 .maps[SEC_CAM_NONE] = CAM_NONE,
284 .maps[SEC_CAM_WEP40] = CAM_WEP40,
285 .maps[SEC_CAM_TKIP] = CAM_TKIP,
286 .maps[SEC_CAM_AES] = CAM_AES,
287 .maps[SEC_CAM_WEP104] = CAM_WEP104,
288
289 .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
290 .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
291 .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
292 .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
293 .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
294 .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
295 .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
296 .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
297 .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
298 .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
299 .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
300 .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
301 .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
302 .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
303 .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
304 .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
305
306 .maps[RTL_IMR_TXFOVW] = PHIMR_TXFOVW,
307 .maps[RTL_IMR_PSTIMEOUT] = PHIMR_PSTIMEOUT,
308 .maps[RTL_IMR_BcnInt] = PHIMR_BCNDMAINT0,
309 .maps[RTL_IMR_RXFOVW] = PHIMR_RXFOVW,
310 .maps[RTL_IMR_RDU] = PHIMR_RDU,
311 .maps[RTL_IMR_ATIMEND] = PHIMR_ATIMEND_E,
312 .maps[RTL_IMR_BDOK] = PHIMR_BCNDOK0,
313 .maps[RTL_IMR_MGNTDOK] = PHIMR_MGNTDOK,
314 .maps[RTL_IMR_TBDER] = PHIMR_TXBCNERR,
315 .maps[RTL_IMR_HIGHDOK] = PHIMR_HIGHDOK,
316 .maps[RTL_IMR_TBDOK] = PHIMR_TXBCNOK,
317 .maps[RTL_IMR_BKDOK] = PHIMR_BKDOK,
318 .maps[RTL_IMR_BEDOK] = PHIMR_BEDOK,
319 .maps[RTL_IMR_VIDOK] = PHIMR_VIDOK,
320 .maps[RTL_IMR_VODOK] = PHIMR_VODOK,
321 .maps[RTL_IMR_ROK] = PHIMR_ROK,
322 .maps[RTL_IBSS_INT_MASKS] = (PHIMR_BCNDMAINT0 |
323 PHIMR_TXBCNOK | PHIMR_TXBCNERR),
324 .maps[RTL_IMR_C2HCMD] = PHIMR_C2HCMD,
325
326
327 .maps[RTL_RC_CCK_RATE1M] = DESC92_RATE1M,
328 .maps[RTL_RC_CCK_RATE2M] = DESC92_RATE2M,
329 .maps[RTL_RC_CCK_RATE5_5M] = DESC92_RATE5_5M,
330 .maps[RTL_RC_CCK_RATE11M] = DESC92_RATE11M,
331 .maps[RTL_RC_OFDM_RATE6M] = DESC92_RATE6M,
332 .maps[RTL_RC_OFDM_RATE9M] = DESC92_RATE9M,
333 .maps[RTL_RC_OFDM_RATE12M] = DESC92_RATE12M,
334 .maps[RTL_RC_OFDM_RATE18M] = DESC92_RATE18M,
335 .maps[RTL_RC_OFDM_RATE24M] = DESC92_RATE24M,
336 .maps[RTL_RC_OFDM_RATE36M] = DESC92_RATE36M,
337 .maps[RTL_RC_OFDM_RATE48M] = DESC92_RATE48M,
338 .maps[RTL_RC_OFDM_RATE54M] = DESC92_RATE54M,
339
340 .maps[RTL_RC_HT_RATEMCS7] = DESC92_RATEMCS7,
341 .maps[RTL_RC_HT_RATEMCS15] = DESC92_RATEMCS15,
342};
343
344static struct pci_device_id rtl8723ae_pci_ids[] __devinitdata = {
345 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723ae_hal_cfg)},
346 {},
347};
348
349MODULE_DEVICE_TABLE(pci, rtl8723ae_pci_ids);
350
351MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
352MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
353MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
354MODULE_LICENSE("GPL");
355MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
356MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin");
357MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin");
358
359module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444);
360module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444);
361module_param_named(ips, rtl8723ae_mod_params.inactiveps, bool, 0444);
362module_param_named(swlps, rtl8723ae_mod_params.swctrl_lps, bool, 0444);
363module_param_named(fwlps, rtl8723ae_mod_params.fwctrl_lps, bool, 0444);
364MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
365MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
366MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
367MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
368MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
369
370static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
371
372static struct pci_driver rtl8723ae_driver = {
373 .name = KBUILD_MODNAME,
374 .id_table = rtl8723ae_pci_ids,
375 .probe = rtl_pci_probe,
376 .remove = rtl_pci_disconnect,
377 .driver.pm = &rtlwifi_pm_ops,
378};
379
380module_pci_driver(rtl8723ae_driver);
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
new file mode 100644
index 000000000000..fc4fde5e3eb5
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
#ifndef __RTL8723E_SW_H__
#define __RTL8723E_SW_H__

/* Driver-lifecycle hooks wired into rtl8723ae_hal_ops (see sw.c). */
int rtl8723ae_init_sw_vars(struct ieee80211_hw *hw);
void rtl8723ae_deinit_sw_vars(struct ieee80211_hw *hw);
/* NOTE(review): no definition visible in sw.c - possibly unused; confirm. */
void rtl8723ae_init_var_map(struct ieee80211_hw *hw);

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c
new file mode 100644
index 000000000000..9b0b50cc4ade
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.c
@@ -0,0 +1,738 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
32#include "table.h"
33
/*
 * Baseband (PHY) register initialization table for the 1T RTL8723E:
 * a flat list of { register offset, value } pairs consumed two at a time
 * by the PHY configuration code. Values are vendor-supplied; do not edit
 * by hand.
 */
u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH] = {
	0x800, 0x80040000,
	0x804, 0x00000003,
	0x808, 0x0000fc00,
	0x80c, 0x0000000a,
	0x810, 0x10005388,
	0x814, 0x020c3d10,
	0x818, 0x02200385,
	0x81c, 0x00000000,
	0x820, 0x01000100,
	0x824, 0x00390004,
	0x828, 0x00000000,
	0x82c, 0x00000000,
	0x830, 0x00000000,
	0x834, 0x00000000,
	0x838, 0x00000000,
	0x83c, 0x00000000,
	0x840, 0x00010000,
	0x844, 0x00000000,
	0x848, 0x00000000,
	0x84c, 0x00000000,
	0x850, 0x00000000,
	0x854, 0x00000000,
	0x858, 0x569a569a,
	0x85c, 0x001b25a4,
	0x860, 0x66f60110,
	0x864, 0x061f0130,
	0x868, 0x00000000,
	0x86c, 0x32323200,
	0x870, 0x07000760,
	0x874, 0x22004000,
	0x878, 0x00000808,
	0x87c, 0x00000000,
	0x880, 0xc0083070,
	0x884, 0x000004d5,
	0x888, 0x00000000,
	0x88c, 0xccc000c0,
	0x890, 0x00000800,
	0x894, 0xfffffffe,
	0x898, 0x40302010,
	0x89c, 0x00706050,
	0x900, 0x00000000,
	0x904, 0x00000023,
	0x908, 0x00000000,
	0x90c, 0x81121111,
	0xa00, 0x00d047c8,
	0xa04, 0x80ff000c,
	0xa08, 0x8c838300,
	0xa0c, 0x2e68120f,
	0xa10, 0x9500bb78,
	0xa14, 0x11144028,
	0xa18, 0x00881117,
	0xa1c, 0x89140f00,
	0xa20, 0x1a1b0000,
	0xa24, 0x090e1317,
	0xa28, 0x00000204,
	0xa2c, 0x00d30000,
	0xa70, 0x101fbf00,
	0xa74, 0x00000007,
	0xa78, 0x00000900,
	0xc00, 0x48071d40,
	0xc04, 0x03a05611,
	0xc08, 0x000000e4,
	0xc0c, 0x6c6c6c6c,
	0xc10, 0x08800000,
	0xc14, 0x40000100,
	0xc18, 0x08800000,
	0xc1c, 0x40000100,
	0xc20, 0x00000000,
	0xc24, 0x00000000,
	0xc28, 0x00000000,
	0xc2c, 0x00000000,
	0xc30, 0x69e9ac44,
	0xc34, 0x469652cf,
	0xc38, 0x49795994,
	0xc3c, 0x0a97971c,
	0xc40, 0x1f7c403f,
	0xc44, 0x000100b7,
	0xc48, 0xec020107,
	0xc4c, 0x007f037f,
	0xc50, 0x69543420,
	0xc54, 0x43bc0094,
	0xc58, 0x69543420,
	0xc5c, 0x433c0094,
	0xc60, 0x00000000,
	0xc64, 0x7116848b,
	0xc68, 0x47c00bff,
	0xc6c, 0x00000036,
	0xc70, 0x2c7f000d,
	0xc74, 0x018610db,
	0xc78, 0x0000001f,
	0xc7c, 0x00b91612,
	0xc80, 0x40000100,
	0xc84, 0x20f60000,
	0xc88, 0x40000100,
	0xc8c, 0x20200000,
	0xc90, 0x00121820,
	0xc94, 0x00000000,
	0xc98, 0x00121820,
	0xc9c, 0x00007f7f,
	0xca0, 0x00000000,
	0xca4, 0x00000080,
	0xca8, 0x00000000,
	0xcac, 0x00000000,
	0xcb0, 0x00000000,
	0xcb4, 0x00000000,
	0xcb8, 0x00000000,
	0xcbc, 0x28000000,
	0xcc0, 0x00000000,
	0xcc4, 0x00000000,
	0xcc8, 0x00000000,
	0xccc, 0x00000000,
	0xcd0, 0x00000000,
	0xcd4, 0x00000000,
	0xcd8, 0x64b22427,
	0xcdc, 0x00766932,
	0xce0, 0x00222222,
	0xce4, 0x00000000,
	0xce8, 0x37644302,
	0xcec, 0x2f97d40c,
	0xd00, 0x00080740,
	0xd04, 0x00020401,
	0xd08, 0x0000907f,
	0xd0c, 0x20010201,
	0xd10, 0xa0633333,
	0xd14, 0x3333bc43,
	0xd18, 0x7a8f5b6b,
	0xd2c, 0xcc979975,
	0xd30, 0x00000000,
	0xd34, 0x80608000,
	0xd38, 0x00000000,
	0xd3c, 0x00027293,
	0xd40, 0x00000000,
	0xd44, 0x00000000,
	0xd48, 0x00000000,
	0xd4c, 0x00000000,
	0xd50, 0x6437140a,
	0xd54, 0x00000000,
	0xd58, 0x00000000,
	0xd5c, 0x30032064,
	0xd60, 0x4653de68,
	0xd64, 0x04518a3c,
	0xd68, 0x00002101,
	0xd6c, 0x2a201c16,
	0xd70, 0x1812362e,
	0xd74, 0x322c2220,
	0xd78, 0x000e3c24,
	0xe00, 0x2a2a2a2a,
	0xe04, 0x2a2a2a2a,
	0xe08, 0x03902a2a,
	0xe10, 0x2a2a2a2a,
	0xe14, 0x2a2a2a2a,
	0xe18, 0x2a2a2a2a,
	0xe1c, 0x2a2a2a2a,
	0xe28, 0x00000000,
	0xe30, 0x1000dc1f,
	0xe34, 0x10008c1f,
	0xe38, 0x02140102,
	0xe3c, 0x681604c2,
	0xe40, 0x01007c00,
	0xe44, 0x01004800,
	0xe48, 0xfb000000,
	0xe4c, 0x000028d1,
	0xe50, 0x1000dc1f,
	0xe54, 0x10008c1f,
	0xe58, 0x02140102,
	0xe5c, 0x28160d05,
	0xe60, 0x00000008,
	0xe68, 0x001b25a4,
	0xe6c, 0x631b25a0,
	0xe70, 0x631b25a0,
	0xe74, 0x081b25a0,
	0xe78, 0x081b25a0,
	0xe7c, 0x081b25a0,
	0xe80, 0x081b25a0,
	0xe84, 0x631b25a0,
	0xe88, 0x081b25a0,
	0xe8c, 0x631b25a0,
	0xed0, 0x631b25a0,
	0xed4, 0x631b25a0,
	0xed8, 0x631b25a0,
	0xedc, 0x001b25a0,
	0xee0, 0x001b25a0,
	0xeec, 0x6b1b25a0,
	0xf14, 0x00000003,
	0xf4c, 0x00000000,
	0xf00, 0x00000300,
};
222
/* BB/PHY power-by-rate ("PG") table: {register address, bitmask, value}
 * triplets, written in 16-entry groups. Presumably one group per rate
 * section/power level as consumed by the rtl8723ae PHY init code —
 * TODO(review): confirm against phy.c. Values are vendor-supplied;
 * do not edit by hand.
 */
u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH] = {
	0xe00, 0xffffffff, 0x0a0c0c0c,
	0xe04, 0xffffffff, 0x02040608,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x0a0c0d0e,
	0xe14, 0xffffffff, 0x02040608,
	0xe18, 0xffffffff, 0x0a0c0d0e,
	0xe1c, 0xffffffff, 0x02040608,
	0x830, 0xffffffff, 0x0a0c0c0c,
	0x834, 0xffffffff, 0x02040608,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x0a0c0d0e,
	0x848, 0xffffffff, 0x02040608,
	0x84c, 0xffffffff, 0x0a0c0d0e,
	0x868, 0xffffffff, 0x02040608,
	0xe00, 0xffffffff, 0x00000000,
	0xe04, 0xffffffff, 0x00000000,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x00000000,
	0xe14, 0xffffffff, 0x00000000,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x00000000,
	0x834, 0xffffffff, 0x00000000,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x00000000,
	0x848, 0xffffffff, 0x00000000,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
	0xe00, 0xffffffff, 0x04040404,
	0xe04, 0xffffffff, 0x00020204,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x06060606,
	0xe14, 0xffffffff, 0x00020406,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x04040404,
	0x834, 0xffffffff, 0x00020204,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x06060606,
	0x848, 0xffffffff, 0x00020406,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
	0xe00, 0xffffffff, 0x00000000,
	0xe04, 0xffffffff, 0x00000000,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x00000000,
	0xe14, 0xffffffff, 0x00000000,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x00000000,
	0x834, 0xffffffff, 0x00000000,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x00000000,
	0x848, 0xffffffff, 0x00000000,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
	0xe00, 0xffffffff, 0x00000000,
	0xe04, 0xffffffff, 0x00000000,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x00000000,
	0xe14, 0xffffffff, 0x00000000,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x00000000,
	0x834, 0xffffffff, 0x00000000,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x00000000,
	0x848, 0xffffffff, 0x00000000,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
	0xe00, 0xffffffff, 0x04040404,
	0xe04, 0xffffffff, 0x00020204,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x00000000,
	0xe14, 0xffffffff, 0x00000000,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x04040404,
	0x834, 0xffffffff, 0x00020204,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x00000000,
	0x848, 0xffffffff, 0x00000000,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
	0xe00, 0xffffffff, 0x00000000,
	0xe04, 0xffffffff, 0x00000000,
	0xe08, 0x0000ff00, 0x00000000,
	0x86c, 0xffffff00, 0x00000000,
	0xe10, 0xffffffff, 0x00000000,
	0xe14, 0xffffffff, 0x00000000,
	0xe18, 0xffffffff, 0x00000000,
	0xe1c, 0xffffffff, 0x00000000,
	0x830, 0xffffffff, 0x00000000,
	0x834, 0xffffffff, 0x00000000,
	0x838, 0xffffff00, 0x00000000,
	0x86c, 0x000000ff, 0x00000000,
	0x83c, 0xffffffff, 0x00000000,
	0x848, 0xffffffff, 0x00000000,
	0x84c, 0xffffffff, 0x00000000,
	0x868, 0xffffffff, 0x00000000,
};
337
/* RF path-A initialization: {RF register offset, data} pairs, applied in
 * order. Entries writing 0x0fe appear to be spacer/delay markers used by
 * the rtlwifi radio-write loop — TODO(review): confirm against the
 * rtl8723ae RF config code. Vendor-supplied values; do not edit by hand.
 */
u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH] = {
	0x000, 0x00030159,
	0x001, 0x00031284,
	0x002, 0x00098000,
	0x003, 0x00018c63,
	0x004, 0x000210e7,
	0x009, 0x0002044f,
	0x00a, 0x0001a3f1,
	0x00b, 0x00014787,
	0x00c, 0x000896fe,
	0x00d, 0x0000e02c,
	0x00e, 0x00039ce7,
	0x00f, 0x00000451,
	0x019, 0x00000000,
	0x01a, 0x00030355,
	0x01b, 0x00060a00,
	0x01c, 0x000fc378,
	0x01d, 0x000a1250,
	0x01e, 0x0004445f,
	0x01f, 0x00080001,
	0x020, 0x0000b614,
	0x021, 0x0006c000,
	0x022, 0x00000000,
	0x023, 0x00001558,
	0x024, 0x00000060,
	0x025, 0x00000483,
	0x026, 0x0004f000,
	0x027, 0x000ec7d9,
	0x028, 0x00057730,
	0x029, 0x00004783,
	0x02a, 0x00000001,
	0x02b, 0x00021334,
	0x02a, 0x00000000,
	0x02b, 0x00000054,
	0x02a, 0x00000001,
	0x02b, 0x00000808,
	0x02b, 0x00053333,
	0x02c, 0x0000000c,
	0x02a, 0x00000002,
	0x02b, 0x00000808,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000003,
	0x02b, 0x00000808,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x00000004,
	0x02b, 0x00000808,
	0x02b, 0x0006b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000005,
	0x02b, 0x00000808,
	0x02b, 0x00073333,
	0x02c, 0x0000000d,
	0x02a, 0x00000006,
	0x02b, 0x00000709,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000007,
	0x02b, 0x00000709,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x00000008,
	0x02b, 0x0000060a,
	0x02b, 0x0004b333,
	0x02c, 0x0000000d,
	0x02a, 0x00000009,
	0x02b, 0x0000060a,
	0x02b, 0x00053333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000a,
	0x02b, 0x0000060a,
	0x02b, 0x0005b333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000b,
	0x02b, 0x0000060a,
	0x02b, 0x00063333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000c,
	0x02b, 0x0000060a,
	0x02b, 0x0006b333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000d,
	0x02b, 0x0000060a,
	0x02b, 0x00073333,
	0x02c, 0x0000000d,
	0x02a, 0x0000000e,
	0x02b, 0x0000050b,
	0x02b, 0x00066666,
	0x02c, 0x0000001a,
	0x02a, 0x000e0000,
	0x010, 0x0004000f,
	0x011, 0x000e31fc,
	0x010, 0x0006000f,
	0x011, 0x000ff9f8,
	0x010, 0x0002000f,
	0x011, 0x000203f9,
	0x010, 0x0003000f,
	0x011, 0x000ff500,
	0x010, 0x00000000,
	0x011, 0x00000000,
	0x010, 0x0008000f,
	0x011, 0x0003f100,
	0x010, 0x0009000f,
	0x011, 0x00023100,
	0x012, 0x00032000,
	0x012, 0x00071000,
	0x012, 0x000b0000,
	0x012, 0x000fc000,
	0x013, 0x000287b3,
	0x013, 0x000244b7,
	0x013, 0x000204ab,
	0x013, 0x0001c49f,
	0x013, 0x00018493,
	0x013, 0x0001429b,
	0x013, 0x00010299,
	0x013, 0x0000c29c,
	0x013, 0x000081a0,
	0x013, 0x000040ac,
	0x013, 0x00000020,
	0x014, 0x0001944c,
	0x014, 0x00059444,
	0x014, 0x0009944c,
	0x014, 0x000d9444,
	0x015, 0x0000f424,
	0x015, 0x0004f407,
	0x015, 0x0008f424,
	0x015, 0x000cf424,
	0x016, 0x00000339,
	0x016, 0x00040339,
	0x016, 0x00080339,
	0x016, 0x000c0336,
	0x000, 0x00010159,
	0x018, 0x0000f401,
	0x0fe, 0x00000000,
	0x0fe, 0x00000000,
	0x01f, 0x00080003,
	0x0fe, 0x00000000,
	0x0fe, 0x00000000,
	0x01e, 0x00044457,
	0x01f, 0x00080000,
	0x000, 0x00030159,
};
481
482
/* RF path B is unused on this 1T1R part: single placeholder entry so the
 * array (and its declared length of 1) stays well-formed.
 */
u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = {
	0x0,
};
486
487
/* MAC register initialization: {register offset, byte value} pairs,
 * applied in order at MAC init. Vendor-supplied; do not edit by hand.
 */
u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = {
	0x420, 0x00000080,
	0x423, 0x00000000,
	0x430, 0x00000000,
	0x431, 0x00000000,
	0x432, 0x00000000,
	0x433, 0x00000001,
	0x434, 0x00000004,
	0x435, 0x00000005,
	0x436, 0x00000006,
	0x437, 0x00000007,
	0x438, 0x00000000,
	0x439, 0x00000000,
	0x43a, 0x00000000,
	0x43b, 0x00000001,
	0x43c, 0x00000004,
	0x43d, 0x00000005,
	0x43e, 0x00000006,
	0x43f, 0x00000007,
	0x440, 0x0000005d,
	0x441, 0x00000001,
	0x442, 0x00000000,
	0x444, 0x00000015,
	0x445, 0x000000f0,
	0x446, 0x0000000f,
	0x447, 0x00000000,
	0x458, 0x00000041,
	0x459, 0x000000a8,
	0x45a, 0x00000072,
	0x45b, 0x000000b9,
	0x460, 0x00000066,
	0x461, 0x00000066,
	0x462, 0x00000008,
	0x463, 0x00000003,
	0x4c8, 0x000000ff,
	0x4c9, 0x00000008,
	0x4cc, 0x000000ff,
	0x4cd, 0x000000ff,
	0x4ce, 0x00000001,
	0x500, 0x00000026,
	0x501, 0x000000a2,
	0x502, 0x0000002f,
	0x503, 0x00000000,
	0x504, 0x00000028,
	0x505, 0x000000a3,
	0x506, 0x0000005e,
	0x507, 0x00000000,
	0x508, 0x0000002b,
	0x509, 0x000000a4,
	0x50a, 0x0000005e,
	0x50b, 0x00000000,
	0x50c, 0x0000004f,
	0x50d, 0x000000a4,
	0x50e, 0x00000000,
	0x50f, 0x00000000,
	0x512, 0x0000001c,
	0x514, 0x0000000a,
	0x515, 0x00000010,
	0x516, 0x0000000a,
	0x517, 0x00000010,
	0x51a, 0x00000016,
	0x524, 0x0000000f,
	0x525, 0x0000004f,
	0x546, 0x00000040,
	0x547, 0x00000000,
	0x550, 0x00000010,
	0x551, 0x00000010,
	0x559, 0x00000002,
	0x55a, 0x00000002,
	0x55d, 0x000000ff,
	0x605, 0x00000030,
	0x608, 0x0000000e,
	0x609, 0x0000002a,
	0x652, 0x00000020,
	0x63c, 0x0000000a,
	0x63d, 0x0000000e,
	0x63e, 0x0000000a,
	0x63f, 0x0000000e,
	0x66e, 0x00000005,
	0x700, 0x00000021,
	0x701, 0x00000043,
	0x702, 0x00000065,
	0x703, 0x00000087,
	0x708, 0x00000021,
	0x709, 0x00000043,
	0x70a, 0x00000065,
	0x70b, 0x00000087,
};
576
/* AGC gain table: every entry is a write of a gain code to BB register
 * 0xc78. The second byte of each value increments like a gain index;
 * the trailing 0x38xx001e..0x62xx001e group presumably covers the CCK
 * AGC range — TODO(review): confirm against the rtl8723ae DIG code.
 * Vendor-supplied; do not edit by hand.
 */
u32 RTL8723EAGCTAB_1TARRAY[RTL8723E_AGCTAB_1TARRAYLENGTH] = {
	0xc78, 0x7b000001,
	0xc78, 0x7b010001,
	0xc78, 0x7b020001,
	0xc78, 0x7b030001,
	0xc78, 0x7b040001,
	0xc78, 0x7b050001,
	0xc78, 0x7a060001,
	0xc78, 0x79070001,
	0xc78, 0x78080001,
	0xc78, 0x77090001,
	0xc78, 0x760a0001,
	0xc78, 0x750b0001,
	0xc78, 0x740c0001,
	0xc78, 0x730d0001,
	0xc78, 0x720e0001,
	0xc78, 0x710f0001,
	0xc78, 0x70100001,
	0xc78, 0x6f110001,
	0xc78, 0x6e120001,
	0xc78, 0x6d130001,
	0xc78, 0x6c140001,
	0xc78, 0x6b150001,
	0xc78, 0x6a160001,
	0xc78, 0x69170001,
	0xc78, 0x68180001,
	0xc78, 0x67190001,
	0xc78, 0x661a0001,
	0xc78, 0x651b0001,
	0xc78, 0x641c0001,
	0xc78, 0x631d0001,
	0xc78, 0x621e0001,
	0xc78, 0x611f0001,
	0xc78, 0x60200001,
	0xc78, 0x49210001,
	0xc78, 0x48220001,
	0xc78, 0x47230001,
	0xc78, 0x46240001,
	0xc78, 0x45250001,
	0xc78, 0x44260001,
	0xc78, 0x43270001,
	0xc78, 0x42280001,
	0xc78, 0x41290001,
	0xc78, 0x402a0001,
	0xc78, 0x262b0001,
	0xc78, 0x252c0001,
	0xc78, 0x242d0001,
	0xc78, 0x232e0001,
	0xc78, 0x222f0001,
	0xc78, 0x21300001,
	0xc78, 0x20310001,
	0xc78, 0x06320001,
	0xc78, 0x05330001,
	0xc78, 0x04340001,
	0xc78, 0x03350001,
	0xc78, 0x02360001,
	0xc78, 0x01370001,
	0xc78, 0x00380001,
	0xc78, 0x00390001,
	0xc78, 0x003a0001,
	0xc78, 0x003b0001,
	0xc78, 0x003c0001,
	0xc78, 0x003d0001,
	0xc78, 0x003e0001,
	0xc78, 0x003f0001,
	0xc78, 0x7b400001,
	0xc78, 0x7b410001,
	0xc78, 0x7b420001,
	0xc78, 0x7b430001,
	0xc78, 0x7b440001,
	0xc78, 0x7b450001,
	0xc78, 0x7a460001,
	0xc78, 0x79470001,
	0xc78, 0x78480001,
	0xc78, 0x77490001,
	0xc78, 0x764a0001,
	0xc78, 0x754b0001,
	0xc78, 0x744c0001,
	0xc78, 0x734d0001,
	0xc78, 0x724e0001,
	0xc78, 0x714f0001,
	0xc78, 0x70500001,
	0xc78, 0x6f510001,
	0xc78, 0x6e520001,
	0xc78, 0x6d530001,
	0xc78, 0x6c540001,
	0xc78, 0x6b550001,
	0xc78, 0x6a560001,
	0xc78, 0x69570001,
	0xc78, 0x68580001,
	0xc78, 0x67590001,
	0xc78, 0x665a0001,
	0xc78, 0x655b0001,
	0xc78, 0x645c0001,
	0xc78, 0x635d0001,
	0xc78, 0x625e0001,
	0xc78, 0x615f0001,
	0xc78, 0x60600001,
	0xc78, 0x49610001,
	0xc78, 0x48620001,
	0xc78, 0x47630001,
	0xc78, 0x46640001,
	0xc78, 0x45650001,
	0xc78, 0x44660001,
	0xc78, 0x43670001,
	0xc78, 0x42680001,
	0xc78, 0x41690001,
	0xc78, 0x406a0001,
	0xc78, 0x266b0001,
	0xc78, 0x256c0001,
	0xc78, 0x246d0001,
	0xc78, 0x236e0001,
	0xc78, 0x226f0001,
	0xc78, 0x21700001,
	0xc78, 0x20710001,
	0xc78, 0x06720001,
	0xc78, 0x05730001,
	0xc78, 0x04740001,
	0xc78, 0x03750001,
	0xc78, 0x02760001,
	0xc78, 0x01770001,
	0xc78, 0x00780001,
	0xc78, 0x00790001,
	0xc78, 0x007a0001,
	0xc78, 0x007b0001,
	0xc78, 0x007c0001,
	0xc78, 0x007d0001,
	0xc78, 0x007e0001,
	0xc78, 0x007f0001,
	0xc78, 0x3800001e,
	0xc78, 0x3801001e,
	0xc78, 0x3802001e,
	0xc78, 0x3803001e,
	0xc78, 0x3804001e,
	0xc78, 0x3805001e,
	0xc78, 0x3806001e,
	0xc78, 0x3807001e,
	0xc78, 0x3808001e,
	0xc78, 0x3c09001e,
	0xc78, 0x3e0a001e,
	0xc78, 0x400b001e,
	0xc78, 0x440c001e,
	0xc78, 0x480d001e,
	0xc78, 0x4c0e001e,
	0xc78, 0x500f001e,
	0xc78, 0x5210001e,
	0xc78, 0x5611001e,
	0xc78, 0x5a12001e,
	0xc78, 0x5e13001e,
	0xc78, 0x6014001e,
	0xc78, 0x6015001e,
	0xc78, 0x6016001e,
	0xc78, 0x6217001e,
	0xc78, 0x6218001e,
	0xc78, 0x6219001e,
	0xc78, 0x621a001e,
	0xc78, 0x621b001e,
	0xc78, 0x621c001e,
	0xc78, 0x621d001e,
	0xc78, 0x621e001e,
	0xc78, 0x621f001e,
};
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h
new file mode 100644
index 000000000000..f5ce71375c20
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/table.h
@@ -0,0 +1,50 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
#ifndef __RTL8723E_TABLE__H_
#define __RTL8723E_TABLE__H_

#include <linux/types.h>

/* Hardware initialization tables defined in table.c. Each *_LENGTH macro
 * is the u32 element count of the matching array (2 u32s per entry for
 * register/value tables, 3 per entry for the PG masked-write table).
 */
#define RTL8723E_PHY_REG_1TARRAY_LENGTH 372
extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH];
#define RTL8723E_PHY_REG_ARRAY_PGLENGTH 336
extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH];
#define Rtl8723ERADIOA_1TARRAYLENGTH 282
extern u32 RTL8723E_RADIOA_1TARRAY[Rtl8723ERADIOA_1TARRAYLENGTH];
#define RTL8723E_RADIOB_1TARRAYLENGTH 1
extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH];
#define RTL8723E_MACARRAYLENGTH 172
extern u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH];
#define RTL8723E_AGCTAB_1TARRAYLENGTH 320
extern u32 RTL8723EAGCTAB_1TARRAY[RTL8723E_AGCTAB_1TARRAYLENGTH];

#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
new file mode 100644
index 000000000000..87331d826d73
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -0,0 +1,670 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "../stats.h"
34#include "reg.h"
35#include "def.h"
36#include "phy.h"
37#include "trx.h"
38#include "led.h"
39
40static u8 _rtl8723ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
41{
42 __le16 fc = rtl_get_fc(skb);
43
44 if (unlikely(ieee80211_is_beacon(fc)))
45 return QSLT_BEACON;
46 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
47 return QSLT_MGNT;
48
49 return skb->priority;
50}
51
52static void _rtl8723ae_query_rxphystatus(struct ieee80211_hw *hw,
53 struct rtl_stats *pstatus, u8 *pdesc,
54 struct rx_fwinfo_8723e *p_drvinfo,
55 bool bpacket_match_bssid,
56 bool bpacket_toself, bool packet_beacon)
57{
58 struct rtl_priv *rtlpriv = rtl_priv(hw);
59 struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
60 struct phy_sts_cck_8723e_t *cck_buf;
61 s8 rx_pwr_all, rx_pwr[4];
62 u8 rf_rx_num = 0, evm, pwdb_all;
63 u8 i, max_spatial_stream;
64 u32 rssi, total_rssi = 0;
65 bool is_cck = pstatus->is_cck;
66
67 /* Record it for next packet processing */
68 pstatus->packet_matchbssid = bpacket_match_bssid;
69 pstatus->packet_toself = bpacket_toself;
70 pstatus->packet_beacon = packet_beacon;
71 pstatus->rx_mimo_sig_qual[0] = -1;
72 pstatus->rx_mimo_sig_qual[1] = -1;
73
74 if (is_cck) {
75 u8 report, cck_highpwr;
76
77 /* CCK Driver info Structure is not the same as OFDM packet. */
78 cck_buf = (struct phy_sts_cck_8723e_t *)p_drvinfo;
79
80 /* (1)Hardware does not provide RSSI for CCK
81 * (2)PWDB, Average PWDB cacluated by
82 * hardware (for rate adaptive)
83 */
84 if (ppsc->rfpwr_state == ERFON)
85 cck_highpwr = (u8) rtl_get_bbreg(hw,
86 RFPGA0_XA_HSSIPARAMETER2,
87 BIT(9));
88 else
89 cck_highpwr = false;
90
91 if (!cck_highpwr) {
92 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
93 report = cck_buf->cck_agc_rpt & 0xc0;
94 report = report >> 6;
95 switch (report) {
96 case 0x3:
97 rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
98 break;
99 case 0x2:
100 rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
101 break;
102 case 0x1:
103 rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
104 break;
105 case 0x0:
106 rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
107 break;
108 }
109 } else {
110 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
111 report = p_drvinfo->cfosho[0] & 0x60;
112 report = report >> 5;
113 switch (report) {
114 case 0x3:
115 rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
116 break;
117 case 0x2:
118 rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
119 break;
120 case 0x1:
121 rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
122 break;
123 case 0x0:
124 rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
125 break;
126 }
127 }
128
129 pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
130 /* CCK gain is smaller than OFDM/MCS gain,
131 * so we add gain diff. From experience, the val is 6
132 */
133 pwdb_all += 6;
134 if (pwdb_all > 100)
135 pwdb_all = 100;
136 /* modify the offset to make the same
137 * gain index with OFDM.
138 */
139 if (pwdb_all > 34 && pwdb_all <= 42)
140 pwdb_all -= 2;
141 else if (pwdb_all > 26 && pwdb_all <= 34)
142 pwdb_all -= 6;
143 else if (pwdb_all > 14 && pwdb_all <= 26)
144 pwdb_all -= 8;
145 else if (pwdb_all > 4 && pwdb_all <= 14)
146 pwdb_all -= 4;
147
148 pstatus->rx_pwdb_all = pwdb_all;
149 pstatus->recvsignalpower = rx_pwr_all;
150
151 /* (3) Get Signal Quality (EVM) */
152 if (bpacket_match_bssid) {
153 u8 sq;
154
155 if (pstatus->rx_pwdb_all > 40) {
156 sq = 100;
157 } else {
158 sq = cck_buf->sq_rpt;
159 if (sq > 64)
160 sq = 0;
161 else if (sq < 20)
162 sq = 100;
163 else
164 sq = ((64 - sq) * 100) / 44;
165 }
166
167 pstatus->signalquality = sq;
168 pstatus->rx_mimo_sig_qual[0] = sq;
169 pstatus->rx_mimo_sig_qual[1] = -1;
170 }
171 } else {
172 rtlpriv->dm.rfpath_rxenable[0] =
173 rtlpriv->dm.rfpath_rxenable[1] = true;
174
175 /* (1)Get RSSI for HT rate */
176 for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
177
178 /* we will judge RF RX path now. */
179 if (rtlpriv->dm.rfpath_rxenable[i])
180 rf_rx_num++;
181
182 rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f)*2) - 110;
183
184 /* Translate DBM to percentage. */
185 rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
186 total_rssi += rssi;
187
188 /* Get Rx snr value in DB */
189 rtlpriv->stats.rx_snr_db[i] = (p_drvinfo->rxsnr[i] / 2);
190
191 /* Record Signal Strength for next packet */
192 if (bpacket_match_bssid)
193 pstatus->rx_mimo_signalstrength[i] = (u8) rssi;
194 }
195
196 /* (2)PWDB, Average PWDB cacluated by
197 * hardware (for rate adaptive)
198 */
199 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
200
201 pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
202 pstatus->rx_pwdb_all = pwdb_all;
203 pstatus->rxpower = rx_pwr_all;
204 pstatus->recvsignalpower = rx_pwr_all;
205
206 /* (3)EVM of HT rate */
207 if (pstatus->is_ht && pstatus->rate >= DESC92_RATEMCS8 &&
208 pstatus->rate <= DESC92_RATEMCS15)
209 max_spatial_stream = 2;
210 else
211 max_spatial_stream = 1;
212
213 for (i = 0; i < max_spatial_stream; i++) {
214 evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
215
216 if (bpacket_match_bssid) {
217 /* Fill value in RFD, Get the first
218 * spatial stream only
219 */
220 if (i == 0)
221 pstatus->signalquality = (evm & 0xff);
222 pstatus->rx_mimo_sig_qual[i] = (evm & 0xff);
223 }
224 }
225 }
226
227 /* UI BSS List signal strength(in percentage),
228 * make it good looking, from 0~100.
229 */
230 if (is_cck)
231 pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
232 pwdb_all));
233 else if (rf_rx_num != 0)
234 pstatus->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
235 total_rssi /= rf_rx_num));
236}
237
238static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
239 struct sk_buff *skb, struct rtl_stats *pstatus,
240 u8 *pdesc, struct rx_fwinfo_8723e *p_drvinfo)
241{
242 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
243 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
244 struct ieee80211_hdr *hdr;
245 u8 *tmp_buf;
246 u8 *praddr;
247 u8 *psaddr;
248 __le16 fc;
249 u16 type;
250 bool packet_matchbssid, packet_toself, packet_beacon;
251
252 tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
253
254 hdr = (struct ieee80211_hdr *)tmp_buf;
255 fc = hdr->frame_control;
256 type = WLAN_FC_GET_TYPE(fc);
257 praddr = hdr->addr1;
258 psaddr = ieee80211_get_SA(hdr);
259
260 packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
261 (!compare_ether_addr(mac->bssid,
262 (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ?
263 hdr->addr1 : (le16_to_cpu(fc) &
264 IEEE80211_FCTL_FROMDS) ?
265 hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) &&
266 (!pstatus->crc) && (!pstatus->icv));
267
268 packet_toself = packet_matchbssid &&
269 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
270
271 if (ieee80211_is_beacon(fc))
272 packet_beacon = true;
273
274 _rtl8723ae_query_rxphystatus(hw, pstatus, pdesc, p_drvinfo,
275 packet_matchbssid, packet_toself,
276 packet_beacon);
277
278 rtl_process_phyinfo(hw, tmp_buf, pstatus);
279}
280
281bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
282 struct rtl_stats *status,
283 struct ieee80211_rx_status *rx_status,
284 u8 *pdesc, struct sk_buff *skb)
285{
286 struct rx_fwinfo_8723e *p_drvinfo;
287 struct ieee80211_hdr *hdr;
288 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
289
290 status->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
291 status->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
292 RX_DRV_INFO_SIZE_UNIT;
293 status->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
294 status->icv = (u16) GET_RX_DESC_ICV(pdesc);
295 status->crc = (u16) GET_RX_DESC_CRC32(pdesc);
296 status->hwerror = (status->crc | status->icv);
297 status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
298 status->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
299 status->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
300 status->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1);
301 status->isfirst_ampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
302 && (GET_RX_DESC_FAGGR(pdesc) == 1));
303 status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
304 status->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
305 status->is_ht = (bool)GET_RX_DESC_RXHT(pdesc);
306
307 status->is_cck = RTL8723E_RX_HAL_IS_CCK_RATE(status->rate);
308
309 rx_status->freq = hw->conf.channel->center_freq;
310 rx_status->band = hw->conf.channel->band;
311
312 hdr = (struct ieee80211_hdr *)(skb->data + status->rx_drvinfo_size
313 + status->rx_bufshift);
314
315 if (status->crc)
316 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
317
318 if (status->rx_is40Mhzpacket)
319 rx_status->flag |= RX_FLAG_40MHZ;
320
321 if (status->is_ht)
322 rx_status->flag |= RX_FLAG_HT;
323
324 rx_status->flag |= RX_FLAG_MACTIME_START;
325
326 /* hw will set status->decrypted true, if it finds the
327 * frame is open data frame or mgmt frame.
328 * Thus hw will not decrypt a robust managment frame
329 * for IEEE80211w but still set status->decrypted
330 * true, so here we should set it back to undecrypted
331 * for IEEE80211w frame, and mac80211 sw will help
332 * to decrypt it
333 */
334 if (status->decrypted) {
335 if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
336 (ieee80211_has_protected(hdr->frame_control)))
337 rx_status->flag &= ~RX_FLAG_DECRYPTED;
338 else
339 rx_status->flag |= RX_FLAG_DECRYPTED;
340 }
341
342 /* rate_idx: index of data rate into band's
343 * supported rates or MCS index if HT rates
344 * are use (RX_FLAG_HT)
345 */
346 rx_status->rate_idx = rtlwifi_rate_mapping(hw, status->is_ht,
347 status->rate, false);
348
349 rx_status->mactime = status->timestamp_low;
350 if (phystatus == true) {
351 p_drvinfo = (struct rx_fwinfo_8723e *)(skb->data +
352 status->rx_bufshift);
353
354 _rtl8723ae_translate_rx_signal_stuff(hw,
355 skb, status, pdesc, p_drvinfo);
356 }
357
358 /*rx_status->qual = status->signal; */
359 rx_status->signal = status->recvsignalpower + 10;
360 /*rx_status->noise = -status->noise; */
361
362 return true;
363}
364
365void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
366 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
367 struct ieee80211_tx_info *info,
368 struct ieee80211_sta *sta,
369 struct sk_buff *skb, u8 hw_queue,
370 struct rtl_tcb_desc *ptcdesc)
371{
372 struct rtl_priv *rtlpriv = rtl_priv(hw);
373 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
374 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
375 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
376 bool defaultadapter = true;
377 u8 *pdesc = (u8 *) pdesc_tx;
378 u16 seq_number;
379 __le16 fc = hdr->frame_control;
380 u8 fw_qsel = _rtl8723ae_map_hwqueue_to_fwqueue(skb, hw_queue);
381 bool firstseg = ((hdr->seq_ctrl &
382 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
383 bool lastseg = ((hdr->frame_control &
384 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
385 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
386 skb->data, skb->len,
387 PCI_DMA_TODEVICE);
388 u8 bw_40 = 0;
389
390 if (mac->opmode == NL80211_IFTYPE_STATION) {
391 bw_40 = mac->bw_40;
392 } else if (mac->opmode == NL80211_IFTYPE_AP ||
393 mac->opmode == NL80211_IFTYPE_ADHOC) {
394 if (sta)
395 bw_40 = sta->ht_cap.cap &
396 IEEE80211_HT_CAP_SUP_WIDTH_20_40;
397 }
398
399 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
400
401 rtl_get_tcb_desc(hw, info, sta, skb, ptcdesc);
402
403 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723e));
404
405 if (ieee80211_is_nullfunc(fc) || ieee80211_is_ctl(fc)) {
406 firstseg = true;
407 lastseg = true;
408 }
409
410 if (firstseg) {
411 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
412
413 SET_TX_DESC_TX_RATE(pdesc, ptcdesc->hw_rate);
414
415 if (ptcdesc->use_shortgi || ptcdesc->use_shortpreamble)
416 SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
417
418 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
419 SET_TX_DESC_AGG_BREAK(pdesc, 1);
420 SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
421 }
422 SET_TX_DESC_SEQ(pdesc, seq_number);
423
424 SET_TX_DESC_RTS_ENABLE(pdesc, ((ptcdesc->rts_enable &&
425 !ptcdesc->
426 cts_enable) ? 1 : 0));
427 SET_TX_DESC_HW_RTS_ENABLE(pdesc,
428 ((ptcdesc->rts_enable
429 || ptcdesc->cts_enable) ? 1 : 0));
430 SET_TX_DESC_CTS2SELF(pdesc, ((ptcdesc->cts_enable) ? 1 : 0));
431 SET_TX_DESC_RTS_STBC(pdesc, ((ptcdesc->rts_stbc) ? 1 : 0));
432
433 SET_TX_DESC_RTS_RATE(pdesc, ptcdesc->rts_rate);
434 SET_TX_DESC_RTS_BW(pdesc, 0);
435 SET_TX_DESC_RTS_SC(pdesc, ptcdesc->rts_sc);
436 SET_TX_DESC_RTS_SHORT(pdesc,
437 ((ptcdesc->rts_rate <= DESC92_RATE54M) ?
438 (ptcdesc->rts_use_shortpreamble ? 1 : 0)
439 : (ptcdesc->rts_use_shortgi ? 1 : 0)));
440
441 if (bw_40) {
442 if (ptcdesc->packet_bw) {
443 SET_TX_DESC_DATA_BW(pdesc, 1);
444 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
445 } else {
446 SET_TX_DESC_DATA_BW(pdesc, 0);
447 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
448 mac->cur_40_prime_sc);
449 }
450 } else {
451 SET_TX_DESC_DATA_BW(pdesc, 0);
452 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
453 }
454
455 SET_TX_DESC_LINIP(pdesc, 0);
456 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
457
458 if (sta) {
459 u8 ampdu_density = sta->ht_cap.ampdu_density;
460 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
461 }
462
463 if (info->control.hw_key) {
464 struct ieee80211_key_conf *keyconf =
465 info->control.hw_key;
466
467 switch (keyconf->cipher) {
468 case WLAN_CIPHER_SUITE_WEP40:
469 case WLAN_CIPHER_SUITE_WEP104:
470 case WLAN_CIPHER_SUITE_TKIP:
471 SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
472 break;
473 case WLAN_CIPHER_SUITE_CCMP:
474 SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
475 break;
476 default:
477 SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
478 break;
479 }
480 }
481
482 SET_TX_DESC_PKT_ID(pdesc, 0);
483 SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
484
485 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
486 SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
487 SET_TX_DESC_DISABLE_FB(pdesc, 0);
488 SET_TX_DESC_USE_RATE(pdesc, ptcdesc->use_driver_rate ? 1 : 0);
489
490 if (ieee80211_is_data_qos(fc)) {
491 if (mac->rdg_en) {
492 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
493 "Enable RDG function.\n");
494 SET_TX_DESC_RDG_ENABLE(pdesc, 1);
495 SET_TX_DESC_HTC(pdesc, 1);
496 }
497 }
498 }
499
500 SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0));
501 SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0));
502
503 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
504
505 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
506
507 if (rtlpriv->dm.useramask) {
508 SET_TX_DESC_RATE_ID(pdesc, ptcdesc->ratr_index);
509 SET_TX_DESC_MACID(pdesc, ptcdesc->mac_id);
510 } else {
511 SET_TX_DESC_RATE_ID(pdesc, 0xC + ptcdesc->ratr_index);
512 SET_TX_DESC_MACID(pdesc, ptcdesc->ratr_index);
513 }
514
515 if ((!ieee80211_is_data_qos(fc)) && ppsc->fwctrl_lps) {
516 SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1);
517
518 if (!defaultadapter)
519 SET_TX_DESC_HWSEQ_SEL_8723(pdesc, 1);
520 }
521
522 SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 0 : 1));
523
524 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
525 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
526 SET_TX_DESC_BMC(pdesc, 1);
527 }
528
529 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
530}
531
532void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
533 u8 *pdesc, bool firstseg,
534 bool lastseg, struct sk_buff *skb)
535{
536 struct rtl_priv *rtlpriv = rtl_priv(hw);
537 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
538 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
539 u8 fw_queue = QSLT_BEACON;
540 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
541 skb->data, skb->len,
542 PCI_DMA_TODEVICE);
543 __le16 fc = hdr->frame_control;
544
545 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
546
547 if (firstseg)
548 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
549
550 SET_TX_DESC_TX_RATE(pdesc, DESC92_RATE1M);
551
552 SET_TX_DESC_SEQ(pdesc, 0);
553
554 SET_TX_DESC_LINIP(pdesc, 0);
555
556 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
557
558 SET_TX_DESC_FIRST_SEG(pdesc, 1);
559 SET_TX_DESC_LAST_SEG(pdesc, 1);
560
561 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
562
563 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
564
565 SET_TX_DESC_RATE_ID(pdesc, 7);
566 SET_TX_DESC_MACID(pdesc, 0);
567
568 SET_TX_DESC_OWN(pdesc, 1);
569
570 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
571
572 SET_TX_DESC_FIRST_SEG(pdesc, 1);
573 SET_TX_DESC_LAST_SEG(pdesc, 1);
574
575 SET_TX_DESC_OFFSET(pdesc, 0x20);
576
577 SET_TX_DESC_USE_RATE(pdesc, 1);
578
579 if (!ieee80211_is_data_qos(fc)) {
580 SET_TX_DESC_HWSEQ_EN_8723(pdesc, 1);
581 /* SET_TX_DESC_HWSEQ_EN(pdesc, 1); */
582 /* SET_TX_DESC_PKT_ID(pdesc, 8); */
583 }
584
585 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
586 "H2C Tx Cmd Content\n",
587 pdesc, TX_DESC_SIZE);
588}
589
590void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
591{
592 if (istx == true) {
593 switch (desc_name) {
594 case HW_DESC_OWN:
595 SET_TX_DESC_OWN(pdesc, 1);
596 break;
597 case HW_DESC_TX_NEXTDESC_ADDR:
598 SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
599 break;
600 default:
601 RT_ASSERT(false, "ERR txdesc :%d not process\n",
602 desc_name);
603 break;
604 }
605 } else {
606 switch (desc_name) {
607 case HW_DESC_RXOWN:
608 SET_RX_DESC_OWN(pdesc, 1);
609 break;
610 case HW_DESC_RXBUFF_ADDR:
611 SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
612 break;
613 case HW_DESC_RXPKT_LEN:
614 SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
615 break;
616 case HW_DESC_RXERO:
617 SET_RX_DESC_EOR(pdesc, 1);
618 break;
619 default:
620 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
621 desc_name);
622 break;
623 }
624 }
625}
626
627u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
628{
629 u32 ret = 0;
630
631 if (istx == true) {
632 switch (desc_name) {
633 case HW_DESC_OWN:
634 ret = GET_TX_DESC_OWN(pdesc);
635 break;
636 case HW_DESC_TXBUFF_ADDR:
637 ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
638 break;
639 default:
640 RT_ASSERT(false, "ERR txdesc :%d not process\n",
641 desc_name);
642 break;
643 }
644 } else {
645 switch (desc_name) {
646 case HW_DESC_OWN:
647 ret = GET_RX_DESC_OWN(pdesc);
648 break;
649 case HW_DESC_RXPKT_LEN:
650 ret = GET_RX_DESC_PKT_LEN(pdesc);
651 break;
652 default:
653 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
654 desc_name);
655 break;
656 }
657 }
658 return ret;
659}
660
661void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue)
662{
663 struct rtl_priv *rtlpriv = rtl_priv(hw);
664 if (hw_queue == BEACON_QUEUE) {
665 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
666 } else {
667 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
668 BIT(0) << (hw_queue));
669 }
670}
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
new file mode 100644
index 000000000000..ad05b54bc0f1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.h
@@ -0,0 +1,725 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL8723E_TRX_H__
31#define __RTL8723E_TRX_H__
32
33#define TX_DESC_SIZE 64
34#define TX_DESC_AGGR_SUBFRAME_SIZE 32
35
36#define RX_DESC_SIZE 32
37#define RX_DRV_INFO_SIZE_UNIT 8
38
39#define TX_DESC_NEXT_DESC_OFFSET 40
40#define USB_HWDESC_HEADER_LEN 32
41#define CRCLENGTH 4
42
43#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
45#define SET_TX_DESC_OFFSET(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
47#define SET_TX_DESC_BMC(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
49#define SET_TX_DESC_HTC(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
51#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
53#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
55#define SET_TX_DESC_LINIP(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
57#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
59#define SET_TX_DESC_GF(__pdesc, __val) \
60 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
61#define SET_TX_DESC_OWN(__pdesc, __val) \
62 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
63
64#define GET_TX_DESC_PKT_SIZE(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 0, 16)
66#define GET_TX_DESC_OFFSET(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 16, 8)
68#define GET_TX_DESC_BMC(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 24, 1)
70#define GET_TX_DESC_HTC(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 25, 1)
72#define GET_TX_DESC_LAST_SEG(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
74#define GET_TX_DESC_FIRST_SEG(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
76#define GET_TX_DESC_LINIP(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
78#define GET_TX_DESC_NO_ACM(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
80#define GET_TX_DESC_GF(__pdesc) \
81 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
82#define GET_TX_DESC_OWN(__pdesc) \
83 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
84
85#define SET_TX_DESC_MACID(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
87#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
89#define SET_TX_DESC_BK(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
91#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
93#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
95#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
97#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
99#define SET_TX_DESC_PIFS(__pdesc, __val) \
100 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
101#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
103#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
105#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
107#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
108 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
109#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
110 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
111
112#define GET_TX_DESC_MACID(__pdesc) \
113 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
114#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
115 LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
116#define GET_TX_DESC_AGG_BREAK(__pdesc) \
117 LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
118#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
119 LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
120#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
121 LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
122#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
123 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
124#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
125 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
126#define GET_TX_DESC_PIFS(__pdesc) \
127 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
128#define GET_TX_DESC_RATE_ID(__pdesc) \
129 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
130#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
131 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
132#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
133 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
134#define GET_TX_DESC_SEC_TYPE(__pdesc) \
135 LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
136#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
137 LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
138
139#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
141#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
143#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
145#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
147#define SET_TX_DESC_RAW(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
149#define SET_TX_DESC_CCX(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
151#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
153#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
155#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
157#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
158 SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
159#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
161#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
163
164#define GET_TX_DESC_RTS_RC(__pdesc) \
165 LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
166#define GET_TX_DESC_DATA_RC(__pdesc) \
167 LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
168#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
169 LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
170#define GET_TX_DESC_MORE_FRAG(__pdesc) \
171 LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
172#define GET_TX_DESC_RAW(__pdesc) \
173 LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
174#define GET_TX_DESC_CCX(__pdesc) \
175 LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
176#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
177 LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
178#define GET_TX_DESC_ANTSEL_A(__pdesc) \
179 LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
180#define GET_TX_DESC_ANTSEL_B(__pdesc) \
181 LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
182#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
183 LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
184#define GET_TX_DESC_TX_ANTL(__pdesc) \
185 LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
186#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
187 LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
188
189#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
190 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
191#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
192 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
193#define SET_TX_DESC_SEQ(__pdesc, __val) \
194 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
195#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
196 SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
197
198#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
199 LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
200#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
201 LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
202#define GET_TX_DESC_SEQ(__pdesc) \
203 LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
204#define GET_TX_DESC_PKT_ID(__pdesc) \
205 LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
206
207/* For RTL8723 */
208#define SET_TX_DESC_TRIGGER_INT(__pdesc, __val) \
209 SET_BITS_TO_LE_4BYTE(__pdesc+12, 30, 1, __val)
210#define SET_TX_DESC_HWSEQ_EN_8723(__pdesc, __val) \
211 SET_BITS_TO_LE_4BYTE(__pdesc+12, 31, 1, __val)
212#define SET_TX_DESC_HWSEQ_SEL_8723(__pTxDesc, __Value) \
213 SET_BITS_TO_LE_4BYTE(__pTxDesc+16, 6, 2, __Value)
214
215#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
216 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
217#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
218 SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
219#define SET_TX_DESC_QOS(__pdesc, __val) \
220 SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
221#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
222 SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
223#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
224 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
225#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
226 SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
227#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
228 SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
229#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
230 SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
231#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
232 SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
233#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
234 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
235#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
236 SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
237#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
238 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
239#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
240 SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
241#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
242 SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
243#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
245#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
247#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
249#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
250 SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
251#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
252 SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
253#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
254 SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
255#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
256 SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
257
258#define GET_TX_DESC_RTS_RATE(__pdesc) \
259 LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
260#define GET_TX_DESC_AP_DCFE(__pdesc) \
261 LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
262#define GET_TX_DESC_QOS(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
264#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
266#define GET_TX_DESC_USE_RATE(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
268#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
270#define GET_TX_DESC_DISABLE_FB(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
272#define GET_TX_DESC_CTS2SELF(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
274#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
276#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
278#define GET_TX_DESC_PORT_ID(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
280#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
282#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
284#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
285 LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
286#define GET_TX_DESC_TX_STBC(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
288#define GET_TX_DESC_DATA_SHORT(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
290#define GET_TX_DESC_DATA_BW(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
292#define GET_TX_DESC_RTS_SHORT(__pdesc) \
293 LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
294#define GET_TX_DESC_RTS_BW(__pdesc) \
295 LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
296#define GET_TX_DESC_RTS_SC(__pdesc) \
297 LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
298#define GET_TX_DESC_RTS_STBC(__pdesc) \
299 LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
300
301#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
302 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
303#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
304 SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
305#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
306 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
307#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
308 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
309#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
310 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
311#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
312 SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
313#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
314 SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
315#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
316 SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
317
318#define GET_TX_DESC_TX_RATE(__pdesc) \
319 LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
320#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
321 LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
322#define GET_TX_DESC_CCX_TAG(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
324#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
326#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
327 LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
328#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
329 LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
330#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
331 LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
332#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
333 LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
334
335#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
336 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
337#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
338 SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
339#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
340 SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
341#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
342 SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
343#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
344 SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
345#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
346 SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
347#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
348 SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
349#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val)\
350 SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
351
352#define GET_TX_DESC_TXAGC_A(__pdesc) \
353 LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
354#define GET_TX_DESC_TXAGC_B(__pdesc) \
355 LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
356#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
357 LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
358#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
359 LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
360#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
361 LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
362#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
363 LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
364#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
365 LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
366#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
367 LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
368
369#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
370 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
371#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
372 SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
373#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
374 SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
375#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
376 SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
377#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
378 SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
379
380#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
381 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
382#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
383 LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
384#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
385 LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
386#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
387 LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
388#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
389 LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
390
391#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
392 SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
393#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
394 SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
395
396#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
397 LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
398#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
399 LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
400
401#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
402 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
403#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
404 SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
405
406#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
407 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
408#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
409 LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
410
411#define GET_RX_DESC_PKT_LEN(__pdesc) \
412 LE_BITS_TO_4BYTE(__pdesc, 0, 14)
413#define GET_RX_DESC_CRC32(__pdesc) \
414 LE_BITS_TO_4BYTE(__pdesc, 14, 1)
415#define GET_RX_DESC_ICV(__pdesc) \
416 LE_BITS_TO_4BYTE(__pdesc, 15, 1)
417#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
418 LE_BITS_TO_4BYTE(__pdesc, 16, 4)
419#define GET_RX_DESC_SECURITY(__pdesc) \
420 LE_BITS_TO_4BYTE(__pdesc, 20, 3)
421#define GET_RX_DESC_QOS(__pdesc) \
422 LE_BITS_TO_4BYTE(__pdesc, 23, 1)
423#define GET_RX_DESC_SHIFT(__pdesc) \
424 LE_BITS_TO_4BYTE(__pdesc, 24, 2)
425#define GET_RX_DESC_PHYST(__pdesc) \
426 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
427#define GET_RX_DESC_SWDEC(__pdesc) \
428 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
429#define GET_RX_DESC_LS(__pdesc) \
430 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
431#define GET_RX_DESC_FS(__pdesc) \
432 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
433#define GET_RX_DESC_EOR(__pdesc) \
434 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
435#define GET_RX_DESC_OWN(__pdesc) \
436 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
437
438#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
439 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
440#define SET_RX_DESC_EOR(__pdesc, __val) \
441 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
442#define SET_RX_DESC_OWN(__pdesc, __val) \
443 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
444
445#define GET_RX_DESC_MACID(__pdesc) \
446 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
447#define GET_RX_DESC_TID(__pdesc) \
448 LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
449#define GET_RX_DESC_HWRSVD(__pdesc) \
450 LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
451#define GET_RX_DESC_PAGGR(__pdesc) \
452 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
453#define GET_RX_DESC_FAGGR(__pdesc) \
454 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
455#define GET_RX_DESC_A1_FIT(__pdesc) \
456 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
457#define GET_RX_DESC_A2_FIT(__pdesc) \
458 LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
459#define GET_RX_DESC_PAM(__pdesc) \
460 LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
461#define GET_RX_DESC_PWR(__pdesc) \
462 LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
463#define GET_RX_DESC_MD(__pdesc) \
464 LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
465#define GET_RX_DESC_MF(__pdesc) \
466 LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
467#define GET_RX_DESC_TYPE(__pdesc) \
468 LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
469#define GET_RX_DESC_MC(__pdesc) \
470 LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
471#define GET_RX_DESC_BC(__pdesc) \
472 LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
473#define GET_RX_DESC_SEQ(__pdesc) \
474 LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
475#define GET_RX_DESC_FRAG(__pdesc) \
476 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
477#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
478 LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
479#define GET_RX_DESC_NEXT_IND(__pdesc) \
480 LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
481#define GET_RX_DESC_RSVD(__pdesc) \
482 LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
483
484#define GET_RX_DESC_RXMCS(__pdesc) \
485 LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
486#define GET_RX_DESC_RXHT(__pdesc) \
487 LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
488#define GET_RX_DESC_SPLCP(__pdesc) \
489 LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
490#define GET_RX_DESC_BW(__pdesc) \
491 LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
492#define GET_RX_DESC_HTC(__pdesc) \
493 LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
494#define GET_RX_DESC_HWPC_ERR(__pdesc) \
495 LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
496#define GET_RX_DESC_HWPC_IND(__pdesc) \
497 LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
498#define GET_RX_DESC_IV0(__pdesc) \
499 LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
500
501#define GET_RX_DESC_IV1(__pdesc) \
502 LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
503#define GET_RX_DESC_TSFL(__pdesc) \
504 LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
505
506#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
507 LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
508#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
509 LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
510
511#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
512 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
513#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
514 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
515
/*
 * Zero the writable portion of a PCI TX descriptor.  At most the first
 * TX_DESC_NEXT_DESC_OFFSET bytes are cleared so the next-descriptor link
 * (and anything beyond it) set up at ring-init time is preserved.
 *
 * Fix: the macro argument _size was used unparenthesized in the
 * comparison, so an expression argument (e.g. a ? b : c) would mis-bind.
 */
#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size)		\
do {								\
	if ((_size) > TX_DESC_NEXT_DESC_OFFSET)			\
		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);	\
	else							\
		memset(__pdesc, 0, (_size));			\
} while (0)
523
524#define RTL8723E_RX_HAL_IS_CCK_RATE(rxmcs) \
525 ((rxmcs) == DESC92_RATE1M || \
526 (rxmcs) == DESC92_RATE2M || \
527 (rxmcs) == DESC92_RATE5_5M || \
528 (rxmcs) == DESC92_RATE11M)
529
/*
 * PHY status report the hardware appends after the RX descriptor (the
 * "driver info" area, sized in RX_DRV_INFO_SIZE_UNIT chunks).  The layout
 * must match the hardware byte-for-byte, hence __packed; field meanings
 * below follow the names — confirm against the 8723E datasheet.
 */
struct rx_fwinfo_8723e {
	u8 gain_trsw[4];	/* per-path gain / TR-switch readings */
	u8 pwdb_all;		/* combined power-in-dB reading */
	u8 cfosho[4];		/* carrier freq offset (short) — presumed */
	u8 cfotail[4];		/* carrier freq offset (tail) — presumed */
	char rxevm[2];		/* per-stream EVM, signed */
	char rxsnr[4];		/* per-path SNR, signed */
	u8 pdsnr[2];
	u8 csi_current[2];
	u8 csi_target[2];
	u8 sigevm;
	u8 max_ex_pwr;
	u8 ex_intf_flag:1;
	u8 sgi_en:1;		/* short guard interval used */
	u8 rxsc:2;		/* RX sub-channel */
	u8 reserve:4;
} __packed;
547
/*
 * Software view of the 8723E PCI TX descriptor (TX_DESC_SIZE bytes).
 * Each commented group below packs to exactly one 32-bit word, matching
 * the byte offsets used by the SET_TX_DESC_xxx/GET_TX_DESC_xxx accessor
 * macros above (which are the preferred way to touch these fields, since
 * they handle the little-endian layout explicitly).
 */
struct tx_desc_8723e {
	/* DWORD 0 — offset 0 */
	u32 pktsize:16;
	u32 offset:8;
	u32 bmc:1;
	u32 htc:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 linip:1;
	u32 noacm:1;
	u32 gf:1;
	u32 own:1;		/* descriptor owned by hardware when set */

	/* DWORD 1 — offset 4 */
	u32 macid:5;
	u32 agg_en:1;
	u32 bk:1;
	u32 rdg_en:1;
	u32 queuesel:5;
	u32 rd_nav_ext:1;
	u32 lsig_txop_en:1;
	u32 pifs:1;
	u32 rateid:4;
	u32 nav_usehdr:1;
	u32 en_descid:1;
	u32 sectype:2;
	u32 pktoffset:8;

	/* DWORD 2 — offset 8 */
	u32 rts_rc:6;
	u32 data_rc:6;
	u32 rsvd0:2;
	u32 bar_retryht:2;
	u32 rsvd1:1;
	u32 morefrag:1;
	u32 raw:1;
	u32 ccx:1;
	u32 ampdudensity:3;
	u32 rsvd2:1;
	u32 ant_sela:1;
	u32 ant_selb:1;
	u32 txant_cck:2;
	u32 txant_l:2;
	u32 txant_ht:2;

	/* DWORD 3 — offset 12 */
	u32 nextheadpage:8;
	u32 tailpage:8;
	u32 seq:12;
	u32 pktid:4;

	/* DWORD 4 — offset 16 */
	u32 rtsrate:5;
	u32 apdcfe:1;
	u32 qos:1;
	u32 hwseq_enable:1;
	u32 userrate:1;
	u32 dis_rtsfb:1;
	u32 dis_datafb:1;
	u32 cts2self:1;
	u32 rts_en:1;
	u32 hwrts_en:1;
	u32 portid:1;
	u32 rsvd3:3;
	u32 waitdcts:1;
	u32 cts2ap_en:1;
	u32 txsc:2;
	u32 stbc:2;
	u32 txshort:1;
	u32 txbw:1;
	u32 rtsshort:1;
	u32 rtsbw:1;
	u32 rtssc:2;
	u32 rtsstbc:2;

	/* DWORD 5 — offset 20 */
	u32 txrate:6;
	u32 shortgi:1;
	u32 ccxt:1;
	u32 txrate_fb_lmt:5;
	u32 rtsrate_fb_lmt:4;
	u32 retrylmt_en:1;
	u32 txretrylmt:6;
	u32 usb_txaggnum:8;

	/* DWORD 6 — offset 24 */
	u32 txagca:5;
	u32 txagcb:5;
	u32 usemaxlen:1;
	u32 maxaggnum:5;
	u32 mcsg1maxlen:4;
	u32 mcsg2maxlen:4;
	u32 mcsg3maxlen:4;
	u32 mcs7sgimaxlen:4;

	/* DWORD 7 — offset 28 */
	u32 txbuffersize:16;
	u32 mcsg4maxlen:4;
	u32 mcsg5maxlen:4;
	u32 mcsg6maxlen:4;
	u32 mcsg15sgimaxlen:4;

	/* DWORDs 8-11 — buffer and next-descriptor DMA addresses
	 * (low/high halves for 64-bit addressing) */
	u32 txbuffaddr;
	u32 txbufferaddr64;
	u32 nextdescaddress;
	u32 nextdescaddress64;

	u32 reserve_pass_pcie_mm_limit[4];
} __packed;
649
/*
 * Software view of the 8723E PCI RX descriptor (RX_DESC_SIZE bytes).
 * Each commented group packs to one 32-bit word, mirroring the byte
 * offsets used by the GET_RX_DESC_xxx/SET_RX_DESC_xxx accessor macros.
 */
struct rx_desc_8723e {
	/* DWORD 0 — offset 0 */
	u32 length:14;		/* received packet length */
	u32 crc32:1;		/* CRC error flag */
	u32 icverror:1;
	u32 drv_infosize:4;	/* driver-info size, RX_DRV_INFO_SIZE_UNITs */
	u32 security:3;
	u32 qos:1;
	u32 shift:2;
	u32 phystatus:1;	/* PHY status report present */
	u32 swdec:1;		/* needs software decryption */
	u32 lastseg:1;
	u32 firstseg:1;
	u32 eor:1;		/* end of ring */
	u32 own:1;		/* descriptor owned by hardware when set */

	/* DWORD 1 — offset 4 */
	u32 macid:5;
	u32 tid:4;
	u32 hwrsvd:5;
	u32 paggr:1;
	u32 faggr:1;
	u32 a1_fit:4;
	u32 a2_fit:4;
	u32 pam:1;
	u32 pwr:1;
	u32 moredata:1;
	u32 morefrag:1;
	u32 type:2;
	u32 mc:1;		/* multicast */
	u32 bc:1;		/* broadcast */

	/* DWORD 2 — offset 8 */
	u32 seq:12;
	u32 frag:4;
	u32 nextpktlen:14;
	u32 nextind:1;
	u32 rsvd:1;

	/* DWORD 3 — offset 12 */
	u32 rxmcs:6;		/* received rate index */
	u32 rxht:1;		/* received as HT frame */
	u32 amsdu:1;
	u32 splcp:1;
	u32 bandwidth:1;
	u32 htc:1;
	u32 tcpchk_rpt:1;
	u32 ipcchk_rpt:1;
	u32 tcpchk_valid:1;
	u32 hwpcerr:1;
	u32 hwpcind:1;
	u32 iv0:16;

	/* DWORD 4 — offset 16 */
	u32 iv1;

	/* DWORD 5 — offset 20 */
	u32 tsfl;		/* low 32 bits of the TSF timestamp */

	/* DWORDs 6-7 — offset 24: RX buffer DMA address (low/high) */
	u32 bufferaddress;
	u32 bufferaddress64;

} __packed;
707
708void rtl8723ae_tx_fill_desc(struct ieee80211_hw *hw,
709 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
710 struct ieee80211_tx_info *info,
711 struct ieee80211_sta *sta,
712 struct sk_buff *skb, u8 hw_queue,
713 struct rtl_tcb_desc *ptcb_desc);
714bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
715 struct rtl_stats *status,
716 struct ieee80211_rx_status *rx_status,
717 u8 *pdesc, struct sk_buff *skb);
718void rtl8723ae_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
719u32 rtl8723ae_get_desc(u8 *pdesc, bool istx, u8 desc_name);
720void rtl8723ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
721void rtl8723ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
722 bool b_firstseg, bool b_lastseg,
723 struct sk_buff *skb);
724
725#endif
diff --git a/drivers/net/wireless/rtlwifi/stats.c b/drivers/net/wireless/rtlwifi/stats.c
new file mode 100644
index 000000000000..8ed31744a054
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/stats.c
@@ -0,0 +1,268 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29#include "wifi.h"
30#include "stats.h"
31#include <linux/export.h>
32
33u8 rtl_query_rxpwrpercentage(char antpower)
34{
35 if ((antpower <= -100) || (antpower >= 20))
36 return 0;
37 else if (antpower >= 0)
38 return 100;
39 else
40 return 100 + antpower;
41}
42EXPORT_SYMBOL(rtl_query_rxpwrpercentage);
43
44u8 rtl_evm_db_to_percentage(char value)
45{
46 char ret_val;
47 ret_val = value;
48
49 if (ret_val >= 0)
50 ret_val = 0;
51 if (ret_val <= -33)
52 ret_val = -33;
53 ret_val = 0 - ret_val;
54 ret_val *= 3;
55 if (ret_val == 99)
56 ret_val = 100;
57
58 return ret_val;
59}
60EXPORT_SYMBOL(rtl_evm_db_to_percentage);
61
62static long rtl_translate_todbm(struct ieee80211_hw *hw,
63 u8 signal_strength_index)
64{
65 long signal_power;
66
67 signal_power = (long)((signal_strength_index + 1) >> 1);
68 signal_power -= 95;
69 return signal_power;
70}
71
/*
 * Rescale a raw 0..100 signal value onto the piecewise-linear curve
 * reported to userspace; values outside 0..100 pass through unchanged.
 */
long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig)
{
	/* Exact mappings for the bottom of the scale (currsig 1..4). */
	static const long low_map[] = { 0, 9, 18, 27, 36 };

	if (currsig >= 1 && currsig <= 4)
		return low_map[currsig];
	if (currsig >= 5 && currsig <= 20)
		return 42 + ((currsig - 5) * 2) / 3;
	if (currsig >= 21 && currsig <= 30)
		return 54 + (currsig - 20);
	if (currsig >= 31 && currsig <= 40)
		return 66 + (currsig - 30);
	if (currsig >= 41 && currsig <= 60)
		return 78 + (currsig - 40) / 2;
	if (currsig >= 61 && currsig <= 100)
		return 90 + (currsig - 60) / 4;

	/* Out-of-range (including 0) is returned as-is. */
	return currsig;
}
99EXPORT_SYMBOL(rtl_signal_scale_mapping);
100
/*
 * Fold one frame's signal-strength reading into the sliding-window UI
 * RSSI average (reported as dBm), and — for non-CCK frames — into the
 * per-RF-path exponentially smoothed RSSI percentages.
 */
static void rtl_process_ui_rssi(struct ieee80211_hw *hw,
				struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 rfpath;
	u32 last_rssi, tmpval;

	rtlpriv->stats.rssi_calculate_cnt++;

	/* Ring buffer of at most PHY_RSSI_SLID_WIN_MAX samples: once the
	 * window is full, subtract the oldest sample before overwriting. */
	if (rtlpriv->stats.ui_rssi.total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
		rtlpriv->stats.ui_rssi.total_num = PHY_RSSI_SLID_WIN_MAX;
		last_rssi = rtlpriv->stats.ui_rssi.elements[
			rtlpriv->stats.ui_rssi.index];
		rtlpriv->stats.ui_rssi.total_val -= last_rssi;
	}
	rtlpriv->stats.ui_rssi.total_val += pstatus->signalstrength;
	rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.index++] =
	    pstatus->signalstrength;
	if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
		rtlpriv->stats.ui_rssi.index = 0;	/* wrap the ring */
	/* Window average -> dBm for the UI. */
	tmpval = rtlpriv->stats.ui_rssi.total_val /
		rtlpriv->stats.ui_rssi.total_num;
	rtlpriv->stats.signal_strength = rtl_translate_todbm(hw,
		(u8) tmpval);
	pstatus->rssi = rtlpriv->stats.signal_strength;

	/* CCK frames carry no per-path MIMO signal readings. */
	if (pstatus->is_cck)
		return;

	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
	     rfpath++) {
		/* Seed the smoothed value with the first reading. */
		if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    pstatus->rx_mimo_signalstrength[rfpath];

		}
		/* Exponential smoothing with factor RX_SMOOTH_FACTOR;
		 * the +1 on a rising signal lets the (integer) average
		 * actually converge upward. */
		if (pstatus->rx_mimo_signalstrength[rfpath] >
		    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_signalstrength[rfpath])) /
			    (RX_SMOOTH_FACTOR);
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
		} else {
			rtlpriv->stats.rx_rssi_percentage[rfpath] =
			    ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_signalstrength[rfpath])) /
			    (RX_SMOOTH_FACTOR);
		}
	}
}
156
157static void rtl_update_rxsignalstatistics(struct ieee80211_hw *hw,
158 struct rtl_stats *pstatus)
159{
160 struct rtl_priv *rtlpriv = rtl_priv(hw);
161 int weighting = 0;
162
163 if (rtlpriv->stats.recv_signal_power == 0)
164 rtlpriv->stats.recv_signal_power = pstatus->recvsignalpower;
165 if (pstatus->recvsignalpower > rtlpriv->stats.recv_signal_power)
166 weighting = 5;
167 else if (pstatus->recvsignalpower < rtlpriv->stats.recv_signal_power)
168 weighting = (-5);
169 rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
170 5 + pstatus->recvsignalpower + weighting) / 6;
171}
172
/*
 * Smooth the frame's rx_pwdb_all reading into the "undecorated smoothed
 * PWDB" average.  In non-station modes (AP/ad-hoc) the average is kept
 * per transmitting station, looked up by source address under RCU;
 * in station mode the driver-global dm.undec_sm_pwdb is used.
 */
static void rtl_process_pwdb(struct ieee80211_hw *hw, struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_sta_info *drv_priv = NULL;
	struct ieee80211_sta *sta = NULL;
	long undec_sm_pwdb;

	/* RCU protects the station lookup and drv_priv access below. */
	rcu_read_lock();
	if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
		sta = rtl_find_sta(hw, pstatus->psaddr);

	/* adhoc or ap mode */
	if (sta) {
		drv_priv = (struct rtl_sta_info *) sta->drv_priv;
		undec_sm_pwdb = drv_priv->rssi_stat.undec_sm_pwdb;
	} else {
		undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
	}

	/* A negative stored value means "not initialised yet". */
	if (undec_sm_pwdb < 0)
		undec_sm_pwdb = pstatus->rx_pwdb_all;
	/* Exponential smoothing (factor RX_SMOOTH_FACTOR); the +1 on a
	 * rising value lets the integer average converge upward. */
	if (pstatus->rx_pwdb_all > (u32) undec_sm_pwdb) {
		undec_sm_pwdb = (((undec_sm_pwdb) *
		      (RX_SMOOTH_FACTOR - 1)) +
		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
		undec_sm_pwdb = undec_sm_pwdb + 1;
	} else {
		undec_sm_pwdb = (((undec_sm_pwdb) * (RX_SMOOTH_FACTOR - 1)) +
		     (pstatus->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
	}

	/* Write the smoothed value back to wherever it was read from. */
	if (sta) {
		drv_priv->rssi_stat.undec_sm_pwdb = undec_sm_pwdb;
	} else {
		rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
	}
	rcu_read_unlock();

	rtl_update_rxsignalstatistics(hw, pstatus);
}
213
/*
 * Fold one frame's signal-quality reading into the sliding-window UI
 * link-quality average and the per-stream smoothed EVM percentages.
 * Frames reporting a quality of 0 are ignored.
 */
static void rtl_process_ui_link_quality(struct ieee80211_hw *hw,
					struct rtl_stats *pstatus)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 last_evm, n_stream, tmpval;

	if (pstatus->signalquality == 0)
		return;

	/* Ring buffer of at most PHY_LINKQUALITY_SLID_WIN_MAX samples:
	 * once full, subtract the oldest sample before overwriting. */
	if (rtlpriv->stats.ui_link_quality.total_num++ >=
	    PHY_LINKQUALITY_SLID_WIN_MAX) {
		rtlpriv->stats.ui_link_quality.total_num =
		    PHY_LINKQUALITY_SLID_WIN_MAX;
		last_evm = rtlpriv->stats.ui_link_quality.elements[
			rtlpriv->stats.ui_link_quality.index];
		rtlpriv->stats.ui_link_quality.total_val -= last_evm;
	}
	rtlpriv->stats.ui_link_quality.total_val += pstatus->signalquality;
	rtlpriv->stats.ui_link_quality.elements[
		rtlpriv->stats.ui_link_quality.index++] =
	    pstatus->signalquality;
	if (rtlpriv->stats.ui_link_quality.index >=
	    PHY_LINKQUALITY_SLID_WIN_MAX)
		rtlpriv->stats.ui_link_quality.index = 0;	/* wrap */
	/* Window average becomes the UI quality figures. */
	tmpval = rtlpriv->stats.ui_link_quality.total_val /
	    rtlpriv->stats.ui_link_quality.total_num;
	rtlpriv->stats.signal_quality = tmpval;
	rtlpriv->stats.last_sigstrength_inpercent = tmpval;
	/* Exponentially smooth the per-stream EVM; -1 marks a stream
	 * with no reading for this frame. */
	for (n_stream = 0; n_stream < 2; n_stream++) {
		if (pstatus->rx_mimo_sig_qual[n_stream] != -1) {
			if (rtlpriv->stats.rx_evm_percentage[n_stream] == 0) {
				/* Seed with the first reading. */
				rtlpriv->stats.rx_evm_percentage[n_stream] =
				    pstatus->rx_mimo_sig_qual[n_stream];
			}
			rtlpriv->stats.rx_evm_percentage[n_stream] =
			    ((rtlpriv->stats.rx_evm_percentage[n_stream]
			      * (RX_SMOOTH_FACTOR - 1)) +
			     (pstatus->rx_mimo_sig_qual[n_stream] * 1)) /
			    (RX_SMOOTH_FACTOR);
		}
	}
}
256
257void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
258 struct rtl_stats *pstatus)
259{
260
261 if (!pstatus->packet_matchbssid)
262 return;
263
264 rtl_process_ui_rssi(hw, pstatus);
265 rtl_process_pwdb(hw, pstatus);
266 rtl_process_ui_link_quality(hw, pstatus);
267}
268EXPORT_SYMBOL(rtl_process_phyinfo);
diff --git a/drivers/net/wireless/rtlwifi/stats.h b/drivers/net/wireless/rtlwifi/stats.h
new file mode 100644
index 000000000000..0dbdc5203830
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/stats.h
@@ -0,0 +1,46 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2012 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_STATS_H__
31#define __RTL_STATS_H__
32
33#define PHY_RSSI_SLID_WIN_MAX 100
34#define PHY_LINKQUALITY_SLID_WIN_MAX 20
35#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
36
37/* Rx smooth factor */
38#define RX_SMOOTH_FACTOR 20
39
40u8 rtl_query_rxpwrpercentage(char antpower);
41u8 rtl_evm_db_to_percentage(char value);
42long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig);
43void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer,
44 struct rtl_stats *pstatus);
45
46#endif
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index e3ea4b346889..29f0969e4ba0 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -940,7 +940,7 @@ static struct rtl_intf_ops rtl_usb_ops = {
940 .waitq_insert = rtl_usb_tx_chk_waitq_insert, 940 .waitq_insert = rtl_usb_tx_chk_waitq_insert,
941}; 941};
942 942
943int __devinit rtl_usb_probe(struct usb_interface *intf, 943int rtl_usb_probe(struct usb_interface *intf,
944 const struct usb_device_id *id) 944 const struct usb_device_id *id)
945{ 945{
946 int err; 946 int err;
diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h
index 43846b329153..5235136f6dd2 100644
--- a/drivers/net/wireless/rtlwifi/usb.h
+++ b/drivers/net/wireless/rtlwifi/usb.h
@@ -156,7 +156,7 @@ struct rtl_usb_priv {
156 156
157 157
158 158
159int __devinit rtl_usb_probe(struct usb_interface *intf, 159int rtl_usb_probe(struct usb_interface *intf,
160 const struct usb_device_id *id); 160 const struct usb_device_id *id);
161void rtl_usb_disconnect(struct usb_interface *intf); 161void rtl_usb_disconnect(struct usb_interface *intf);
162int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); 162int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f1b6bc693b0a..21a5f4f4a135 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -198,15 +198,15 @@ struct bb_reg_def {
198 u32 rftxgain_stage; 198 u32 rftxgain_stage;
199 u32 rfhssi_para1; 199 u32 rfhssi_para1;
200 u32 rfhssi_para2; 200 u32 rfhssi_para2;
201 u32 rfswitch_control; 201 u32 rfsw_ctrl;
202 u32 rfagc_control1; 202 u32 rfagc_control1;
203 u32 rfagc_control2; 203 u32 rfagc_control2;
204 u32 rfrxiq_imbalance; 204 u32 rfrxiq_imbal;
205 u32 rfrx_afe; 205 u32 rfrx_afe;
206 u32 rftxiq_imbalance; 206 u32 rftxiq_imbal;
207 u32 rftx_afe; 207 u32 rftx_afe;
208 u32 rflssi_readback; 208 u32 rf_rb; /* rflssi_readback */
209 u32 rflssi_readbackpi; 209 u32 rf_rbpi; /* rflssi_readbackpi */
210}; 210};
211 211
212enum io_type { 212enum io_type {
@@ -350,6 +350,11 @@ enum rt_oem_id {
350 RT_CID_819x_WNC_COREGA = 31, 350 RT_CID_819x_WNC_COREGA = 31,
351 RT_CID_819x_Foxcoon = 32, 351 RT_CID_819x_Foxcoon = 32,
352 RT_CID_819x_DELL = 33, 352 RT_CID_819x_DELL = 33,
353 RT_CID_819x_PRONETS = 34,
354 RT_CID_819x_Edimax_ASUS = 35,
355 RT_CID_NETGEAR = 36,
356 RT_CID_PLANEX = 37,
357 RT_CID_CC_C = 38,
353}; 358};
354 359
355enum hw_descs { 360enum hw_descs {
@@ -505,6 +510,7 @@ enum rtl_var_map {
505 RTL_IMR_ROK, /*Receive DMA OK Interrupt */ 510 RTL_IMR_ROK, /*Receive DMA OK Interrupt */
506 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK | 511 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK |
507 * RTL_IMR_TBDER) */ 512 * RTL_IMR_TBDER) */
513 RTL_IMR_C2HCMD, /*fw interrupt*/
508 514
509 /*CCK Rates, TxHT = 0 */ 515 /*CCK Rates, TxHT = 0 */
510 RTL_RC_CCK_RATE1M, 516 RTL_RC_CCK_RATE1M,
@@ -661,6 +667,11 @@ enum ba_action {
661 ACT_DELBA = 2, 667 ACT_DELBA = 2,
662}; 668};
663 669
670enum rt_polarity_ctl {
671 RT_POLARITY_LOW_ACT = 0,
672 RT_POLARITY_HIGH_ACT = 1,
673};
674
664struct octet_string { 675struct octet_string {
665 u8 *octet; 676 u8 *octet;
666 u16 length; 677 u16 length;
@@ -885,7 +896,7 @@ struct rtl_phy {
885 u8 pwrgroup_cnt; 896 u8 pwrgroup_cnt;
886 u8 cck_high_power; 897 u8 cck_high_power;
887 /* MAX_PG_GROUP groups of pwr diff by rates */ 898 /* MAX_PG_GROUP groups of pwr diff by rates */
888 u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16]; 899 u32 mcs_offset[MAX_PG_GROUP][16];
889 u8 default_initialgain[4]; 900 u8 default_initialgain[4];
890 901
891 /* the current Tx power level */ 902 /* the current Tx power level */
@@ -903,6 +914,8 @@ struct rtl_phy {
903 u8 num_total_rfpath; 914 u8 num_total_rfpath;
904 struct phy_parameters hwparam_tables[MAX_TAB]; 915 struct phy_parameters hwparam_tables[MAX_TAB];
905 u16 rf_pathmap; 916 u16 rf_pathmap;
917
918 enum rt_polarity_ctl polarity_ctl;
906}; 919};
907 920
908#define MAX_TID_COUNT 9 921#define MAX_TID_COUNT 9
@@ -933,7 +946,7 @@ struct rtl_tid_data {
933}; 946};
934 947
935struct rssi_sta { 948struct rssi_sta {
936 long undecorated_smoothed_pwdb; 949 long undec_sm_pwdb;
937}; 950};
938 951
939struct rtl_sta_info { 952struct rtl_sta_info {
@@ -1042,13 +1055,64 @@ struct rtl_mac {
1042 /*QOS & EDCA */ 1055 /*QOS & EDCA */
1043 struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE]; 1056 struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
1044 struct rtl_qos_parameters ac[AC_MAX]; 1057 struct rtl_qos_parameters ac[AC_MAX];
1058
1059 /* counters */
1060 u64 last_txok_cnt;
1061 u64 last_rxok_cnt;
1062 u32 last_bt_edca_ul;
1063 u32 last_bt_edca_dl;
1064};
1065
1066struct btdm_8723 {
1067 bool all_off;
1068 bool agc_table_en;
1069 bool adc_back_off_on;
1070 bool b2_ant_hid_en;
1071 bool low_penalty_rate_adaptive;
1072 bool rf_rx_lpf_shrink;
1073 bool reject_aggre_pkt;
1074 bool tra_tdma_on;
1075 u8 tra_tdma_nav;
1076 u8 tra_tdma_ant;
1077 bool tdma_on;
1078 u8 tdma_ant;
1079 u8 tdma_nav;
1080 u8 tdma_dac_swing;
1081 u8 fw_dac_swing_lvl;
1082 bool ps_tdma_on;
1083 u8 ps_tdma_byte[5];
1084 bool pta_on;
1085 u32 val_0x6c0;
1086 u32 val_0x6c8;
1087 u32 val_0x6cc;
1088 bool sw_dac_swing_on;
1089 u32 sw_dac_swing_lvl;
1090 u32 wlan_act_hi;
1091 u32 wlan_act_lo;
1092 u32 bt_retry_index;
1093 bool dec_bt_pwr;
1094 bool ignore_wlan_act;
1095};
1096
1097struct bt_coexist_8723 {
1098 u32 high_priority_tx;
1099 u32 high_priority_rx;
1100 u32 low_priority_tx;
1101 u32 low_priority_rx;
1102 u8 c2h_bt_info;
1103 bool c2h_bt_info_req_sent;
1104 bool c2h_bt_inquiry_page;
1105 u32 bt_inq_page_start_time;
1106 u8 bt_retry_cnt;
1107 u8 c2h_bt_info_original;
1108 u8 bt_inquiry_page_cnt;
1109 struct btdm_8723 btdm;
1045}; 1110};
1046 1111
1047struct rtl_hal { 1112struct rtl_hal {
1048 struct ieee80211_hw *hw; 1113 struct ieee80211_hw *hw;
1049 1114 struct bt_coexist_8723 hal_coex_8723;
1050 bool up_first_time; 1115 bool up_first_time;
1051 bool first_init;
1052 bool being_init_adapter; 1116 bool being_init_adapter;
1053 bool bbrf_ready; 1117 bool bbrf_ready;
1054 1118
@@ -1131,9 +1195,9 @@ struct rtl_security {
1131 1195
1132struct rtl_dm { 1196struct rtl_dm {
1133 /*PHY status for Dynamic Management */ 1197 /*PHY status for Dynamic Management */
1134 long entry_min_undecoratedsmoothed_pwdb; 1198 long entry_min_undec_sm_pwdb;
1135 long undecorated_smoothed_pwdb; /*out dm */ 1199 long undec_sm_pwdb; /*out dm */
1136 long entry_max_undecoratedsmoothed_pwdb; 1200 long entry_max_undec_sm_pwdb;
1137 bool dm_initialgain_enable; 1201 bool dm_initialgain_enable;
1138 bool dynamic_txpower_enable; 1202 bool dynamic_txpower_enable;
1139 bool current_turbo_edca; 1203 bool current_turbo_edca;
@@ -1209,7 +1273,7 @@ struct rtl_efuse {
1209 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX]; 1273 u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX];
1210 u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G]; 1274 u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G];
1211 u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX]; 1275 u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX];
1212 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX]; 1276 u8 eprom_chnl_txpwr_ht40_2sdf[2][CHANNEL_GROUP_MAX];
1213 u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G]; 1277 u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G];
1214 u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ 1278 u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
1215 u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ 1279 u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */
@@ -1312,6 +1376,7 @@ struct rtl_ps_ctl {
1312}; 1376};
1313 1377
1314struct rtl_stats { 1378struct rtl_stats {
1379 u8 psaddr[ETH_ALEN];
1315 u32 mac_time[2]; 1380 u32 mac_time[2];
1316 s8 rssi; 1381 s8 rssi;
1317 u8 signal; 1382 u8 signal;
@@ -1351,7 +1416,7 @@ struct rtl_stats {
1351 bool rx_is40Mhzpacket; 1416 bool rx_is40Mhzpacket;
1352 u32 rx_pwdb_all; 1417 u32 rx_pwdb_all;
1353 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ 1418 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
1354 s8 rx_mimo_signalquality[2]; 1419 s8 rx_mimo_sig_qual[2];
1355 bool packet_matchbssid; 1420 bool packet_matchbssid;
1356 bool is_cck; 1421 bool is_cck;
1357 bool is_ht; 1422 bool is_ht;
@@ -1503,6 +1568,10 @@ struct rtl_hal_ops {
1503 void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t); 1568 void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t);
1504 void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw); 1569 void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw);
1505 void (*dm_dynamic_txpower) (struct ieee80211_hw *hw); 1570 void (*dm_dynamic_txpower) (struct ieee80211_hw *hw);
1571 void (*c2h_command_handle) (struct ieee80211_hw *hw);
1572 void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw,
1573 bool mstate);
1574 void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw);
1506}; 1575};
1507 1576
1508struct rtl_intf_ops { 1577struct rtl_intf_ops {
@@ -1679,7 +1748,7 @@ struct dig_t {
1679 u32 rssi_highthresh; 1748 u32 rssi_highthresh;
1680 u32 fa_lowthresh; 1749 u32 fa_lowthresh;
1681 u32 fa_highthresh; 1750 u32 fa_highthresh;
1682 long last_min_undecorated_pwdb_for_dm; 1751 long last_min_undec_pwdb_for_dm;
1683 long rssi_highpower_lowthresh; 1752 long rssi_highpower_lowthresh;
1684 long rssi_highpower_highthresh; 1753 long rssi_highpower_highthresh;
1685 u32 recover_cnt; 1754 u32 recover_cnt;
@@ -1692,15 +1761,15 @@ struct dig_t {
1692 u8 dig_twoport_algorithm; 1761 u8 dig_twoport_algorithm;
1693 u8 dig_dbgmode; 1762 u8 dig_dbgmode;
1694 u8 dig_slgorithm_switch; 1763 u8 dig_slgorithm_switch;
1695 u8 cursta_connectstate; 1764 u8 cursta_cstate;
1696 u8 presta_connectstate; 1765 u8 presta_cstate;
1697 u8 curmultista_connectstate; 1766 u8 curmultista_cstate;
1698 char backoff_val; 1767 char back_val;
1699 char backoff_val_range_max; 1768 char back_range_max;
1700 char backoff_val_range_min; 1769 char back_range_min;
1701 u8 rx_gain_range_max; 1770 u8 rx_gain_range_max;
1702 u8 rx_gain_range_min; 1771 u8 rx_gain_range_min;
1703 u8 min_undecorated_pwdb_for_dm; 1772 u8 min_undec_pwdb_for_dm;
1704 u8 rssi_val_min; 1773 u8 rssi_val_min;
1705 u8 pre_cck_pd_state; 1774 u8 pre_cck_pd_state;
1706 u8 cur_cck_pd_state; 1775 u8 cur_cck_pd_state;
@@ -1712,10 +1781,10 @@ struct dig_t {
1712 u8 forbidden_igi; 1781 u8 forbidden_igi;
1713 u8 dig_state; 1782 u8 dig_state;
1714 u8 dig_highpwrstate; 1783 u8 dig_highpwrstate;
1715 u8 cur_sta_connectstate; 1784 u8 cur_sta_cstate;
1716 u8 pre_sta_connectstate; 1785 u8 pre_sta_cstate;
1717 u8 cur_ap_connectstate; 1786 u8 cur_ap_cstate;
1718 u8 pre_ap_connectstate; 1787 u8 pre_ap_cstate;
1719 u8 cur_pd_thstate; 1788 u8 cur_pd_thstate;
1720 u8 pre_pd_thstate; 1789 u8 pre_pd_thstate;
1721 u8 cur_cs_ratiostate; 1790 u8 cur_cs_ratiostate;
@@ -1781,9 +1850,22 @@ struct rtl_priv {
1781 struct dig_t dm_digtable; 1850 struct dig_t dm_digtable;
1782 struct ps_t dm_pstable; 1851 struct ps_t dm_pstable;
1783 1852
1784 /* data buffer pointer for USB reads */ 1853 /* section shared by individual drivers */
1785 __le32 *usb_data; 1854 union {
1786 int usb_data_index; 1855 struct { /* data buffer pointer for USB reads */
1856 __le32 *usb_data;
1857 int usb_data_index;
1858 bool initialized;
1859 };
1860 struct { /* section for 8723ae */
1861 bool reg_init; /* true if regs saved */
1862 u32 reg_874;
1863 u32 reg_c70;
1864 u32 reg_85c;
1865 u32 reg_a74;
1866 bool bt_operation_on;
1867 };
1868 };
1787 1869
1788 /*This must be the last item so 1870 /*This must be the last item so
1789 that it points to the data allocated 1871 that it points to the data allocated
@@ -1815,6 +1897,7 @@ enum bt_co_type {
1815 BT_CSR_BC4 = 3, 1897 BT_CSR_BC4 = 3,
1816 BT_CSR_BC8 = 4, 1898 BT_CSR_BC8 = 4,
1817 BT_RTL8756 = 5, 1899 BT_RTL8756 = 5,
1900 BT_RTL8723A = 6,
1818}; 1901};
1819 1902
1820enum bt_cur_state { 1903enum bt_cur_state {
@@ -1846,7 +1929,7 @@ struct bt_coexist_info {
1846 u8 eeprom_bt_coexist; 1929 u8 eeprom_bt_coexist;
1847 u8 eeprom_bt_type; 1930 u8 eeprom_bt_type;
1848 u8 eeprom_bt_ant_num; 1931 u8 eeprom_bt_ant_num;
1849 u8 eeprom_bt_ant_isolation; 1932 u8 eeprom_bt_ant_isol;
1850 u8 eeprom_bt_radio_shared; 1933 u8 eeprom_bt_radio_shared;
1851 1934
1852 u8 bt_coexistence; 1935 u8 bt_coexistence;
@@ -1873,13 +1956,27 @@ struct bt_coexist_info {
1873 1956
1874 bool fw_coexist_all_off; 1957 bool fw_coexist_all_off;
1875 bool sw_coexist_all_off; 1958 bool sw_coexist_all_off;
1876 u32 current_state; 1959 bool hw_coexist_all_off;
1960 u32 cstate;
1877 u32 previous_state; 1961 u32 previous_state;
1962 u32 cstate_h;
1963 u32 previous_state_h;
1964
1878 u8 bt_pre_rssi_state; 1965 u8 bt_pre_rssi_state;
1966 u8 bt_pre_rssi_state1;
1879 1967
1880 u8 reg_bt_iso; 1968 u8 reg_bt_iso;
1881 u8 reg_bt_sco; 1969 u8 reg_bt_sco;
1882 1970 bool balance_on;
1971 u8 bt_active_zero_cnt;
1972 bool cur_bt_disabled;
1973 bool pre_bt_disabled;
1974
1975 u8 bt_profile_case;
1976 u8 bt_profile_action;
1977 bool bt_busy;
1978 bool hold_for_bt_operation;
1979 u8 lps_counter;
1883}; 1980};
1884 1981
1885 1982
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 441cbccbd381..f47e8b0482ad 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -896,11 +896,13 @@ static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
896 goto out; 896 goto out;
897 897
898 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len, 898 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
899 req->ie, req->ie_len); 899 req->ie_len);
900 if (!skb) { 900 if (!skb) {
901 ret = -ENOMEM; 901 ret = -ENOMEM;
902 goto out; 902 goto out;
903 } 903 }
904 if (req->ie_len)
905 memcpy(skb_put(skb, req->ie_len), req->ie, req->ie_len);
904 906
905 ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data, 907 ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
906 skb->len); 908 skb->len);
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 6af35265c900..23289d49dd31 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -81,7 +81,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
81 status->freq = ieee80211_channel_to_frequency(desc->channel, 81 status->freq = ieee80211_channel_to_frequency(desc->channel,
82 status->band); 82 status->band);
83 83
84 status->flag |= RX_FLAG_MACTIME_MPDU; 84 status->flag |= RX_FLAG_MACTIME_START;
85 85
86 if (desc->flags & RX_DESC_ENCRYPTION_MASK) { 86 if (desc->flags & RX_DESC_ENCRYPTION_MASK) {
87 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 87 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index e2750a12c6f1..e57ee48edff6 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -305,7 +305,7 @@ out_free_hw:
305 return ret; 305 return ret;
306} 306}
307 307
308static void __devexit wl1251_sdio_remove(struct sdio_func *func) 308static void wl1251_sdio_remove(struct sdio_func *func)
309{ 309{
310 struct wl1251 *wl = sdio_get_drvdata(func); 310 struct wl1251 *wl = sdio_get_drvdata(func);
311 struct wl1251_sdio *wl_sdio = wl->if_priv; 311 struct wl1251_sdio *wl_sdio = wl->if_priv;
@@ -347,7 +347,7 @@ static struct sdio_driver wl1251_sdio_driver = {
347 .name = "wl1251_sdio", 347 .name = "wl1251_sdio",
348 .id_table = wl1251_devices, 348 .id_table = wl1251_devices,
349 .probe = wl1251_sdio_probe, 349 .probe = wl1251_sdio_probe,
350 .remove = __devexit_p(wl1251_sdio_remove), 350 .remove = wl1251_sdio_remove,
351 .drv.pm = &wl1251_sdio_pm_ops, 351 .drv.pm = &wl1251_sdio_pm_ops,
352}; 352};
353 353
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 567660cd2fcd..3b266d3231a3 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -237,7 +237,7 @@ static const struct wl1251_if_operations wl1251_spi_ops = {
237 .power = wl1251_spi_set_power, 237 .power = wl1251_spi_set_power,
238}; 238};
239 239
240static int __devinit wl1251_spi_probe(struct spi_device *spi) 240static int wl1251_spi_probe(struct spi_device *spi)
241{ 241{
242 struct wl12xx_platform_data *pdata; 242 struct wl12xx_platform_data *pdata;
243 struct ieee80211_hw *hw; 243 struct ieee80211_hw *hw;
@@ -309,7 +309,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
309 return ret; 309 return ret;
310} 310}
311 311
312static int __devexit wl1251_spi_remove(struct spi_device *spi) 312static int wl1251_spi_remove(struct spi_device *spi)
313{ 313{
314 struct wl1251 *wl = dev_get_drvdata(&spi->dev); 314 struct wl1251 *wl = dev_get_drvdata(&spi->dev);
315 315
@@ -326,7 +326,7 @@ static struct spi_driver wl1251_spi_driver = {
326 }, 326 },
327 327
328 .probe = wl1251_spi_probe, 328 .probe = wl1251_spi_probe,
329 .remove = __devexit_p(wl1251_spi_remove), 329 .remove = wl1251_spi_remove,
330}; 330};
331 331
332static int __init wl1251_spi_init(void) 332static int __init wl1251_spi_init(void)
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index dadf1dbb002a..e5f5f8f39144 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1696,7 +1696,7 @@ static int wl12xx_setup(struct wl1271 *wl)
1696 return 0; 1696 return 0;
1697} 1697}
1698 1698
1699static int __devinit wl12xx_probe(struct platform_device *pdev) 1699static int wl12xx_probe(struct platform_device *pdev)
1700{ 1700{
1701 struct wl1271 *wl; 1701 struct wl1271 *wl;
1702 struct ieee80211_hw *hw; 1702 struct ieee80211_hw *hw;
@@ -1725,7 +1725,7 @@ out:
1725 return ret; 1725 return ret;
1726} 1726}
1727 1727
1728static const struct platform_device_id wl12xx_id_table[] __devinitconst = { 1728static const struct platform_device_id wl12xx_id_table[] = {
1729 { "wl12xx", 0 }, 1729 { "wl12xx", 0 },
1730 { } /* Terminating Entry */ 1730 { } /* Terminating Entry */
1731}; 1731};
@@ -1733,7 +1733,7 @@ MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
1733 1733
1734static struct platform_driver wl12xx_driver = { 1734static struct platform_driver wl12xx_driver = {
1735 .probe = wl12xx_probe, 1735 .probe = wl12xx_probe,
1736 .remove = __devexit_p(wlcore_remove), 1736 .remove = wlcore_remove,
1737 .id_table = wl12xx_id_table, 1737 .id_table = wl12xx_id_table,
1738 .driver = { 1738 .driver = {
1739 .name = "wl12xx_driver", 1739 .name = "wl12xx_driver",
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index a39682a7c25f..8d8c1f8c63b7 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1499,7 +1499,7 @@ static int wl18xx_setup(struct wl1271 *wl)
1499 return 0; 1499 return 0;
1500} 1500}
1501 1501
1502static int __devinit wl18xx_probe(struct platform_device *pdev) 1502static int wl18xx_probe(struct platform_device *pdev)
1503{ 1503{
1504 struct wl1271 *wl; 1504 struct wl1271 *wl;
1505 struct ieee80211_hw *hw; 1505 struct ieee80211_hw *hw;
@@ -1528,7 +1528,7 @@ out:
1528 return ret; 1528 return ret;
1529} 1529}
1530 1530
1531static const struct platform_device_id wl18xx_id_table[] __devinitconst = { 1531static const struct platform_device_id wl18xx_id_table[] = {
1532 { "wl18xx", 0 }, 1532 { "wl18xx", 0 },
1533 { } /* Terminating Entry */ 1533 { } /* Terminating Entry */
1534}; 1534};
@@ -1536,7 +1536,7 @@ MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
1536 1536
1537static struct platform_driver wl18xx_driver = { 1537static struct platform_driver wl18xx_driver = {
1538 .probe = wl18xx_probe, 1538 .probe = wl18xx_probe,
1539 .remove = __devexit_p(wlcore_remove), 1539 .remove = wlcore_remove,
1540 .id_table = wl18xx_id_table, 1540 .id_table = wl18xx_id_table,
1541 .driver = { 1541 .driver = {
1542 .name = "wl18xx_driver", 1542 .name = "wl18xx_driver",
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index eaef3f41b252..27f83f72a93b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1038,11 +1038,13 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1038 u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5; 1038 u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
1039 1039
1040 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len, 1040 skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
1041 ie, ie_len); 1041 ie_len);
1042 if (!skb) { 1042 if (!skb) {
1043 ret = -ENOMEM; 1043 ret = -ENOMEM;
1044 goto out; 1044 goto out;
1045 } 1045 }
1046 if (ie_len)
1047 memcpy(skb_put(skb, ie_len), ie, ie_len);
1046 1048
1047 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len); 1049 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
1048 1050
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 25530c8760cb..ea9d8e011bc9 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -677,7 +677,7 @@ static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
677 memset(data, 0, sizeof(*data)); 677 memset(data, 0, sizeof(*data));
678 data->cur_vif = cur_vif; 678 data->cur_vif = cur_vif;
679 679
680 ieee80211_iterate_active_interfaces(hw, 680 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
681 wl12xx_vif_count_iter, data); 681 wl12xx_vif_count_iter, data);
682} 682}
683 683
@@ -3791,7 +3791,7 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3791 3791
3792 /* Handle HT information change */ 3792 /* Handle HT information change */
3793 if ((changed & BSS_CHANGED_HT) && 3793 if ((changed & BSS_CHANGED_HT) &&
3794 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 3794 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3795 ret = wl1271_acx_set_ht_information(wl, wlvif, 3795 ret = wl1271_acx_set_ht_information(wl, wlvif,
3796 bss_conf->ht_operation_mode); 3796 bss_conf->ht_operation_mode);
3797 if (ret < 0) { 3797 if (ret < 0) {
@@ -3905,7 +3905,8 @@ sta_not_found:
3905 u32 rates; 3905 u32 rates;
3906 int ieoffset; 3906 int ieoffset;
3907 wlvif->aid = bss_conf->aid; 3907 wlvif->aid = bss_conf->aid;
3908 wlvif->channel_type = bss_conf->channel_type; 3908 wlvif->channel_type =
3909 cfg80211_get_chandef_type(&bss_conf->chandef);
3909 wlvif->beacon_int = bss_conf->beacon_int; 3910 wlvif->beacon_int = bss_conf->beacon_int;
3910 do_join = true; 3911 do_join = true;
3911 set_assoc = true; 3912 set_assoc = true;
@@ -4071,7 +4072,7 @@ sta_not_found:
4071 /* Handle new association with HT. Do this after join. */ 4072 /* Handle new association with HT. Do this after join. */
4072 if (sta_exists) { 4073 if (sta_exists) {
4073 if ((changed & BSS_CHANGED_HT) && 4074 if ((changed & BSS_CHANGED_HT) &&
4074 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 4075 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4075 ret = wl1271_acx_set_ht_capabilities(wl, 4076 ret = wl1271_acx_set_ht_capabilities(wl,
4076 &sta_ht_cap, 4077 &sta_ht_cap,
4077 true, 4078 true,
@@ -4098,7 +4099,7 @@ sta_not_found:
4098 4099
4099 /* Handle HT information change. Done after join. */ 4100 /* Handle HT information change. Done after join. */
4100 if ((changed & BSS_CHANGED_HT) && 4101 if ((changed & BSS_CHANGED_HT) &&
4101 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { 4102 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4102 ret = wl1271_acx_set_ht_information(wl, wlvif, 4103 ret = wl1271_acx_set_ht_information(wl, wlvif,
4103 bss_conf->ht_operation_mode); 4104 bss_conf->ht_operation_mode);
4104 if (ret < 0) { 4105 if (ret < 0) {
@@ -5659,7 +5660,7 @@ out:
5659 complete_all(&wl->nvs_loading_complete); 5660 complete_all(&wl->nvs_loading_complete);
5660} 5661}
5661 5662
5662int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) 5663int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5663{ 5664{
5664 int ret; 5665 int ret;
5665 5666
@@ -5682,7 +5683,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5682} 5683}
5683EXPORT_SYMBOL_GPL(wlcore_probe); 5684EXPORT_SYMBOL_GPL(wlcore_probe);
5684 5685
5685int __devexit wlcore_remove(struct platform_device *pdev) 5686int wlcore_remove(struct platform_device *pdev)
5686{ 5687{
5687 struct wl1271 *wl = platform_get_drvdata(pdev); 5688 struct wl1271 *wl = platform_get_drvdata(pdev);
5688 5689
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 73ace4b2604e..646f703ae739 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -54,7 +54,7 @@ struct wl12xx_sdio_glue {
54 struct platform_device *core; 54 struct platform_device *core;
55}; 55};
56 56
57static const struct sdio_device_id wl1271_devices[] __devinitconst = { 57static const struct sdio_device_id wl1271_devices[] = {
58 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) }, 58 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
59 {} 59 {}
60}; 60};
@@ -214,7 +214,7 @@ static struct wl1271_if_operations sdio_ops = {
214 .set_block_size = wl1271_sdio_set_block_size, 214 .set_block_size = wl1271_sdio_set_block_size,
215}; 215};
216 216
217static int __devinit wl1271_probe(struct sdio_func *func, 217static int wl1271_probe(struct sdio_func *func,
218 const struct sdio_device_id *id) 218 const struct sdio_device_id *id)
219{ 219{
220 struct wl12xx_platform_data *wlan_data; 220 struct wl12xx_platform_data *wlan_data;
@@ -319,7 +319,7 @@ out:
319 return ret; 319 return ret;
320} 320}
321 321
322static void __devexit wl1271_remove(struct sdio_func *func) 322static void wl1271_remove(struct sdio_func *func)
323{ 323{
324 struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func); 324 struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
325 325
@@ -384,7 +384,7 @@ static struct sdio_driver wl1271_sdio_driver = {
384 .name = "wl1271_sdio", 384 .name = "wl1271_sdio",
385 .id_table = wl1271_devices, 385 .id_table = wl1271_devices,
386 .probe = wl1271_probe, 386 .probe = wl1271_probe,
387 .remove = __devexit_p(wl1271_remove), 387 .remove = wl1271_remove,
388#ifdef CONFIG_PM 388#ifdef CONFIG_PM
389 .drv = { 389 .drv = {
390 .pm = &wl1271_sdio_pm_ops, 390 .pm = &wl1271_sdio_pm_ops,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index a519bc3adec1..f06f4770ce02 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -324,7 +324,7 @@ static struct wl1271_if_operations spi_ops = {
324 .set_block_size = NULL, 324 .set_block_size = NULL,
325}; 325};
326 326
327static int __devinit wl1271_probe(struct spi_device *spi) 327static int wl1271_probe(struct spi_device *spi)
328{ 328{
329 struct wl12xx_spi_glue *glue; 329 struct wl12xx_spi_glue *glue;
330 struct wl12xx_platform_data *pdata; 330 struct wl12xx_platform_data *pdata;
@@ -403,7 +403,7 @@ out:
403 return ret; 403 return ret;
404} 404}
405 405
406static int __devexit wl1271_remove(struct spi_device *spi) 406static int wl1271_remove(struct spi_device *spi)
407{ 407{
408 struct wl12xx_spi_glue *glue = spi_get_drvdata(spi); 408 struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
409 409
@@ -422,7 +422,7 @@ static struct spi_driver wl1271_spi_driver = {
422 }, 422 },
423 423
424 .probe = wl1271_probe, 424 .probe = wl1271_probe,
425 .remove = __devexit_p(wl1271_remove), 425 .remove = wl1271_remove,
426}; 426};
427 427
428static int __init wl1271_init(void) 428static int __init wl1271_init(void)
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 68584aa0f2b0..c3884937c007 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -414,8 +414,8 @@ struct wl1271 {
414 struct completion nvs_loading_complete; 414 struct completion nvs_loading_complete;
415}; 415};
416 416
417int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev); 417int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
418int __devexit wlcore_remove(struct platform_device *pdev); 418int wlcore_remove(struct platform_device *pdev);
419struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size); 419struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size);
420int wlcore_free_hw(struct wl1271 *wl); 420int wlcore_free_hw(struct wl1271 *wl);
421int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 421int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index fc24eb9b3948..c26e28b4bd9f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1311,7 +1311,7 @@ static const struct net_device_ops xennet_netdev_ops = {
1311#endif 1311#endif
1312}; 1312};
1313 1313
1314static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev) 1314static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1315{ 1315{
1316 int i, err; 1316 int i, err;
1317 struct net_device *netdev; 1317 struct net_device *netdev;
@@ -1407,8 +1407,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1407 * structures and the ring buffers for communication with the backend, and 1407 * structures and the ring buffers for communication with the backend, and
1408 * inform the backend of the appropriate details for those. 1408 * inform the backend of the appropriate details for those.
1409 */ 1409 */
1410static int __devinit netfront_probe(struct xenbus_device *dev, 1410static int netfront_probe(struct xenbus_device *dev,
1411 const struct xenbus_device_id *id) 1411 const struct xenbus_device_id *id)
1412{ 1412{
1413 int err; 1413 int err;
1414 struct net_device *netdev; 1414 struct net_device *netdev;
@@ -1967,7 +1967,7 @@ static const struct xenbus_device_id netfront_ids[] = {
1967}; 1967};
1968 1968
1969 1969
1970static int __devexit xennet_remove(struct xenbus_device *dev) 1970static int xennet_remove(struct xenbus_device *dev)
1971{ 1971{
1972 struct netfront_info *info = dev_get_drvdata(&dev->dev); 1972 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1973 1973
@@ -1990,7 +1990,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1990 1990
1991static DEFINE_XENBUS_DRIVER(netfront, , 1991static DEFINE_XENBUS_DRIVER(netfront, ,
1992 .probe = netfront_probe, 1992 .probe = netfront_probe,
1993 .remove = __devexit_p(xennet_remove), 1993 .remove = xennet_remove,
1994 .resume = netfront_resume, 1994 .resume = netfront_resume,
1995 .otherend_changed = netback_changed, 1995 .otherend_changed = netback_changed,
1996); 1996);
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index bf05831fdf09..36c359043f54 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -2,7 +2,7 @@
2# Makefile for nfc devices 2# Makefile for nfc devices
3# 3#
4 4
5obj-$(CONFIG_PN544_HCI_NFC) += pn544_hci.o 5obj-$(CONFIG_PN544_HCI_NFC) += pn544/
6obj-$(CONFIG_NFC_PN533) += pn533.o 6obj-$(CONFIG_NFC_PN533) += pn533.o
7obj-$(CONFIG_NFC_WILINK) += nfcwilink.o 7obj-$(CONFIG_NFC_WILINK) += nfcwilink.o
8 8
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 30ae18a03a9c..ada681b01a17 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -84,6 +84,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
84#define PN533_LISTEN_TIME 2 84#define PN533_LISTEN_TIME 2
85 85
86/* frame definitions */ 86/* frame definitions */
87#define PN533_NORMAL_FRAME_MAX_LEN 262 /* 6 (PREAMBLE, SOF, LEN, LCS, TFI)
88 254 (DATA)
89 2 (DCS, postamble) */
90
87#define PN533_FRAME_TAIL_SIZE 2 91#define PN533_FRAME_TAIL_SIZE 2
88#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \ 92#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
89 PN533_FRAME_TAIL_SIZE) 93 PN533_FRAME_TAIL_SIZE)
@@ -1166,8 +1170,7 @@ static void pn533_poll_create_mod_list(struct pn533 *dev,
1166 pn533_poll_add_mod(dev, PN533_LISTEN_MOD); 1170 pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
1167} 1171}
1168 1172
1169static int pn533_start_poll_complete(struct pn533 *dev, void *arg, 1173static int pn533_start_poll_complete(struct pn533 *dev, u8 *params, int params_len)
1170 u8 *params, int params_len)
1171{ 1174{
1172 struct pn533_poll_response *resp; 1175 struct pn533_poll_response *resp;
1173 int rc; 1176 int rc;
@@ -1305,8 +1308,7 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
1305} 1308}
1306 1309
1307#define ATR_REQ_GB_OFFSET 17 1310#define ATR_REQ_GB_OFFSET 17
1308static int pn533_init_target_complete(struct pn533 *dev, void *arg, 1311static int pn533_init_target_complete(struct pn533 *dev, u8 *params, int params_len)
1309 u8 *params, int params_len)
1310{ 1312{
1311 struct pn533_cmd_init_target_response *resp; 1313 struct pn533_cmd_init_target_response *resp;
1312 u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb; 1314 u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
@@ -1403,9 +1405,9 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
1403 if (cur_mod->len == 0) { 1405 if (cur_mod->len == 0) {
1404 del_timer(&dev->listen_timer); 1406 del_timer(&dev->listen_timer);
1405 1407
1406 return pn533_init_target_complete(dev, arg, params, params_len); 1408 return pn533_init_target_complete(dev, params, params_len);
1407 } else { 1409 } else {
1408 rc = pn533_start_poll_complete(dev, arg, params, params_len); 1410 rc = pn533_start_poll_complete(dev, params, params_len);
1409 if (!rc) 1411 if (!rc)
1410 return rc; 1412 return rc;
1411 } 1413 }
@@ -2376,9 +2378,9 @@ static int pn533_probe(struct usb_interface *interface,
2376 goto error; 2378 goto error;
2377 } 2379 }
2378 2380
2379 dev->in_frame = kmalloc(dev->in_maxlen, GFP_KERNEL); 2381 dev->in_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
2380 dev->in_urb = usb_alloc_urb(0, GFP_KERNEL); 2382 dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
2381 dev->out_frame = kmalloc(dev->out_maxlen, GFP_KERNEL); 2383 dev->out_frame = kmalloc(PN533_NORMAL_FRAME_MAX_LEN, GFP_KERNEL);
2382 dev->out_urb = usb_alloc_urb(0, GFP_KERNEL); 2384 dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
2383 2385
2384 if (!dev->in_frame || !dev->out_frame || 2386 if (!dev->in_frame || !dev->out_frame ||
diff --git a/drivers/nfc/pn544/Makefile b/drivers/nfc/pn544/Makefile
new file mode 100644
index 000000000000..725733881eb3
--- /dev/null
+++ b/drivers/nfc/pn544/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for PN544 HCI based NFC driver
3#
4
5obj-$(CONFIG_PN544_HCI_NFC) += pn544_i2c.o
6
7pn544_i2c-y := pn544.o i2c.o
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
new file mode 100644
index 000000000000..7da9071b68b6
--- /dev/null
+++ b/drivers/nfc/pn544/i2c.c
@@ -0,0 +1,500 @@
1/*
2 * I2C Link Layer for PN544 HCI based Driver
3 *
4 * Copyright (C) 2012 Intel Corporation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#include <linux/crc-ccitt.h>
22#include <linux/module.h>
23#include <linux/i2c.h>
24#include <linux/gpio.h>
25#include <linux/miscdevice.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28
29#include <linux/platform_data/pn544.h>
30
31#include <net/nfc/hci.h>
32#include <net/nfc/llc.h>
33
34#include "pn544.h"
35
36#define PN544_I2C_FRAME_HEADROOM 1
37#define PN544_I2C_FRAME_TAILROOM 2
38
39/* framing in HCI mode */
40#define PN544_HCI_I2C_LLC_LEN 1
41#define PN544_HCI_I2C_LLC_CRC 2
42#define PN544_HCI_I2C_LLC_LEN_CRC (PN544_HCI_I2C_LLC_LEN + \
43 PN544_HCI_I2C_LLC_CRC)
44#define PN544_HCI_I2C_LLC_MIN_SIZE (1 + PN544_HCI_I2C_LLC_LEN_CRC)
45#define PN544_HCI_I2C_LLC_MAX_PAYLOAD 29
46#define PN544_HCI_I2C_LLC_MAX_SIZE (PN544_HCI_I2C_LLC_LEN_CRC + 1 + \
47 PN544_HCI_I2C_LLC_MAX_PAYLOAD)
48
49static struct i2c_device_id pn544_hci_i2c_id_table[] = {
50 {"pn544", 0},
51 {}
52};
53
54MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
55
56#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
57
58struct pn544_i2c_phy {
59 struct i2c_client *i2c_dev;
60 struct nfc_hci_dev *hdev;
61
62 unsigned int gpio_en;
63 unsigned int gpio_irq;
64 unsigned int gpio_fw;
65 unsigned int en_polarity;
66
67 int powered;
68
69 int hard_fault; /*
70 * < 0 if hardware error occured (e.g. i2c err)
71 * and prevents normal operation.
72 */
73};
74
75#define I2C_DUMP_SKB(info, skb) \
76do { \
77 pr_debug("%s:\n", info); \
78 print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \
79 16, 1, (skb)->data, (skb)->len, 0); \
80} while (0)
81
82static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
83{
84 int polarity, retry, ret;
85 char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
86 int count = sizeof(rset_cmd);
87
88 pr_info(DRIVER_DESC ": %s\n", __func__);
89 dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
90
91 /* Disable fw download */
92 gpio_set_value(phy->gpio_fw, 0);
93
94 for (polarity = 0; polarity < 2; polarity++) {
95 phy->en_polarity = polarity;
96 retry = 3;
97 while (retry--) {
98 /* power off */
99 gpio_set_value(phy->gpio_en, !phy->en_polarity);
100 usleep_range(10000, 15000);
101
102 /* power on */
103 gpio_set_value(phy->gpio_en, phy->en_polarity);
104 usleep_range(10000, 15000);
105
106 /* send reset */
107 dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
108 ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
109 if (ret == count) {
110 dev_info(&phy->i2c_dev->dev,
111 "nfc_en polarity : active %s\n",
112 (polarity == 0 ? "low" : "high"));
113 goto out;
114 }
115 }
116 }
117
118 dev_err(&phy->i2c_dev->dev,
119 "Could not detect nfc_en polarity, fallback to active high\n");
120
121out:
122 gpio_set_value(phy->gpio_en, !phy->en_polarity);
123}
124
125static int pn544_hci_i2c_enable(void *phy_id)
126{
127 struct pn544_i2c_phy *phy = phy_id;
128
129 pr_info(DRIVER_DESC ": %s\n", __func__);
130
131 gpio_set_value(phy->gpio_fw, 0);
132 gpio_set_value(phy->gpio_en, phy->en_polarity);
133 usleep_range(10000, 15000);
134
135 phy->powered = 1;
136
137 return 0;
138}
139
140static void pn544_hci_i2c_disable(void *phy_id)
141{
142 struct pn544_i2c_phy *phy = phy_id;
143
144 pr_info(DRIVER_DESC ": %s\n", __func__);
145
146 gpio_set_value(phy->gpio_fw, 0);
147 gpio_set_value(phy->gpio_en, !phy->en_polarity);
148 usleep_range(10000, 15000);
149
150 gpio_set_value(phy->gpio_en, phy->en_polarity);
151 usleep_range(10000, 15000);
152
153 gpio_set_value(phy->gpio_en, !phy->en_polarity);
154 usleep_range(10000, 15000);
155
156 phy->powered = 0;
157}
158
159static void pn544_hci_i2c_add_len_crc(struct sk_buff *skb)
160{
161 u16 crc;
162 int len;
163
164 len = skb->len + 2;
165 *skb_push(skb, 1) = len;
166
167 crc = crc_ccitt(0xffff, skb->data, skb->len);
168 crc = ~crc;
169 *skb_put(skb, 1) = crc & 0xff;
170 *skb_put(skb, 1) = crc >> 8;
171}
172
173static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb)
174{
175 skb_pull(skb, PN544_I2C_FRAME_HEADROOM);
176 skb_trim(skb, PN544_I2C_FRAME_TAILROOM);
177}
178
179/*
180 * Writing a frame must not return the number of written bytes.
181 * It must return either zero for success, or <0 for error.
182 * In addition, it must not alter the skb
183 */
184static int pn544_hci_i2c_write(void *phy_id, struct sk_buff *skb)
185{
186 int r;
187 struct pn544_i2c_phy *phy = phy_id;
188 struct i2c_client *client = phy->i2c_dev;
189
190 if (phy->hard_fault != 0)
191 return phy->hard_fault;
192
193 usleep_range(3000, 6000);
194
195 pn544_hci_i2c_add_len_crc(skb);
196
197 I2C_DUMP_SKB("i2c frame written", skb);
198
199 r = i2c_master_send(client, skb->data, skb->len);
200
201 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
202 usleep_range(6000, 10000);
203 r = i2c_master_send(client, skb->data, skb->len);
204 }
205
206 if (r >= 0) {
207 if (r != skb->len)
208 r = -EREMOTEIO;
209 else
210 r = 0;
211 }
212
213 pn544_hci_i2c_remove_len_crc(skb);
214
215 return r;
216}
217
218static int check_crc(u8 *buf, int buflen)
219{
220 int len;
221 u16 crc;
222
223 len = buf[0] + 1;
224 crc = crc_ccitt(0xffff, buf, len - 2);
225 crc = ~crc;
226
227 if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
228 pr_err(PN544_HCI_I2C_DRIVER_NAME
229 ": CRC error 0x%x != 0x%x 0x%x\n",
230 crc, buf[len - 1], buf[len - 2]);
231
232 pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
233 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
234 16, 2, buf, buflen, false);
235 return -EPERM;
236 }
237 return 0;
238}
239
240/*
241 * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
242 * that i2c bus will be flushed and that next read will start on a new frame.
243 * returned skb contains only LLC header and payload.
244 * returns:
245 * -EREMOTEIO : i2c read error (fatal)
246 * -EBADMSG : frame was incorrect and discarded
247 * -ENOMEM : cannot allocate skb, frame dropped
248 */
249static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
250{
251 int r;
252 u8 len;
253 u8 tmp[PN544_HCI_I2C_LLC_MAX_SIZE - 1];
254 struct i2c_client *client = phy->i2c_dev;
255
256 r = i2c_master_recv(client, &len, 1);
257 if (r != 1) {
258 dev_err(&client->dev, "cannot read len byte\n");
259 return -EREMOTEIO;
260 }
261
262 if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
263 (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
264 dev_err(&client->dev, "invalid len byte\n");
265 r = -EBADMSG;
266 goto flush;
267 }
268
269 *skb = alloc_skb(1 + len, GFP_KERNEL);
270 if (*skb == NULL) {
271 r = -ENOMEM;
272 goto flush;
273 }
274
275 *skb_put(*skb, 1) = len;
276
277 r = i2c_master_recv(client, skb_put(*skb, len), len);
278 if (r != len) {
279 kfree_skb(*skb);
280 return -EREMOTEIO;
281 }
282
283 I2C_DUMP_SKB("i2c frame read", *skb);
284
285 r = check_crc((*skb)->data, (*skb)->len);
286 if (r != 0) {
287 kfree_skb(*skb);
288 r = -EBADMSG;
289 goto flush;
290 }
291
292 skb_pull(*skb, 1);
293 skb_trim(*skb, (*skb)->len - 2);
294
295 usleep_range(3000, 6000);
296
297 return 0;
298
299flush:
300 if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
301 r = -EREMOTEIO;
302
303 usleep_range(3000, 6000);
304
305 return r;
306}
307
308/*
309 * Reads an shdlc frame from the chip. This is not as straightforward as it
310 * seems. There are cases where we could loose the frame start synchronization.
311 * The frame format is len-data-crc, and corruption can occur anywhere while
312 * transiting on i2c bus, such that we could read an invalid len.
313 * In order to recover synchronization with the next frame, we must be sure
314 * to read the real amount of data without using the len byte. We do this by
315 * assuming the following:
316 * - the chip will always present only one single complete frame on the bus
317 * before triggering the interrupt
318 * - the chip will not present a new frame until we have completely read
319 * the previous one (or until we have handled the interrupt).
320 * The tricky case is when we read a corrupted len that is less than the real
321 * len. We must detect this here in order to determine that we need to flush
322 * the bus. This is the reason why we check the crc here.
323 */
324static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
325{
326 struct pn544_i2c_phy *phy = phy_id;
327 struct i2c_client *client;
328 struct sk_buff *skb = NULL;
329 int r;
330
331 if (!phy || irq != phy->i2c_dev->irq) {
332 WARN_ON_ONCE(1);
333 return IRQ_NONE;
334 }
335
336 client = phy->i2c_dev;
337 dev_dbg(&client->dev, "IRQ\n");
338
339 if (phy->hard_fault != 0)
340 return IRQ_HANDLED;
341
342 r = pn544_hci_i2c_read(phy, &skb);
343 if (r == -EREMOTEIO) {
344 phy->hard_fault = r;
345
346 nfc_hci_recv_frame(phy->hdev, NULL);
347
348 return IRQ_HANDLED;
349 } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
350 return IRQ_HANDLED;
351 }
352
353 nfc_hci_recv_frame(phy->hdev, skb);
354
355 return IRQ_HANDLED;
356}
357
358static struct nfc_phy_ops i2c_phy_ops = {
359 .write = pn544_hci_i2c_write,
360 .enable = pn544_hci_i2c_enable,
361 .disable = pn544_hci_i2c_disable,
362};
363
364static int __devinit pn544_hci_i2c_probe(struct i2c_client *client,
365 const struct i2c_device_id *id)
366{
367 struct pn544_i2c_phy *phy;
368 struct pn544_nfc_platform_data *pdata;
369 int r = 0;
370
371 dev_dbg(&client->dev, "%s\n", __func__);
372 dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
373
374 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
375 dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
376 return -ENODEV;
377 }
378
379 phy = kzalloc(sizeof(struct pn544_i2c_phy), GFP_KERNEL);
380 if (!phy) {
381 dev_err(&client->dev,
382 "Cannot allocate memory for pn544 i2c phy.\n");
383 r = -ENOMEM;
384 goto err_phy_alloc;
385 }
386
387 phy->i2c_dev = client;
388 i2c_set_clientdata(client, phy);
389
390 pdata = client->dev.platform_data;
391 if (pdata == NULL) {
392 dev_err(&client->dev, "No platform data\n");
393 r = -EINVAL;
394 goto err_pdata;
395 }
396
397 if (pdata->request_resources == NULL) {
398 dev_err(&client->dev, "request_resources() missing\n");
399 r = -EINVAL;
400 goto err_pdata;
401 }
402
403 r = pdata->request_resources(client);
404 if (r) {
405 dev_err(&client->dev, "Cannot get platform resources\n");
406 goto err_pdata;
407 }
408
409 phy->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
410 phy->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
411 phy->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
412
413 pn544_hci_i2c_platform_init(phy);
414
415 r = request_threaded_irq(client->irq, NULL, pn544_hci_i2c_irq_thread_fn,
416 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
417 PN544_HCI_I2C_DRIVER_NAME, phy);
418 if (r < 0) {
419 dev_err(&client->dev, "Unable to register IRQ handler\n");
420 goto err_rti;
421 }
422
423 r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
424 PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
425 PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
426 if (r < 0)
427 goto err_hci;
428
429 return 0;
430
431err_hci:
432 free_irq(client->irq, phy);
433
434err_rti:
435 if (pdata->free_resources != NULL)
436 pdata->free_resources();
437
438err_pdata:
439 kfree(phy);
440
441err_phy_alloc:
442 return r;
443}
444
445static __devexit int pn544_hci_i2c_remove(struct i2c_client *client)
446{
447 struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
448 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
449
450 dev_dbg(&client->dev, "%s\n", __func__);
451
452 pn544_hci_remove(phy->hdev);
453
454 if (phy->powered)
455 pn544_hci_i2c_disable(phy);
456
457 free_irq(client->irq, phy);
458 if (pdata->free_resources)
459 pdata->free_resources();
460
461 kfree(phy);
462
463 return 0;
464}
465
466static struct i2c_driver pn544_hci_i2c_driver = {
467 .driver = {
468 .name = PN544_HCI_I2C_DRIVER_NAME,
469 },
470 .probe = pn544_hci_i2c_probe,
471 .id_table = pn544_hci_i2c_id_table,
472 .remove = __devexit_p(pn544_hci_i2c_remove),
473};
474
475static int __init pn544_hci_i2c_init(void)
476{
477 int r;
478
479 pr_debug(DRIVER_DESC ": %s\n", __func__);
480
481 r = i2c_add_driver(&pn544_hci_i2c_driver);
482 if (r) {
483 pr_err(PN544_HCI_I2C_DRIVER_NAME
484 ": driver registration failed\n");
485 return r;
486 }
487
488 return 0;
489}
490
491static void __exit pn544_hci_i2c_exit(void)
492{
493 i2c_del_driver(&pn544_hci_i2c_driver);
494}
495
496module_init(pn544_hci_i2c_init);
497module_exit(pn544_hci_i2c_exit);
498
499MODULE_LICENSE("GPL");
500MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544/pn544.c
index c9c8570273ab..cc666de3b8e5 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -18,47 +18,21 @@
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 19 */
20 20
21#include <linux/crc-ccitt.h>
22#include <linux/module.h>
23#include <linux/delay.h> 21#include <linux/delay.h>
24#include <linux/slab.h> 22#include <linux/slab.h>
25#include <linux/miscdevice.h>
26#include <linux/interrupt.h>
27#include <linux/gpio.h>
28#include <linux/i2c.h>
29 23
30#include <linux/nfc.h> 24#include <linux/nfc.h>
31#include <net/nfc/hci.h> 25#include <net/nfc/hci.h>
32#include <net/nfc/llc.h> 26#include <net/nfc/llc.h>
33 27
34#include <linux/nfc/pn544.h> 28#include "pn544.h"
35
36#define DRIVER_DESC "HCI NFC driver for PN544"
37
38#define PN544_HCI_DRIVER_NAME "pn544_hci"
39 29
40/* Timing restrictions (ms) */ 30/* Timing restrictions (ms) */
41#define PN544_HCI_RESETVEN_TIME 30 31#define PN544_HCI_RESETVEN_TIME 30
42 32
43static struct i2c_device_id pn544_hci_id_table[] = {
44 {"pn544", 0},
45 {}
46};
47
48MODULE_DEVICE_TABLE(i2c, pn544_hci_id_table);
49
50#define HCI_MODE 0 33#define HCI_MODE 0
51#define FW_MODE 1 34#define FW_MODE 1
52 35
53/* framing in HCI mode */
54#define PN544_HCI_LLC_LEN 1
55#define PN544_HCI_LLC_CRC 2
56#define PN544_HCI_LLC_LEN_CRC (PN544_HCI_LLC_LEN + PN544_HCI_LLC_CRC)
57#define PN544_HCI_LLC_MIN_SIZE (1 + PN544_HCI_LLC_LEN_CRC)
58#define PN544_HCI_LLC_MAX_PAYLOAD 29
59#define PN544_HCI_LLC_MAX_SIZE (PN544_HCI_LLC_LEN_CRC + 1 + \
60 PN544_HCI_LLC_MAX_PAYLOAD)
61
62enum pn544_state { 36enum pn544_state {
63 PN544_ST_COLD, 37 PN544_ST_COLD,
64 PN544_ST_FW_READY, 38 PN544_ST_FW_READY,
@@ -100,6 +74,10 @@ enum pn544_state {
100#define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02 74#define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02
101 75
102#define PN544_POLLING_LOOP_MGMT_GATE 0x94 76#define PN544_POLLING_LOOP_MGMT_GATE 0x94
77#define PN544_DEP_MODE 0x01
78#define PN544_DEP_ATR_REQ 0x02
79#define PN544_DEP_ATR_RES 0x03
80#define PN544_DEP_MERGE 0x0D
103#define PN544_PL_RDPHASES 0x06 81#define PN544_PL_RDPHASES 0x06
104#define PN544_PL_EMULATION 0x07 82#define PN544_PL_EMULATION 0x07
105#define PN544_PL_NFCT_DEACTIVATED 0x09 83#define PN544_PL_NFCT_DEACTIVATED 0x09
@@ -108,6 +86,15 @@ enum pn544_state {
108 86
109#define PN544_NFC_WI_MGMT_GATE 0xA1 87#define PN544_NFC_WI_MGMT_GATE 0xA1
110 88
89#define PN544_HCI_EVT_SND_DATA 0x01
90#define PN544_HCI_EVT_ACTIVATED 0x02
91#define PN544_HCI_EVT_DEACTIVATED 0x03
92#define PN544_HCI_EVT_RCV_DATA 0x04
93#define PN544_HCI_EVT_CONTINUE_MI 0x05
94
95#define PN544_HCI_CMD_ATTREQUEST 0x12
96#define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13
97
111static struct nfc_hci_gate pn544_gates[] = { 98static struct nfc_hci_gate pn544_gates[] = {
112 {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE}, 99 {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
113 {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE}, 100 {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
@@ -128,259 +115,22 @@ static struct nfc_hci_gate pn544_gates[] = {
128 115
129/* Largest headroom needed for outgoing custom commands */ 116/* Largest headroom needed for outgoing custom commands */
130#define PN544_CMDS_HEADROOM 2 117#define PN544_CMDS_HEADROOM 2
131#define PN544_FRAME_HEADROOM 1
132#define PN544_FRAME_TAILROOM 2
133 118
134struct pn544_hci_info { 119struct pn544_hci_info {
135 struct i2c_client *i2c_dev; 120 struct nfc_phy_ops *phy_ops;
121 void *phy_id;
122
136 struct nfc_hci_dev *hdev; 123 struct nfc_hci_dev *hdev;
137 124
138 enum pn544_state state; 125 enum pn544_state state;
139 126
140 struct mutex info_lock; 127 struct mutex info_lock;
141 128
142 unsigned int gpio_en;
143 unsigned int gpio_irq;
144 unsigned int gpio_fw;
145 unsigned int en_polarity;
146
147 int hard_fault; /*
148 * < 0 if hardware error occured (e.g. i2c err)
149 * and prevents normal operation.
150 */
151 int async_cb_type; 129 int async_cb_type;
152 data_exchange_cb_t async_cb; 130 data_exchange_cb_t async_cb;
153 void *async_cb_context; 131 void *async_cb_context;
154}; 132};
155 133
156static void pn544_hci_platform_init(struct pn544_hci_info *info)
157{
158 int polarity, retry, ret;
159 char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
160 int count = sizeof(rset_cmd);
161
162 pr_info(DRIVER_DESC ": %s\n", __func__);
163 dev_info(&info->i2c_dev->dev, "Detecting nfc_en polarity\n");
164
165 /* Disable fw download */
166 gpio_set_value(info->gpio_fw, 0);
167
168 for (polarity = 0; polarity < 2; polarity++) {
169 info->en_polarity = polarity;
170 retry = 3;
171 while (retry--) {
172 /* power off */
173 gpio_set_value(info->gpio_en, !info->en_polarity);
174 usleep_range(10000, 15000);
175
176 /* power on */
177 gpio_set_value(info->gpio_en, info->en_polarity);
178 usleep_range(10000, 15000);
179
180 /* send reset */
181 dev_dbg(&info->i2c_dev->dev, "Sending reset cmd\n");
182 ret = i2c_master_send(info->i2c_dev, rset_cmd, count);
183 if (ret == count) {
184 dev_info(&info->i2c_dev->dev,
185 "nfc_en polarity : active %s\n",
186 (polarity == 0 ? "low" : "high"));
187 goto out;
188 }
189 }
190 }
191
192 dev_err(&info->i2c_dev->dev,
193 "Could not detect nfc_en polarity, fallback to active high\n");
194
195out:
196 gpio_set_value(info->gpio_en, !info->en_polarity);
197}
198
199static int pn544_hci_enable(struct pn544_hci_info *info, int mode)
200{
201 pr_info(DRIVER_DESC ": %s\n", __func__);
202
203 gpio_set_value(info->gpio_fw, 0);
204 gpio_set_value(info->gpio_en, info->en_polarity);
205 usleep_range(10000, 15000);
206
207 return 0;
208}
209
210static void pn544_hci_disable(struct pn544_hci_info *info)
211{
212 pr_info(DRIVER_DESC ": %s\n", __func__);
213
214 gpio_set_value(info->gpio_fw, 0);
215 gpio_set_value(info->gpio_en, !info->en_polarity);
216 usleep_range(10000, 15000);
217
218 gpio_set_value(info->gpio_en, info->en_polarity);
219 usleep_range(10000, 15000);
220
221 gpio_set_value(info->gpio_en, !info->en_polarity);
222 usleep_range(10000, 15000);
223}
224
225static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
226{
227 int r;
228
229 usleep_range(3000, 6000);
230
231 r = i2c_master_send(client, buf, len);
232
233 if (r == -EREMOTEIO) { /* Retry, chip was in standby */
234 usleep_range(6000, 10000);
235 r = i2c_master_send(client, buf, len);
236 }
237
238 if (r >= 0) {
239 if (r != len)
240 return -EREMOTEIO;
241 else
242 return 0;
243 }
244
245 return r;
246}
247
248static int check_crc(u8 *buf, int buflen)
249{
250 int len;
251 u16 crc;
252
253 len = buf[0] + 1;
254 crc = crc_ccitt(0xffff, buf, len - 2);
255 crc = ~crc;
256
257 if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
258 pr_err(PN544_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
259 crc, buf[len - 1], buf[len - 2]);
260
261 pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
262 print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
263 16, 2, buf, buflen, false);
264 return -EPERM;
265 }
266 return 0;
267}
268
269/*
270 * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees
271 * that i2c bus will be flushed and that next read will start on a new frame.
272 * returned skb contains only LLC header and payload.
273 * returns:
274 * -EREMOTEIO : i2c read error (fatal)
275 * -EBADMSG : frame was incorrect and discarded
276 * -ENOMEM : cannot allocate skb, frame dropped
277 */
278static int pn544_hci_i2c_read(struct i2c_client *client, struct sk_buff **skb)
279{
280 int r;
281 u8 len;
282 u8 tmp[PN544_HCI_LLC_MAX_SIZE - 1];
283
284 r = i2c_master_recv(client, &len, 1);
285 if (r != 1) {
286 dev_err(&client->dev, "cannot read len byte\n");
287 return -EREMOTEIO;
288 }
289
290 if ((len < (PN544_HCI_LLC_MIN_SIZE - 1)) ||
291 (len > (PN544_HCI_LLC_MAX_SIZE - 1))) {
292 dev_err(&client->dev, "invalid len byte\n");
293 r = -EBADMSG;
294 goto flush;
295 }
296
297 *skb = alloc_skb(1 + len, GFP_KERNEL);
298 if (*skb == NULL) {
299 r = -ENOMEM;
300 goto flush;
301 }
302
303 *skb_put(*skb, 1) = len;
304
305 r = i2c_master_recv(client, skb_put(*skb, len), len);
306 if (r != len) {
307 kfree_skb(*skb);
308 return -EREMOTEIO;
309 }
310
311 r = check_crc((*skb)->data, (*skb)->len);
312 if (r != 0) {
313 kfree_skb(*skb);
314 r = -EBADMSG;
315 goto flush;
316 }
317
318 skb_pull(*skb, 1);
319 skb_trim(*skb, (*skb)->len - 2);
320
321 usleep_range(3000, 6000);
322
323 return 0;
324
325flush:
326 if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0)
327 r = -EREMOTEIO;
328
329 usleep_range(3000, 6000);
330
331 return r;
332}
333
334/*
335 * Reads an shdlc frame from the chip. This is not as straightforward as it
336 * seems. There are cases where we could loose the frame start synchronization.
337 * The frame format is len-data-crc, and corruption can occur anywhere while
338 * transiting on i2c bus, such that we could read an invalid len.
339 * In order to recover synchronization with the next frame, we must be sure
340 * to read the real amount of data without using the len byte. We do this by
341 * assuming the following:
342 * - the chip will always present only one single complete frame on the bus
343 * before triggering the interrupt
344 * - the chip will not present a new frame until we have completely read
345 * the previous one (or until we have handled the interrupt).
346 * The tricky case is when we read a corrupted len that is less than the real
347 * len. We must detect this here in order to determine that we need to flush
348 * the bus. This is the reason why we check the crc here.
349 */
350static irqreturn_t pn544_hci_irq_thread_fn(int irq, void *dev_id)
351{
352 struct pn544_hci_info *info = dev_id;
353 struct i2c_client *client;
354 struct sk_buff *skb = NULL;
355 int r;
356
357 if (!info || irq != info->i2c_dev->irq) {
358 WARN_ON_ONCE(1);
359 return IRQ_NONE;
360 }
361
362 client = info->i2c_dev;
363 dev_dbg(&client->dev, "IRQ\n");
364
365 if (info->hard_fault != 0)
366 return IRQ_HANDLED;
367
368 r = pn544_hci_i2c_read(client, &skb);
369 if (r == -EREMOTEIO) {
370 info->hard_fault = r;
371
372 nfc_hci_recv_frame(info->hdev, NULL);
373
374 return IRQ_HANDLED;
375 } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
376 return IRQ_HANDLED;
377 }
378
379 nfc_hci_recv_frame(info->hdev, skb);
380
381 return IRQ_HANDLED;
382}
383
384static int pn544_hci_open(struct nfc_hci_dev *hdev) 134static int pn544_hci_open(struct nfc_hci_dev *hdev)
385{ 135{
386 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); 136 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
@@ -393,7 +143,7 @@ static int pn544_hci_open(struct nfc_hci_dev *hdev)
393 goto out; 143 goto out;
394 } 144 }
395 145
396 r = pn544_hci_enable(info, HCI_MODE); 146 r = info->phy_ops->enable(info->phy_id);
397 147
398 if (r == 0) 148 if (r == 0)
399 info->state = PN544_ST_READY; 149 info->state = PN544_ST_READY;
@@ -412,7 +162,7 @@ static void pn544_hci_close(struct nfc_hci_dev *hdev)
412 if (info->state == PN544_ST_COLD) 162 if (info->state == PN544_ST_COLD)
413 goto out; 163 goto out;
414 164
415 pn544_hci_disable(info); 165 info->phy_ops->disable(info->phy_id);
416 166
417 info->state = PN544_ST_COLD; 167 info->state = PN544_ST_COLD;
418 168
@@ -587,40 +337,11 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
587 return 0; 337 return 0;
588} 338}
589 339
590static void pn544_hci_add_len_crc(struct sk_buff *skb)
591{
592 u16 crc;
593 int len;
594
595 len = skb->len + 2;
596 *skb_push(skb, 1) = len;
597
598 crc = crc_ccitt(0xffff, skb->data, skb->len);
599 crc = ~crc;
600 *skb_put(skb, 1) = crc & 0xff;
601 *skb_put(skb, 1) = crc >> 8;
602}
603
604static void pn544_hci_remove_len_crc(struct sk_buff *skb)
605{
606 skb_pull(skb, PN544_FRAME_HEADROOM);
607 skb_trim(skb, PN544_FRAME_TAILROOM);
608}
609
610static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) 340static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
611{ 341{
612 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); 342 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
613 struct i2c_client *client = info->i2c_dev;
614 int r;
615 343
616 if (info->hard_fault != 0) 344 return info->phy_ops->write(info->phy_id, skb);
617 return info->hard_fault;
618
619 pn544_hci_add_len_crc(skb);
620 r = pn544_hci_i2c_write(client, skb->data, skb->len);
621 pn544_hci_remove_len_crc(skb);
622
623 return r;
624} 345}
625 346
626static int pn544_hci_start_poll(struct nfc_hci_dev *hdev, 347static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
@@ -630,6 +351,9 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
630 int r; 351 int r;
631 u8 duration[2]; 352 u8 duration[2];
632 u8 activated; 353 u8 activated;
354 u8 i_mode = 0x3f; /* Enable all supported modes */
355 u8 t_mode = 0x0f;
356 u8 t_merge = 0x01; /* Enable merge by default */
633 357
634 pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n", 358 pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
635 __func__, im_protocols, tm_protocols); 359 __func__, im_protocols, tm_protocols);
@@ -667,6 +391,61 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
667 if (r < 0) 391 if (r < 0)
668 return r; 392 return r;
669 393
394 if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
395 hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
396 &hdev->gb_len);
397 pr_debug("generate local bytes %p", hdev->gb);
398 if (hdev->gb == NULL || hdev->gb_len == 0) {
399 im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
400 tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
401 }
402 }
403
404 if (im_protocols & NFC_PROTO_NFC_DEP_MASK) {
405 r = nfc_hci_send_event(hdev,
406 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
407 NFC_HCI_EVT_END_OPERATION, NULL, 0);
408 if (r < 0)
409 return r;
410
411 r = nfc_hci_set_param(hdev,
412 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
413 PN544_DEP_MODE, &i_mode, 1);
414 if (r < 0)
415 return r;
416
417 r = nfc_hci_set_param(hdev,
418 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
419 PN544_DEP_ATR_REQ, hdev->gb, hdev->gb_len);
420 if (r < 0)
421 return r;
422
423 r = nfc_hci_send_event(hdev,
424 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
425 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
426 if (r < 0)
427 nfc_hci_send_event(hdev,
428 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
429 NFC_HCI_EVT_END_OPERATION, NULL, 0);
430 }
431
432 if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
433 r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
434 PN544_DEP_MODE, &t_mode, 1);
435 if (r < 0)
436 return r;
437
438 r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
439 PN544_DEP_ATR_RES, hdev->gb, hdev->gb_len);
440 if (r < 0)
441 return r;
442
443 r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
444 PN544_DEP_MERGE, &t_merge, 1);
445 if (r < 0)
446 return r;
447 }
448
670 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, 449 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
671 NFC_HCI_EVT_READER_REQUESTED, NULL, 0); 450 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
672 if (r < 0) 451 if (r < 0)
@@ -676,6 +455,43 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
676 return r; 455 return r;
677} 456}
678 457
458static int pn544_hci_dep_link_up(struct nfc_hci_dev *hdev,
459 struct nfc_target *target, u8 comm_mode,
460 u8 *gb, size_t gb_len)
461{
462 struct sk_buff *rgb_skb = NULL;
463 int r;
464
465 r = nfc_hci_get_param(hdev, target->hci_reader_gate,
466 PN544_DEP_ATR_RES, &rgb_skb);
467 if (r < 0)
468 return r;
469
470 if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) {
471 r = -EPROTO;
472 goto exit;
473 }
474 print_hex_dump(KERN_DEBUG, "remote gb: ", DUMP_PREFIX_OFFSET,
475 16, 1, rgb_skb->data, rgb_skb->len, true);
476
477 r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data,
478 rgb_skb->len);
479
480 if (r == 0)
481 r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode,
482 NFC_RF_INITIATOR);
483exit:
484 kfree_skb(rgb_skb);
485 return r;
486}
487
488static int pn544_hci_dep_link_down(struct nfc_hci_dev *hdev)
489{
490
491 return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE,
492 NFC_HCI_EVT_END_OPERATION, NULL, 0);
493}
494
679static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, 495static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
680 struct nfc_target *target) 496 struct nfc_target *target)
681{ 497{
@@ -687,6 +503,9 @@ static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
687 target->supported_protocols = NFC_PROTO_JEWEL_MASK; 503 target->supported_protocols = NFC_PROTO_JEWEL_MASK;
688 target->sens_res = 0x0c00; 504 target->sens_res = 0x0c00;
689 break; 505 break;
506 case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
507 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
508 break;
690 default: 509 default:
691 return -EPROTO; 510 return -EPROTO;
692 } 511 }
@@ -701,7 +520,18 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
701 struct sk_buff *uid_skb; 520 struct sk_buff *uid_skb;
702 int r = 0; 521 int r = 0;
703 522
704 if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { 523 if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
524 return r;
525
526 if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
527 r = nfc_hci_send_cmd(hdev,
528 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
529 PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL);
530 if (r < 0)
531 return r;
532
533 target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
534 } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
705 if (target->nfcid1_len != 4 && target->nfcid1_len != 7 && 535 if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
706 target->nfcid1_len != 10) 536 target->nfcid1_len != 10)
707 return -EPROTO; 537 return -EPROTO;
@@ -724,6 +554,16 @@ static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
724 PN544_RF_READER_CMD_ACTIVATE_NEXT, 554 PN544_RF_READER_CMD_ACTIVATE_NEXT,
725 uid_skb->data, uid_skb->len, NULL); 555 uid_skb->data, uid_skb->len, NULL);
726 kfree_skb(uid_skb); 556 kfree_skb(uid_skb);
557
558 r = nfc_hci_send_cmd(hdev,
559 PN544_RF_READER_NFCIP1_INITIATOR_GATE,
560 PN544_HCI_CMD_CONTINUE_ACTIVATION,
561 NULL, 0, NULL);
562 if (r < 0)
563 return r;
564
565 target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE;
566 target->supported_protocols = NFC_PROTO_NFC_DEP_MASK;
727 } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { 567 } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) {
728 /* 568 /*
729 * TODO: maybe other ISO 14443 require some kind of continue 569 * TODO: maybe other ISO 14443 require some kind of continue
@@ -769,7 +609,7 @@ static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb,
769 * <= 0: driver handled the data exchange 609 * <= 0: driver handled the data exchange
770 * 1: driver doesn't especially handle, please do standard processing 610 * 1: driver doesn't especially handle, please do standard processing
771 */ 611 */
772static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev, 612static int pn544_hci_im_transceive(struct nfc_hci_dev *hdev,
773 struct nfc_target *target, 613 struct nfc_target *target,
774 struct sk_buff *skb, data_exchange_cb_t cb, 614 struct sk_buff *skb, data_exchange_cb_t cb,
775 void *cb_context) 615 void *cb_context)
@@ -822,17 +662,110 @@ static int pn544_hci_data_exchange(struct nfc_hci_dev *hdev,
822 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, 662 return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
823 PN544_JEWEL_RAW_CMD, skb->data, 663 PN544_JEWEL_RAW_CMD, skb->data,
824 skb->len, cb, cb_context); 664 skb->len, cb, cb_context);
665 case PN544_RF_READER_NFCIP1_INITIATOR_GATE:
666 *skb_push(skb, 1) = 0;
667
668 return nfc_hci_send_event(hdev, target->hci_reader_gate,
669 PN544_HCI_EVT_SND_DATA, skb->data,
670 skb->len);
825 default: 671 default:
826 return 1; 672 return 1;
827 } 673 }
828} 674}
829 675
676static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
677{
678 /* Set default false for multiple information chaining */
679 *skb_push(skb, 1) = 0;
680
681 return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE,
682 PN544_HCI_EVT_SND_DATA, skb->data, skb->len);
683}
684
830static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, 685static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
831 struct nfc_target *target) 686 struct nfc_target *target)
832{ 687{
833 return nfc_hci_send_cmd(hdev, target->hci_reader_gate, 688 pr_debug("supported protocol %d", target->supported_protocols);
834 PN544_RF_READER_CMD_PRESENCE_CHECK, 689 if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
835 NULL, 0, NULL); 690 NFC_PROTO_ISO14443_B_MASK)) {
691 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
692 PN544_RF_READER_CMD_PRESENCE_CHECK,
693 NULL, 0, NULL);
694 } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) {
695 if (target->nfcid1_len != 4 && target->nfcid1_len != 7 &&
696 target->nfcid1_len != 10)
697 return -EOPNOTSUPP;
698
699 return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE,
700 PN544_RF_READER_CMD_ACTIVATE_NEXT,
701 target->nfcid1, target->nfcid1_len, NULL);
702 } else if (target->supported_protocols & NFC_PROTO_JEWEL_MASK) {
703 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
704 PN544_JEWEL_RAW_CMD, NULL, 0, NULL);
705 } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) {
706 return nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE,
707 PN544_FELICA_RAW, NULL, 0, NULL);
708 } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) {
709 return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
710 PN544_HCI_CMD_ATTREQUEST,
711 NULL, 0, NULL);
712 }
713
714 return 0;
715}
716
717static void pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate,
718 u8 event, struct sk_buff *skb)
719{
720 struct sk_buff *rgb_skb = NULL;
721 int r = 0;
722
723 pr_debug("hci event %d", event);
724 switch (event) {
725 case PN544_HCI_EVT_ACTIVATED:
726 if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE)
727 nfc_hci_target_discovered(hdev, gate);
728 else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) {
729 r = nfc_hci_get_param(hdev, gate, PN544_DEP_ATR_REQ,
730 &rgb_skb);
731
732 if (r < 0)
733 goto exit;
734
735 nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK,
736 NFC_COMM_PASSIVE, rgb_skb->data,
737 rgb_skb->len);
738
739 kfree_skb(rgb_skb);
740 }
741
742 break;
743 case PN544_HCI_EVT_DEACTIVATED:
744 nfc_hci_send_event(hdev, gate,
745 NFC_HCI_EVT_END_OPERATION, NULL, 0);
746 break;
747 case PN544_HCI_EVT_RCV_DATA:
748 if (skb->len < 2) {
749 r = -EPROTO;
750 goto exit;
751 }
752
753 if (skb->data[0] != 0) {
754 pr_debug("data0 %d", skb->data[0]);
755 r = -EPROTO;
756 goto exit;
757 }
758
759 skb_pull(skb, 2);
760 nfc_tm_data_received(hdev->ndev, skb);
761
762 return;
763 default:
764 break;
765 }
766
767exit:
768 kfree_skb(skb);
836} 769}
837 770
838static struct nfc_hci_ops pn544_hci_ops = { 771static struct nfc_hci_ops pn544_hci_ops = {
@@ -841,74 +774,36 @@ static struct nfc_hci_ops pn544_hci_ops = {
841 .hci_ready = pn544_hci_ready, 774 .hci_ready = pn544_hci_ready,
842 .xmit = pn544_hci_xmit, 775 .xmit = pn544_hci_xmit,
843 .start_poll = pn544_hci_start_poll, 776 .start_poll = pn544_hci_start_poll,
777 .dep_link_up = pn544_hci_dep_link_up,
778 .dep_link_down = pn544_hci_dep_link_down,
844 .target_from_gate = pn544_hci_target_from_gate, 779 .target_from_gate = pn544_hci_target_from_gate,
845 .complete_target_discovered = pn544_hci_complete_target_discovered, 780 .complete_target_discovered = pn544_hci_complete_target_discovered,
846 .data_exchange = pn544_hci_data_exchange, 781 .im_transceive = pn544_hci_im_transceive,
782 .tm_send = pn544_hci_tm_send,
847 .check_presence = pn544_hci_check_presence, 783 .check_presence = pn544_hci_check_presence,
784 .event_received = pn544_hci_event_received,
848}; 785};
849 786
850static int __devinit pn544_hci_probe(struct i2c_client *client, 787int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
851 const struct i2c_device_id *id) 788 int phy_headroom, int phy_tailroom, int phy_payload,
789 struct nfc_hci_dev **hdev)
852{ 790{
853 struct pn544_hci_info *info; 791 struct pn544_hci_info *info;
854 struct pn544_nfc_platform_data *pdata;
855 int r = 0;
856 u32 protocols; 792 u32 protocols;
857 struct nfc_hci_init_data init_data; 793 struct nfc_hci_init_data init_data;
858 794 int r;
859 dev_dbg(&client->dev, "%s\n", __func__);
860 dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
861
862 if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
863 dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
864 return -ENODEV;
865 }
866 795
867 info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL); 796 info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
868 if (!info) { 797 if (!info) {
869 dev_err(&client->dev, 798 pr_err("Cannot allocate memory for pn544_hci_info.\n");
870 "Cannot allocate memory for pn544_hci_info.\n");
871 r = -ENOMEM; 799 r = -ENOMEM;
872 goto err_info_alloc; 800 goto err_info_alloc;
873 } 801 }
874 802
875 info->i2c_dev = client; 803 info->phy_ops = phy_ops;
804 info->phy_id = phy_id;
876 info->state = PN544_ST_COLD; 805 info->state = PN544_ST_COLD;
877 mutex_init(&info->info_lock); 806 mutex_init(&info->info_lock);
878 i2c_set_clientdata(client, info);
879
880 pdata = client->dev.platform_data;
881 if (pdata == NULL) {
882 dev_err(&client->dev, "No platform data\n");
883 r = -EINVAL;
884 goto err_pdata;
885 }
886
887 if (pdata->request_resources == NULL) {
888 dev_err(&client->dev, "request_resources() missing\n");
889 r = -EINVAL;
890 goto err_pdata;
891 }
892
893 r = pdata->request_resources(client);
894 if (r) {
895 dev_err(&client->dev, "Cannot get platform resources\n");
896 goto err_pdata;
897 }
898
899 info->gpio_en = pdata->get_gpio(NFC_GPIO_ENABLE);
900 info->gpio_fw = pdata->get_gpio(NFC_GPIO_FW_RESET);
901 info->gpio_irq = pdata->get_gpio(NFC_GPIO_IRQ);
902
903 pn544_hci_platform_init(info);
904
905 r = request_threaded_irq(client->irq, NULL, pn544_hci_irq_thread_fn,
906 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
907 PN544_HCI_DRIVER_NAME, info);
908 if (r < 0) {
909 dev_err(&client->dev, "Unable to register IRQ handler\n");
910 goto err_rti;
911 }
912 807
913 init_data.gate_count = ARRAY_SIZE(pn544_gates); 808 init_data.gate_count = ARRAY_SIZE(pn544_gates);
914 809
@@ -928,13 +823,11 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
928 NFC_PROTO_NFC_DEP_MASK; 823 NFC_PROTO_NFC_DEP_MASK;
929 824
930 info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 825 info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data,
931 protocols, LLC_SHDLC_NAME, 826 protocols, llc_name,
932 PN544_FRAME_HEADROOM + 827 phy_headroom + PN544_CMDS_HEADROOM,
933 PN544_CMDS_HEADROOM, 828 phy_tailroom, phy_payload);
934 PN544_FRAME_TAILROOM,
935 PN544_HCI_LLC_MAX_PAYLOAD);
936 if (!info->hdev) { 829 if (!info->hdev) {
937 dev_err(&client->dev, "Cannot allocate nfc hdev.\n"); 830 pr_err("Cannot allocate nfc hdev.\n");
938 r = -ENOMEM; 831 r = -ENOMEM;
939 goto err_alloc_hdev; 832 goto err_alloc_hdev;
940 } 833 }
@@ -945,79 +838,25 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
945 if (r) 838 if (r)
946 goto err_regdev; 839 goto err_regdev;
947 840
841 *hdev = info->hdev;
842
948 return 0; 843 return 0;
949 844
950err_regdev: 845err_regdev:
951 nfc_hci_free_device(info->hdev); 846 nfc_hci_free_device(info->hdev);
952 847
953err_alloc_hdev: 848err_alloc_hdev:
954 free_irq(client->irq, info);
955
956err_rti:
957 if (pdata->free_resources != NULL)
958 pdata->free_resources();
959
960err_pdata:
961 kfree(info); 849 kfree(info);
962 850
963err_info_alloc: 851err_info_alloc:
964 return r; 852 return r;
965} 853}
966 854
967static __devexit int pn544_hci_remove(struct i2c_client *client) 855void pn544_hci_remove(struct nfc_hci_dev *hdev)
968{ 856{
969 struct pn544_hci_info *info = i2c_get_clientdata(client); 857 struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
970 struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
971
972 dev_dbg(&client->dev, "%s\n", __func__);
973
974 nfc_hci_free_device(info->hdev);
975
976 if (info->state != PN544_ST_COLD) {
977 if (pdata->disable)
978 pdata->disable();
979 }
980
981 free_irq(client->irq, info);
982 if (pdata->free_resources)
983 pdata->free_resources();
984 858
859 nfc_hci_unregister_device(hdev);
860 nfc_hci_free_device(hdev);
985 kfree(info); 861 kfree(info);
986
987 return 0;
988} 862}
989
990static struct i2c_driver pn544_hci_driver = {
991 .driver = {
992 .name = PN544_HCI_DRIVER_NAME,
993 },
994 .probe = pn544_hci_probe,
995 .id_table = pn544_hci_id_table,
996 .remove = __devexit_p(pn544_hci_remove),
997};
998
999static int __init pn544_hci_init(void)
1000{
1001 int r;
1002
1003 pr_debug(DRIVER_DESC ": %s\n", __func__);
1004
1005 r = i2c_add_driver(&pn544_hci_driver);
1006 if (r) {
1007 pr_err(PN544_HCI_DRIVER_NAME ": driver registration failed\n");
1008 return r;
1009 }
1010
1011 return 0;
1012}
1013
1014static void __exit pn544_hci_exit(void)
1015{
1016 i2c_del_driver(&pn544_hci_driver);
1017}
1018
1019module_init(pn544_hci_init);
1020module_exit(pn544_hci_exit);
1021
1022MODULE_LICENSE("GPL");
1023MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
new file mode 100644
index 000000000000..f47c6454914b
--- /dev/null
+++ b/drivers/nfc/pn544/pn544.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright (C) 2011 - 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __LOCAL_PN544_H_
21#define __LOCAL_PN544_H_
22
23#include <net/nfc/hci.h>
24
25#define DRIVER_DESC "HCI NFC driver for PN544"
26
27int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
28 int phy_headroom, int phy_tailroom, int phy_payload,
29 struct nfc_hci_dev **hdev);
30void pn544_hci_remove(struct nfc_hci_dev *hdev);
31
32#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 258ca596e1bc..982d16b5a846 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -6,7 +6,6 @@ menu "PPS support"
6 6
7config PPS 7config PPS
8 tristate "PPS support" 8 tristate "PPS support"
9 depends on EXPERIMENTAL
10 ---help--- 9 ---help---
11 PPS (Pulse Per Second) is a special pulse provided by some GPS 10 PPS (Pulse Per Second) is a special pulse provided by some GPS
12 antennae. Userland can use it to get a high-precision time 11 antennae. Userland can use it to get a high-precision time
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ffdf712f9a67..1ea6f1dbbedd 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -4,13 +4,9 @@
4 4
5menu "PTP clock support" 5menu "PTP clock support"
6 6
7comment "Enable Device Drivers -> PPS to see the PTP clock options."
8 depends on PPS=n
9
10config PTP_1588_CLOCK 7config PTP_1588_CLOCK
11 tristate "PTP clock support" 8 tristate "PTP clock support"
12 depends on EXPERIMENTAL 9 select PPS
13 depends on PPS
14 help 10 help
15 The IEEE 1588 standard defines a method to precisely 11 The IEEE 1588 standard defines a method to precisely
16 synchronize distributed clocks over Ethernet networks. The 12 synchronize distributed clocks over Ethernet networks. The
@@ -29,8 +25,9 @@ config PTP_1588_CLOCK
29 25
30config PTP_1588_CLOCK_GIANFAR 26config PTP_1588_CLOCK_GIANFAR
31 tristate "Freescale eTSEC as PTP clock" 27 tristate "Freescale eTSEC as PTP clock"
32 depends on PTP_1588_CLOCK
33 depends on GIANFAR 28 depends on GIANFAR
29 select PTP_1588_CLOCK
30 default y
34 help 31 help
35 This driver adds support for using the eTSEC as a PTP 32 This driver adds support for using the eTSEC as a PTP
36 clock. This clock is only useful if your PTP programs are 33 clock. This clock is only useful if your PTP programs are
@@ -42,8 +39,9 @@ config PTP_1588_CLOCK_GIANFAR
42 39
43config PTP_1588_CLOCK_IXP46X 40config PTP_1588_CLOCK_IXP46X
44 tristate "Intel IXP46x as PTP clock" 41 tristate "Intel IXP46x as PTP clock"
45 depends on PTP_1588_CLOCK
46 depends on IXP4XX_ETH 42 depends on IXP4XX_ETH
43 select PTP_1588_CLOCK
44 default y
47 help 45 help
48 This driver adds support for using the IXP46X as a PTP 46 This driver adds support for using the IXP46X as a PTP
49 clock. This clock is only useful if your PTP programs are 47 clock. This clock is only useful if your PTP programs are
@@ -54,13 +52,13 @@ config PTP_1588_CLOCK_IXP46X
54 will be called ptp_ixp46x. 52 will be called ptp_ixp46x.
55 53
56comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks." 54comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
57 depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n) 55 depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
58 56
59config DP83640_PHY 57config DP83640_PHY
60 tristate "Driver for the National Semiconductor DP83640 PHYTER" 58 tristate "Driver for the National Semiconductor DP83640 PHYTER"
61 depends on PTP_1588_CLOCK
62 depends on NETWORK_PHY_TIMESTAMPING 59 depends on NETWORK_PHY_TIMESTAMPING
63 depends on PHYLIB 60 depends on PHYLIB
61 select PTP_1588_CLOCK
64 ---help--- 62 ---help---
65 Supports the DP83640 PHYTER with IEEE 1588 features. 63 Supports the DP83640 PHYTER with IEEE 1588 features.
66 64
@@ -74,8 +72,7 @@ config DP83640_PHY
74 72
75config PTP_1588_CLOCK_PCH 73config PTP_1588_CLOCK_PCH
76 tristate "Intel PCH EG20T as PTP clock" 74 tristate "Intel PCH EG20T as PTP clock"
77 depends on PTP_1588_CLOCK 75 select PTP_1588_CLOCK
78 depends on PCH_GBE
79 help 76 help
80 This driver adds support for using the PCH EG20T as a PTP 77 This driver adds support for using the PCH EG20T as a PTP
81 clock. The hardware supports time stamping of PTP packets 78 clock. The hardware supports time stamping of PTP packets
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index e7f301da2902..34a0c607318e 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -21,6 +21,7 @@
21#include <linux/posix-clock.h> 21#include <linux/posix-clock.h>
22#include <linux/poll.h> 22#include <linux/poll.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/slab.h>
24 25
25#include "ptp_private.h" 26#include "ptp_private.h"
26 27
@@ -33,9 +34,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
33{ 34{
34 struct ptp_clock_caps caps; 35 struct ptp_clock_caps caps;
35 struct ptp_clock_request req; 36 struct ptp_clock_request req;
37 struct ptp_sys_offset *sysoff = NULL;
36 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 38 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
37 struct ptp_clock_info *ops = ptp->info; 39 struct ptp_clock_info *ops = ptp->info;
40 struct ptp_clock_time *pct;
41 struct timespec ts;
38 int enable, err = 0; 42 int enable, err = 0;
43 unsigned int i;
39 44
40 switch (cmd) { 45 switch (cmd) {
41 46
@@ -88,10 +93,45 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
88 err = ops->enable(ops, &req, enable); 93 err = ops->enable(ops, &req, enable);
89 break; 94 break;
90 95
96 case PTP_SYS_OFFSET:
97 sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL);
98 if (!sysoff) {
99 err = -ENOMEM;
100 break;
101 }
102 if (copy_from_user(sysoff, (void __user *)arg,
103 sizeof(*sysoff))) {
104 err = -EFAULT;
105 break;
106 }
107 if (sysoff->n_samples > PTP_MAX_SAMPLES) {
108 err = -EINVAL;
109 break;
110 }
111 pct = &sysoff->ts[0];
112 for (i = 0; i < sysoff->n_samples; i++) {
113 getnstimeofday(&ts);
114 pct->sec = ts.tv_sec;
115 pct->nsec = ts.tv_nsec;
116 pct++;
117 ptp->info->gettime(ptp->info, &ts);
118 pct->sec = ts.tv_sec;
119 pct->nsec = ts.tv_nsec;
120 pct++;
121 }
122 getnstimeofday(&ts);
123 pct->sec = ts.tv_sec;
124 pct->nsec = ts.tv_nsec;
125 if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
126 err = -EFAULT;
127 break;
128
91 default: 129 default:
92 err = -ENOTTY; 130 err = -ENOTTY;
93 break; 131 break;
94 } 132 }
133
134 kfree(sysoff);
95 return err; 135 return err;
96} 136}
97 137
@@ -104,20 +144,23 @@ unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
104 return queue_cnt(&ptp->tsevq) ? POLLIN : 0; 144 return queue_cnt(&ptp->tsevq) ? POLLIN : 0;
105} 145}
106 146
147#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))
148
107ssize_t ptp_read(struct posix_clock *pc, 149ssize_t ptp_read(struct posix_clock *pc,
108 uint rdflags, char __user *buf, size_t cnt) 150 uint rdflags, char __user *buf, size_t cnt)
109{ 151{
110 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 152 struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
111 struct timestamp_event_queue *queue = &ptp->tsevq; 153 struct timestamp_event_queue *queue = &ptp->tsevq;
112 struct ptp_extts_event event[PTP_BUF_TIMESTAMPS]; 154 struct ptp_extts_event *event;
113 unsigned long flags; 155 unsigned long flags;
114 size_t qcnt, i; 156 size_t qcnt, i;
157 int result;
115 158
116 if (cnt % sizeof(struct ptp_extts_event) != 0) 159 if (cnt % sizeof(struct ptp_extts_event) != 0)
117 return -EINVAL; 160 return -EINVAL;
118 161
119 if (cnt > sizeof(event)) 162 if (cnt > EXTTS_BUFSIZE)
120 cnt = sizeof(event); 163 cnt = EXTTS_BUFSIZE;
121 164
122 cnt = cnt / sizeof(struct ptp_extts_event); 165 cnt = cnt / sizeof(struct ptp_extts_event);
123 166
@@ -135,6 +178,12 @@ ssize_t ptp_read(struct posix_clock *pc,
135 return -ENODEV; 178 return -ENODEV;
136 } 179 }
137 180
181 event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
182 if (!event) {
183 mutex_unlock(&ptp->tsevq_mux);
184 return -ENOMEM;
185 }
186
138 spin_lock_irqsave(&queue->lock, flags); 187 spin_lock_irqsave(&queue->lock, flags);
139 188
140 qcnt = queue_cnt(queue); 189 qcnt = queue_cnt(queue);
@@ -153,8 +202,10 @@ ssize_t ptp_read(struct posix_clock *pc,
153 202
154 mutex_unlock(&ptp->tsevq_mux); 203 mutex_unlock(&ptp->tsevq_mux);
155 204
205 result = cnt;
156 if (copy_to_user(buf, event, cnt)) 206 if (copy_to_user(buf, event, cnt))
157 return -EFAULT; 207 result = -EFAULT;
158 208
159 return cnt; 209 kfree(event);
210 return result;
160} 211}
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index a0a4afe537d0..5c70a6599578 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3018,10 +3018,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
3018{ 3018{
3019 struct claw_privbk *priv; 3019 struct claw_privbk *priv;
3020 3020
3021 BUG_ON(!cgdev);
3022 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev)); 3021 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3023 priv = dev_get_drvdata(&cgdev->dev); 3022 priv = dev_get_drvdata(&cgdev->dev);
3024 BUG_ON(!priv);
3025 dev_info(&cgdev->dev, " will be removed.\n"); 3023 dev_info(&cgdev->dev, " will be removed.\n");
3026 if (cgdev->state == CCWGROUP_ONLINE) 3024 if (cgdev->state == CCWGROUP_ONLINE)
3027 claw_shutdown_device(cgdev); 3025 claw_shutdown_device(cgdev);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 98ea9cc6f1aa..817b68925ddd 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1691,8 +1691,6 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev)
1691{ 1691{
1692 struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); 1692 struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
1693 1693
1694 BUG_ON(priv == NULL);
1695
1696 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1694 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
1697 "removing device %p, proto : %d", 1695 "removing device %p, proto : %d",
1698 cgdev, priv->protocol); 1696 cgdev, priv->protocol);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 05b734a2b5b7..2dbc77b5137b 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1367,7 +1367,6 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
1367 struct mpc_group *grp; 1367 struct mpc_group *grp;
1368 struct channel *wch; 1368 struct channel *wch;
1369 1369
1370 BUG_ON(dev == NULL);
1371 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1370 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
1372 1371
1373 priv = dev->ml_priv; 1372 priv = dev->ml_priv;
@@ -1472,8 +1471,6 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
1472 struct channel *wch; 1471 struct channel *wch;
1473 struct channel *rch; 1472 struct channel *rch;
1474 1473
1475 BUG_ON(dev == NULL);
1476
1477 priv = dev->ml_priv; 1474 priv = dev->ml_priv;
1478 grp = priv->mpcg; 1475 grp = priv->mpcg;
1479 wch = priv->channel[CTCM_WRITE]; 1476 wch = priv->channel[CTCM_WRITE];
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fa7adad6f9ba..480fbeab0256 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -933,6 +933,7 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
933int qeth_configure_cq(struct qeth_card *, enum qeth_cq); 933int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
934int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); 934int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
935int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot); 935int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
936void qeth_trace_features(struct qeth_card *);
936 937
937/* exports for OSN */ 938/* exports for OSN */
938int qeth_osn_assist(struct net_device *, void *, int); 939int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4d6ba00d0047..638a57f4d8a1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,13 +73,13 @@ static inline const char *qeth_get_cardname(struct qeth_card *card)
73 if (card->info.guestlan) { 73 if (card->info.guestlan) {
74 switch (card->info.type) { 74 switch (card->info.type) {
75 case QETH_CARD_TYPE_OSD: 75 case QETH_CARD_TYPE_OSD:
76 return " Guest LAN QDIO"; 76 return " Virtual NIC QDIO";
77 case QETH_CARD_TYPE_IQD: 77 case QETH_CARD_TYPE_IQD:
78 return " Guest LAN Hiper"; 78 return " Virtual NIC Hiper";
79 case QETH_CARD_TYPE_OSM: 79 case QETH_CARD_TYPE_OSM:
80 return " Guest LAN QDIO - OSM"; 80 return " Virtual NIC QDIO - OSM";
81 case QETH_CARD_TYPE_OSX: 81 case QETH_CARD_TYPE_OSX:
82 return " Guest LAN QDIO - OSX"; 82 return " Virtual NIC QDIO - OSX";
83 default: 83 default:
84 return " unknown"; 84 return " unknown";
85 } 85 }
@@ -108,13 +108,13 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
108 if (card->info.guestlan) { 108 if (card->info.guestlan) {
109 switch (card->info.type) { 109 switch (card->info.type) {
110 case QETH_CARD_TYPE_OSD: 110 case QETH_CARD_TYPE_OSD:
111 return "GuestLAN QDIO"; 111 return "Virt.NIC QDIO";
112 case QETH_CARD_TYPE_IQD: 112 case QETH_CARD_TYPE_IQD:
113 return "GuestLAN Hiper"; 113 return "Virt.NIC Hiper";
114 case QETH_CARD_TYPE_OSM: 114 case QETH_CARD_TYPE_OSM:
115 return "GuestLAN OSM"; 115 return "Virt.NIC OSM";
116 case QETH_CARD_TYPE_OSX: 116 case QETH_CARD_TYPE_OSX:
117 return "GuestLAN OSX"; 117 return "Virt.NIC OSX";
118 default: 118 default:
119 return "unknown"; 119 return "unknown";
120 } 120 }
@@ -383,7 +383,7 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
383 qeth_release_skbs(c); 383 qeth_release_skbs(c);
384 384
385 c = f->next_pending; 385 c = f->next_pending;
386 BUG_ON(head->next_pending != f); 386 WARN_ON_ONCE(head->next_pending != f);
387 head->next_pending = c; 387 head->next_pending = c;
388 kmem_cache_free(qeth_qdio_outbuf_cache, f); 388 kmem_cache_free(qeth_qdio_outbuf_cache, f);
389 } else { 389 } else {
@@ -415,13 +415,12 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
415 buffer = (struct qeth_qdio_out_buffer *) aob->user1; 415 buffer = (struct qeth_qdio_out_buffer *) aob->user1;
416 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); 416 QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
417 417
418 BUG_ON(buffer == NULL);
419
420 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, 418 if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
421 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { 419 QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
422 notification = TX_NOTIFY_OK; 420 notification = TX_NOTIFY_OK;
423 } else { 421 } else {
424 BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING); 422 WARN_ON_ONCE(atomic_read(&buffer->state) !=
423 QETH_QDIO_BUF_PENDING);
425 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); 424 atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
426 notification = TX_NOTIFY_DELAYED_OK; 425 notification = TX_NOTIFY_DELAYED_OK;
427 } 426 }
@@ -1131,7 +1130,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1131 notify_general_error = 1; 1130 notify_general_error = 1;
1132 1131
1133 /* release may never happen from within CQ tasklet scope */ 1132 /* release may never happen from within CQ tasklet scope */
1134 BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1133 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1135 1134
1136 skb = skb_dequeue(&buf->skb_list); 1135 skb = skb_dequeue(&buf->skb_list);
1137 while (skb) { 1136 while (skb) {
@@ -2280,7 +2279,6 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2280 unsigned long data) 2279 unsigned long data)
2281{ 2280{
2282 struct qeth_cmd_buffer *iob; 2281 struct qeth_cmd_buffer *iob;
2283 int rc = 0;
2284 2282
2285 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); 2283 QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");
2286 2284
@@ -2296,7 +2294,7 @@ static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2296 iob->rc = -EMLINK; 2294 iob->rc = -EMLINK;
2297 } 2295 }
2298 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); 2296 QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
2299 return rc; 2297 return 0;
2300} 2298}
2301 2299
2302static int qeth_ulp_setup(struct qeth_card *card) 2300static int qeth_ulp_setup(struct qeth_card *card)
@@ -2401,7 +2399,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2401 card->qdio.out_qs[i]->queue_no = i; 2399 card->qdio.out_qs[i]->queue_no = i;
2402 /* give outbound qeth_qdio_buffers their qdio_buffers */ 2400 /* give outbound qeth_qdio_buffers their qdio_buffers */
2403 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2401 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2404 BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL); 2402 WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
2405 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) 2403 if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
2406 goto out_freeoutqbufs; 2404 goto out_freeoutqbufs;
2407 } 2405 }
@@ -2969,9 +2967,6 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
2969 } else 2967 } else
2970 QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" 2968 QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
2971 "\n", dev_name(&card->gdev->dev)); 2969 "\n", dev_name(&card->gdev->dev));
2972 QETH_DBF_TEXT(SETUP, 2, "suppenbl");
2973 QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
2974 QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
2975 return 0; 2970 return 0;
2976} 2971}
2977 2972
@@ -3569,7 +3564,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3569 if (queue->bufstates && 3564 if (queue->bufstates &&
3570 (queue->bufstates[bidx].flags & 3565 (queue->bufstates[bidx].flags &
3571 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { 3566 QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
3572 BUG_ON(card->options.cq != QETH_CQ_ENABLED); 3567 WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
3573 3568
3574 if (atomic_cmpxchg(&buffer->state, 3569 if (atomic_cmpxchg(&buffer->state,
3575 QETH_QDIO_BUF_PRIMED, 3570 QETH_QDIO_BUF_PRIMED,
@@ -3583,7 +3578,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3583 QETH_CARD_TEXT(queue->card, 5, "aob"); 3578 QETH_CARD_TEXT(queue->card, 5, "aob");
3584 QETH_CARD_TEXT_(queue->card, 5, "%lx", 3579 QETH_CARD_TEXT_(queue->card, 5, "%lx",
3585 virt_to_phys(buffer->aob)); 3580 virt_to_phys(buffer->aob));
3586 BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
3587 if (qeth_init_qdio_out_buf(queue, bidx)) { 3581 if (qeth_init_qdio_out_buf(queue, bidx)) {
3588 QETH_CARD_TEXT(card, 2, "outofbuf"); 3582 QETH_CARD_TEXT(card, 2, "outofbuf");
3589 qeth_schedule_recovery(card); 3583 qeth_schedule_recovery(card);
@@ -4731,6 +4725,19 @@ static void qeth_core_free_card(struct qeth_card *card)
4731 kfree(card); 4725 kfree(card);
4732} 4726}
4733 4727
4728void qeth_trace_features(struct qeth_card *card)
4729{
4730 QETH_CARD_TEXT(card, 2, "features");
4731 QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.supported_funcs);
4732 QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa4.enabled_funcs);
4733 QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.supported_funcs);
4734 QETH_CARD_TEXT_(card, 2, "%x", card->options.ipa6.enabled_funcs);
4735 QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.supported_funcs);
4736 QETH_CARD_TEXT_(card, 2, "%x", card->options.adp.enabled_funcs);
4737 QETH_CARD_TEXT_(card, 2, "%x", card->info.diagass_support);
4738}
4739EXPORT_SYMBOL_GPL(qeth_trace_features);
4740
4734static struct ccw_device_id qeth_ids[] = { 4741static struct ccw_device_id qeth_ids[] = {
4735 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), 4742 {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
4736 .driver_info = QETH_CARD_TYPE_OSD}, 4743 .driver_info = QETH_CARD_TYPE_OSD},
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index fddb62654b6a..73195553f84b 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -411,7 +411,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
411 unsigned int len; 411 unsigned int len;
412 412
413 *done = 0; 413 *done = 0;
414 BUG_ON(!budget); 414 WARN_ON_ONCE(!budget);
415 while (budget) { 415 while (budget) {
416 skb = qeth_core_get_next_skb(card, 416 skb = qeth_core_get_next_skb(card,
417 &card->qdio.in_q->bufs[card->rx.b_index], 417 &card->qdio.in_q->bufs[card->rx.b_index],
@@ -973,7 +973,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
973 int rc = 0; 973 int rc = 0;
974 enum qeth_card_states recover_flag; 974 enum qeth_card_states recover_flag;
975 975
976 BUG_ON(!card);
977 mutex_lock(&card->discipline_mutex); 976 mutex_lock(&card->discipline_mutex);
978 mutex_lock(&card->conf_mutex); 977 mutex_lock(&card->conf_mutex);
979 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 978 QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -986,6 +985,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
986 rc = -ENODEV; 985 rc = -ENODEV;
987 goto out_remove; 986 goto out_remove;
988 } 987 }
988 qeth_trace_features(card);
989 989
990 if (!card->dev && qeth_l2_setup_netdev(card)) { 990 if (!card->dev && qeth_l2_setup_netdev(card)) {
991 rc = -ENODEV; 991 rc = -ENODEV;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 5ba390658498..6e5eef01e667 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1939,7 +1939,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
1939 __u16 magic; 1939 __u16 magic;
1940 1940
1941 *done = 0; 1941 *done = 0;
1942 BUG_ON(!budget); 1942 WARN_ON_ONCE(!budget);
1943 while (budget) { 1943 while (budget) {
1944 skb = qeth_core_get_next_skb(card, 1944 skb = qeth_core_get_next_skb(card,
1945 &card->qdio.in_q->bufs[card->rx.b_index], 1945 &card->qdio.in_q->bufs[card->rx.b_index],
@@ -3334,7 +3334,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3334 int rc = 0; 3334 int rc = 0;
3335 enum qeth_card_states recover_flag; 3335 enum qeth_card_states recover_flag;
3336 3336
3337 BUG_ON(!card);
3338 mutex_lock(&card->discipline_mutex); 3337 mutex_lock(&card->discipline_mutex);
3339 mutex_lock(&card->conf_mutex); 3338 mutex_lock(&card->conf_mutex);
3340 QETH_DBF_TEXT(SETUP, 2, "setonlin"); 3339 QETH_DBF_TEXT(SETUP, 2, "setonlin");
@@ -3347,6 +3346,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3347 rc = -ENODEV; 3346 rc = -ENODEV;
3348 goto out_remove; 3347 goto out_remove;
3349 } 3348 }
3349 qeth_trace_features(card);
3350 3350
3351 if (!card->dev && qeth_l3_setup_netdev(card)) { 3351 if (!card->dev && qeth_l3_setup_netdev(card)) {
3352 rc = -ENODEV; 3352 rc = -ENODEV;
@@ -3714,9 +3714,9 @@ static void qeth_l3_unregister_notifiers(void)
3714{ 3714{
3715 3715
3716 QETH_DBF_TEXT(SETUP, 5, "unregnot"); 3716 QETH_DBF_TEXT(SETUP, 5, "unregnot");
3717 BUG_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); 3717 WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier));
3718#ifdef CONFIG_QETH_IPV6 3718#ifdef CONFIG_QETH_IPV6
3719 BUG_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); 3719 WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier));
3720#endif /* QETH_IPV6 */ 3720#endif /* QETH_IPV6 */
3721} 3721}
3722 3722
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index 266aa1648a02..19396dc4ee47 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -37,6 +37,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) }, 37 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, 38 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
39 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) }, 39 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
40 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) },
40 { 0, }, 41 { 0, },
41}; 42};
42MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); 43MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index e9d2ca11283b..95c33a05f434 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -4,6 +4,7 @@
4 * 4 *
5 * Copyright 2005, Broadcom Corporation 5 * Copyright 2005, Broadcom Corporation
6 * Copyright 2006, 2007, Michael Buesch <m@bues.ch> 6 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
7 * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
7 * 8 *
8 * Licensed under the GNU/GPL. See COPYING for details. 9 * Licensed under the GNU/GPL. See COPYING for details.
9 */ 10 */
@@ -12,6 +13,7 @@
12#include <linux/ssb/ssb_regs.h> 13#include <linux/ssb/ssb_regs.h>
13#include <linux/export.h> 14#include <linux/export.h>
14#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/bcm47xx_wdt.h>
15 17
16#include "ssb_private.h" 18#include "ssb_private.h"
17 19
@@ -280,6 +282,69 @@ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
280 cc->fast_pwrup_delay = tmp; 282 cc->fast_pwrup_delay = tmp;
281} 283}
282 284
285static u32 ssb_chipco_alp_clock(struct ssb_chipcommon *cc)
286{
287 if (cc->capabilities & SSB_CHIPCO_CAP_PMU)
288 return ssb_pmu_get_alp_clock(cc);
289
290 return 20000000;
291}
292
293static u32 ssb_chipco_watchdog_get_max_timer(struct ssb_chipcommon *cc)
294{
295 u32 nb;
296
297 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
298 if (cc->dev->id.revision < 26)
299 nb = 16;
300 else
301 nb = (cc->dev->id.revision >= 37) ? 32 : 24;
302 } else {
303 nb = 28;
304 }
305 if (nb == 32)
306 return 0xffffffff;
307 else
308 return (1 << nb) - 1;
309}
310
311u32 ssb_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks)
312{
313 struct ssb_chipcommon *cc = bcm47xx_wdt_get_drvdata(wdt);
314
315 if (cc->dev->bus->bustype != SSB_BUSTYPE_SSB)
316 return 0;
317
318 return ssb_chipco_watchdog_timer_set(cc, ticks);
319}
320
321u32 ssb_chipco_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms)
322{
323 struct ssb_chipcommon *cc = bcm47xx_wdt_get_drvdata(wdt);
324 u32 ticks;
325
326 if (cc->dev->bus->bustype != SSB_BUSTYPE_SSB)
327 return 0;
328
329 ticks = ssb_chipco_watchdog_timer_set(cc, cc->ticks_per_ms * ms);
330 return ticks / cc->ticks_per_ms;
331}
332
333static int ssb_chipco_watchdog_ticks_per_ms(struct ssb_chipcommon *cc)
334{
335 struct ssb_bus *bus = cc->dev->bus;
336
337 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
338 /* based on 32KHz ILP clock */
339 return 32;
340 } else {
341 if (cc->dev->id.revision < 18)
342 return ssb_clockspeed(bus) / 1000;
343 else
344 return ssb_chipco_alp_clock(cc) / 1000;
345 }
346}
347
283void ssb_chipcommon_init(struct ssb_chipcommon *cc) 348void ssb_chipcommon_init(struct ssb_chipcommon *cc)
284{ 349{
285 if (!cc->dev) 350 if (!cc->dev)
@@ -297,6 +362,11 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
297 chipco_powercontrol_init(cc); 362 chipco_powercontrol_init(cc);
298 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); 363 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
299 calc_fast_powerup_delay(cc); 364 calc_fast_powerup_delay(cc);
365
366 if (cc->dev->bus->bustype == SSB_BUSTYPE_SSB) {
367 cc->ticks_per_ms = ssb_chipco_watchdog_ticks_per_ms(cc);
368 cc->max_timer_ms = ssb_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
369 }
300} 370}
301 371
302void ssb_chipco_suspend(struct ssb_chipcommon *cc) 372void ssb_chipco_suspend(struct ssb_chipcommon *cc)
@@ -395,10 +465,27 @@ void ssb_chipco_timing_init(struct ssb_chipcommon *cc,
395} 465}
396 466
397/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */ 467/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
398void ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks) 468u32 ssb_chipco_watchdog_timer_set(struct ssb_chipcommon *cc, u32 ticks)
399{ 469{
400 /* instant NMI */ 470 u32 maxt;
401 chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks); 471 enum ssb_clkmode clkmode;
472
473 maxt = ssb_chipco_watchdog_get_max_timer(cc);
474 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
475 if (ticks == 1)
476 ticks = 2;
477 else if (ticks > maxt)
478 ticks = maxt;
479 chipco_write32(cc, SSB_CHIPCO_PMU_WATCHDOG, ticks);
480 } else {
481 clkmode = ticks ? SSB_CLKMODE_FAST : SSB_CLKMODE_DYNAMIC;
482 ssb_chipco_set_clockmode(cc, clkmode);
483 if (ticks > maxt)
484 ticks = maxt;
485 /* instant NMI */
486 chipco_write32(cc, SSB_CHIPCO_WATCHDOG, ticks);
487 }
488 return ticks;
402} 489}
403 490
404void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value) 491void ssb_chipco_irq_mask(struct ssb_chipcommon *cc, u32 mask, u32 value)
@@ -473,12 +560,7 @@ int ssb_chipco_serial_init(struct ssb_chipcommon *cc,
473 chipco_read32(cc, SSB_CHIPCO_CORECTL) 560 chipco_read32(cc, SSB_CHIPCO_CORECTL)
474 | SSB_CHIPCO_CORECTL_UARTCLK0); 561 | SSB_CHIPCO_CORECTL_UARTCLK0);
475 } else if ((ccrev >= 11) && (ccrev != 15)) { 562 } else if ((ccrev >= 11) && (ccrev != 15)) {
476 /* Fixed ALP clock */ 563 baud_base = ssb_chipco_alp_clock(cc);
477 baud_base = 20000000;
478 if (cc->capabilities & SSB_CHIPCO_CAP_PMU) {
479 /* FIXME: baud_base is different for devices with a PMU */
480 SSB_WARN_ON(1);
481 }
482 div = 1; 564 div = 1;
483 if (ccrev >= 21) { 565 if (ccrev >= 21) {
484 /* Turn off UART clock before switching clocksource. */ 566 /* Turn off UART clock before switching clocksource. */
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index b58fef780ea0..a43415a7fbed 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -346,6 +346,8 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
346 chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0); 346 chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0);
347 } 347 }
348 break; 348 break;
349 case 43222:
350 break;
349 default: 351 default:
350 ssb_printk(KERN_ERR PFX 352 ssb_printk(KERN_ERR PFX
351 "ERROR: PLL init unknown for device %04X\n", 353 "ERROR: PLL init unknown for device %04X\n",
@@ -434,6 +436,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
434 min_msk = 0xCBB; 436 min_msk = 0xCBB;
435 break; 437 break;
436 case 0x4322: 438 case 0x4322:
439 case 43222:
437 /* We keep the default settings: 440 /* We keep the default settings:
438 * min_msk = 0xCBB 441 * min_msk = 0xCBB
439 * max_msk = 0x7FFFF 442 * max_msk = 0x7FFFF
@@ -615,6 +618,33 @@ void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on)
615EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage); 618EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage);
616EXPORT_SYMBOL(ssb_pmu_set_ldo_paref); 619EXPORT_SYMBOL(ssb_pmu_set_ldo_paref);
617 620
621static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc)
622{
623 u32 crystalfreq;
624 const struct pmu0_plltab_entry *e = NULL;
625
626 crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
627 SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
628 e = pmu0_plltab_find_entry(crystalfreq);
629 BUG_ON(!e);
630 return e->freq * 1000;
631}
632
633u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
634{
635 struct ssb_bus *bus = cc->dev->bus;
636
637 switch (bus->chip_id) {
638 case 0x5354:
639 ssb_pmu_get_alp_clock_clk0(cc);
640 default:
641 ssb_printk(KERN_ERR PFX
642 "ERROR: PMU alp clock unknown for device %04X\n",
643 bus->chip_id);
644 return 0;
645 }
646}
647
618u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc) 648u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
619{ 649{
620 struct ssb_bus *bus = cc->dev->bus; 650 struct ssb_bus *bus = cc->dev->bus;
diff --git a/drivers/ssb/driver_extif.c b/drivers/ssb/driver_extif.c
index dc47f30e9cf7..553227a3062d 100644
--- a/drivers/ssb/driver_extif.c
+++ b/drivers/ssb/driver_extif.c
@@ -112,10 +112,30 @@ void ssb_extif_get_clockcontrol(struct ssb_extif *extif,
112 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB); 112 *m = extif_read32(extif, SSB_EXTIF_CLOCK_SB);
113} 113}
114 114
115void ssb_extif_watchdog_timer_set(struct ssb_extif *extif, 115u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks)
116 u32 ticks)
117{ 116{
117 struct ssb_extif *extif = bcm47xx_wdt_get_drvdata(wdt);
118
119 return ssb_extif_watchdog_timer_set(extif, ticks);
120}
121
122u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms)
123{
124 struct ssb_extif *extif = bcm47xx_wdt_get_drvdata(wdt);
125 u32 ticks = (SSB_EXTIF_WATCHDOG_CLK / 1000) * ms;
126
127 ticks = ssb_extif_watchdog_timer_set(extif, ticks);
128
129 return (ticks * 1000) / SSB_EXTIF_WATCHDOG_CLK;
130}
131
132u32 ssb_extif_watchdog_timer_set(struct ssb_extif *extif, u32 ticks)
133{
134 if (ticks > SSB_EXTIF_WATCHDOG_MAX_TIMER)
135 ticks = SSB_EXTIF_WATCHDOG_MAX_TIMER;
118 extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks); 136 extif_write32(extif, SSB_EXTIF_WATCHDOG, ticks);
137
138 return ticks;
119} 139}
120 140
121u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask) 141u32 ssb_extif_gpio_in(struct ssb_extif *extif, u32 mask)
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index c6250867a95d..5bd05b136d22 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -178,9 +178,9 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
178{ 178{
179 struct ssb_bus *bus = mcore->dev->bus; 179 struct ssb_bus *bus = mcore->dev->bus;
180 180
181 if (bus->extif.dev) 181 if (ssb_extif_available(&bus->extif))
182 mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports); 182 mcore->nr_serial_ports = ssb_extif_serial_init(&bus->extif, mcore->serial_ports);
183 else if (bus->chipco.dev) 183 else if (ssb_chipco_available(&bus->chipco))
184 mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports); 184 mcore->nr_serial_ports = ssb_chipco_serial_init(&bus->chipco, mcore->serial_ports);
185 else 185 else
186 mcore->nr_serial_ports = 0; 186 mcore->nr_serial_ports = 0;
@@ -191,10 +191,11 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
191 struct ssb_bus *bus = mcore->dev->bus; 191 struct ssb_bus *bus = mcore->dev->bus;
192 192
193 /* When there is no chipcommon on the bus there is 4MB flash */ 193 /* When there is no chipcommon on the bus there is 4MB flash */
194 if (!bus->chipco.dev) { 194 if (!ssb_chipco_available(&bus->chipco)) {
195 mcore->flash_buswidth = 2; 195 mcore->pflash.present = true;
196 mcore->flash_window = SSB_FLASH1; 196 mcore->pflash.buswidth = 2;
197 mcore->flash_window_size = SSB_FLASH1_SZ; 197 mcore->pflash.window = SSB_FLASH1;
198 mcore->pflash.window_size = SSB_FLASH1_SZ;
198 return; 199 return;
199 } 200 }
200 201
@@ -206,13 +207,14 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
206 break; 207 break;
207 case SSB_CHIPCO_FLASHT_PARA: 208 case SSB_CHIPCO_FLASHT_PARA:
208 pr_debug("Found parallel flash\n"); 209 pr_debug("Found parallel flash\n");
209 mcore->flash_window = SSB_FLASH2; 210 mcore->pflash.present = true;
210 mcore->flash_window_size = SSB_FLASH2_SZ; 211 mcore->pflash.window = SSB_FLASH2;
212 mcore->pflash.window_size = SSB_FLASH2_SZ;
211 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG) 213 if ((ssb_read32(bus->chipco.dev, SSB_CHIPCO_FLASH_CFG)
212 & SSB_CHIPCO_CFG_DS16) == 0) 214 & SSB_CHIPCO_CFG_DS16) == 0)
213 mcore->flash_buswidth = 1; 215 mcore->pflash.buswidth = 1;
214 else 216 else
215 mcore->flash_buswidth = 2; 217 mcore->pflash.buswidth = 2;
216 break; 218 break;
217 } 219 }
218} 220}
@@ -225,9 +227,9 @@ u32 ssb_cpu_clock(struct ssb_mipscore *mcore)
225 if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU) 227 if (bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU)
226 return ssb_pmu_get_cpu_clock(&bus->chipco); 228 return ssb_pmu_get_cpu_clock(&bus->chipco);
227 229
228 if (bus->extif.dev) { 230 if (ssb_extif_available(&bus->extif)) {
229 ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m); 231 ssb_extif_get_clockcontrol(&bus->extif, &pll_type, &n, &m);
230 } else if (bus->chipco.dev) { 232 } else if (ssb_chipco_available(&bus->chipco)) {
231 ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m); 233 ssb_chipco_get_clockcpu(&bus->chipco, &pll_type, &n, &m);
232 } else 234 } else
233 return 0; 235 return 0;
@@ -263,9 +265,9 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
263 hz = 100000000; 265 hz = 100000000;
264 ns = 1000000000 / hz; 266 ns = 1000000000 / hz;
265 267
266 if (bus->extif.dev) 268 if (ssb_extif_available(&bus->extif))
267 ssb_extif_timing_init(&bus->extif, ns); 269 ssb_extif_timing_init(&bus->extif, ns);
268 else if (bus->chipco.dev) 270 else if (ssb_chipco_available(&bus->chipco))
269 ssb_chipco_timing_init(&bus->chipco, ns); 271 ssb_chipco_timing_init(&bus->chipco, ns);
270 272
271 /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */ 273 /* Assign IRQs to all cores on the bus, start with irq line 2, because serial usually takes 1 */
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index 9ef124f9ee2d..bb18d76f9f2c 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -4,11 +4,13 @@
4 * 4 *
5 * Copyright 2005-2008, Broadcom Corporation 5 * Copyright 2005-2008, Broadcom Corporation
6 * Copyright 2006-2008, Michael Buesch <m@bues.ch> 6 * Copyright 2006-2008, Michael Buesch <m@bues.ch>
7 * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
7 * 8 *
8 * Licensed under the GNU/GPL. See COPYING for details. 9 * Licensed under the GNU/GPL. See COPYING for details.
9 */ 10 */
10 11
11#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/platform_device.h>
12#include <linux/ssb/ssb.h> 14#include <linux/ssb/ssb.h>
13#include <linux/ssb/ssb_embedded.h> 15#include <linux/ssb/ssb_embedded.h>
14#include <linux/ssb/ssb_driver_pci.h> 16#include <linux/ssb/ssb_driver_pci.h>
@@ -32,6 +34,39 @@ int ssb_watchdog_timer_set(struct ssb_bus *bus, u32 ticks)
32} 34}
33EXPORT_SYMBOL(ssb_watchdog_timer_set); 35EXPORT_SYMBOL(ssb_watchdog_timer_set);
34 36
37int ssb_watchdog_register(struct ssb_bus *bus)
38{
39 struct bcm47xx_wdt wdt = {};
40 struct platform_device *pdev;
41
42 if (ssb_chipco_available(&bus->chipco)) {
43 wdt.driver_data = &bus->chipco;
44 wdt.timer_set = ssb_chipco_watchdog_timer_set_wdt;
45 wdt.timer_set_ms = ssb_chipco_watchdog_timer_set_ms;
46 wdt.max_timer_ms = bus->chipco.max_timer_ms;
47 } else if (ssb_extif_available(&bus->extif)) {
48 wdt.driver_data = &bus->extif;
49 wdt.timer_set = ssb_extif_watchdog_timer_set_wdt;
50 wdt.timer_set_ms = ssb_extif_watchdog_timer_set_ms;
51 wdt.max_timer_ms = SSB_EXTIF_WATCHDOG_MAX_TIMER_MS;
52 } else {
53 return -ENODEV;
54 }
55
56 pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
57 bus->busnumber, &wdt,
58 sizeof(wdt));
59 if (IS_ERR(pdev)) {
60 ssb_dprintk(KERN_INFO PFX
61 "can not register watchdog device, err: %li\n",
62 PTR_ERR(pdev));
63 return PTR_ERR(pdev);
64 }
65
66 bus->watchdog = pdev;
67 return 0;
68}
69
35u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask) 70u32 ssb_gpio_in(struct ssb_bus *bus, u32 mask)
36{ 71{
37 unsigned long flags; 72 unsigned long flags;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index df0f145c22fc..6e0daaa0e04b 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -13,6 +13,7 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/platform_device.h>
16#include <linux/ssb/ssb.h> 17#include <linux/ssb/ssb.h>
17#include <linux/ssb/ssb_regs.h> 18#include <linux/ssb/ssb_regs.h>
18#include <linux/ssb/ssb_driver_gige.h> 19#include <linux/ssb/ssb_driver_gige.h>
@@ -433,6 +434,11 @@ static void ssb_devices_unregister(struct ssb_bus *bus)
433 if (sdev->dev) 434 if (sdev->dev)
434 device_unregister(sdev->dev); 435 device_unregister(sdev->dev);
435 } 436 }
437
438#ifdef CONFIG_SSB_EMBEDDED
439 if (bus->bustype == SSB_BUSTYPE_SSB)
440 platform_device_unregister(bus->watchdog);
441#endif
436} 442}
437 443
438void ssb_bus_unregister(struct ssb_bus *bus) 444void ssb_bus_unregister(struct ssb_bus *bus)
@@ -561,6 +567,8 @@ static int __devinit ssb_attach_queued_buses(void)
561 if (err) 567 if (err)
562 goto error; 568 goto error;
563 ssb_pcicore_init(&bus->pcicore); 569 ssb_pcicore_init(&bus->pcicore);
570 if (bus->bustype == SSB_BUSTYPE_SSB)
571 ssb_watchdog_register(bus);
564 ssb_bus_may_powerdown(bus); 572 ssb_bus_may_powerdown(bus);
565 573
566 err = ssb_devices_register(bus); 574 err = ssb_devices_register(bus);
@@ -1118,8 +1126,7 @@ static u32 ssb_tmslow_reject_bitmask(struct ssb_device *dev)
1118 case SSB_IDLOW_SSBREV_27: /* same here */ 1126 case SSB_IDLOW_SSBREV_27: /* same here */
1119 return SSB_TMSLOW_REJECT; /* this is a guess */ 1127 return SSB_TMSLOW_REJECT; /* this is a guess */
1120 default: 1128 default:
1121 printk(KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev); 1129 WARN(1, KERN_INFO "ssb: Backplane Revision 0x%.8X\n", rev);
1122 WARN_ON(1);
1123 } 1130 }
1124 return (SSB_TMSLOW_REJECT | SSB_TMSLOW_REJECT_23); 1131 return (SSB_TMSLOW_REJECT | SSB_TMSLOW_REJECT_23);
1125} 1132}
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index a305550b4b65..8942db1d855a 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/ssb/ssb.h> 4#include <linux/ssb/ssb.h>
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/bcm47xx_wdt.h>
6 7
7 8
8#define PFX "ssb: " 9#define PFX "ssb: "
@@ -210,5 +211,35 @@ static inline void b43_pci_ssb_bridge_exit(void)
210/* driver_chipcommon_pmu.c */ 211/* driver_chipcommon_pmu.c */
211extern u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc); 212extern u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc);
212extern u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc); 213extern u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc);
214extern u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc);
215
216extern u32 ssb_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
217 u32 ticks);
218extern u32 ssb_chipco_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
219
220#ifdef CONFIG_SSB_DRIVER_EXTIF
221extern u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt, u32 ticks);
222extern u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt, u32 ms);
223#else
224static inline u32 ssb_extif_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
225 u32 ticks)
226{
227 return 0;
228}
229static inline u32 ssb_extif_watchdog_timer_set_ms(struct bcm47xx_wdt *wdt,
230 u32 ms)
231{
232 return 0;
233}
234#endif
235
236#ifdef CONFIG_SSB_EMBEDDED
237extern int ssb_watchdog_register(struct ssb_bus *bus);
238#else /* CONFIG_SSB_EMBEDDED */
239static inline int ssb_watchdog_register(struct ssb_bus *bus)
240{
241 return 0;
242}
243#endif /* CONFIG_SSB_EMBEDDED */
213 244
214#endif /* LINUX_SSB_PRIVATE_H_ */ 245#endif /* LINUX_SSB_PRIVATE_H_ */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7f93f34b7f91..ebd08b21b234 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -30,9 +30,10 @@
30 30
31#include "vhost.h" 31#include "vhost.h"
32 32
33static int experimental_zcopytx; 33static int experimental_zcopytx = 1;
34module_param(experimental_zcopytx, int, 0444); 34module_param(experimental_zcopytx, int, 0444);
35MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX"); 35MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
36 " 1 -Enable; 0 - Disable");
36 37
37/* Max number of bytes transferred before requeueing the job. 38/* Max number of bytes transferred before requeueing the job.
38 * Using this limit prevents one virtqueue from starving others. */ 39 * Using this limit prevents one virtqueue from starving others. */
@@ -42,6 +43,21 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
42#define VHOST_MAX_PEND 128 43#define VHOST_MAX_PEND 128
43#define VHOST_GOODCOPY_LEN 256 44#define VHOST_GOODCOPY_LEN 256
44 45
46/*
47 * For transmit, used buffer len is unused; we override it to track buffer
48 * status internally; used for zerocopy tx only.
49 */
50/* Lower device DMA failed */
51#define VHOST_DMA_FAILED_LEN 3
52/* Lower device DMA done */
53#define VHOST_DMA_DONE_LEN 2
54/* Lower device DMA in progress */
55#define VHOST_DMA_IN_PROGRESS 1
56/* Buffer unused */
57#define VHOST_DMA_CLEAR_LEN 0
58
59#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
60
45enum { 61enum {
46 VHOST_NET_VQ_RX = 0, 62 VHOST_NET_VQ_RX = 0,
47 VHOST_NET_VQ_TX = 1, 63 VHOST_NET_VQ_TX = 1,
@@ -62,8 +78,39 @@ struct vhost_net {
62 * We only do this when socket buffer fills up. 78 * We only do this when socket buffer fills up.
63 * Protected by tx vq lock. */ 79 * Protected by tx vq lock. */
64 enum vhost_net_poll_state tx_poll_state; 80 enum vhost_net_poll_state tx_poll_state;
81 /* Number of TX recently submitted.
82 * Protected by tx vq lock. */
83 unsigned tx_packets;
84 /* Number of times zerocopy TX recently failed.
85 * Protected by tx vq lock. */
86 unsigned tx_zcopy_err;
87 /* Flush in progress. Protected by tx vq lock. */
88 bool tx_flush;
65}; 89};
66 90
91static void vhost_net_tx_packet(struct vhost_net *net)
92{
93 ++net->tx_packets;
94 if (net->tx_packets < 1024)
95 return;
96 net->tx_packets = 0;
97 net->tx_zcopy_err = 0;
98}
99
100static void vhost_net_tx_err(struct vhost_net *net)
101{
102 ++net->tx_zcopy_err;
103}
104
105static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
106{
107 /* TX flush waits for outstanding DMAs to be done.
108 * Don't start new DMAs.
109 */
110 return !net->tx_flush &&
111 net->tx_packets / 64 >= net->tx_zcopy_err;
112}
113
67static bool vhost_sock_zcopy(struct socket *sock) 114static bool vhost_sock_zcopy(struct socket *sock)
68{ 115{
69 return unlikely(experimental_zcopytx) && 116 return unlikely(experimental_zcopytx) &&
@@ -126,6 +173,55 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
126 net->tx_poll_state = VHOST_NET_POLL_STARTED; 173 net->tx_poll_state = VHOST_NET_POLL_STARTED;
127} 174}
128 175
176/* In case of DMA done not in order in lower device driver for some reason.
177 * upend_idx is used to track end of used idx, done_idx is used to track head
178 * of used idx. Once lower device DMA done contiguously, we will signal KVM
179 * guest used idx.
180 */
181static int vhost_zerocopy_signal_used(struct vhost_net *net,
182 struct vhost_virtqueue *vq)
183{
184 int i;
185 int j = 0;
186
187 for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
188 if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
189 vhost_net_tx_err(net);
190 if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
191 vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
192 vhost_add_used_and_signal(vq->dev, vq,
193 vq->heads[i].id, 0);
194 ++j;
195 } else
196 break;
197 }
198 if (j)
199 vq->done_idx = i;
200 return j;
201}
202
203static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
204{
205 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
206 struct vhost_virtqueue *vq = ubufs->vq;
207 int cnt = atomic_read(&ubufs->kref.refcount);
208
209 /*
210 * Trigger polling thread if guest stopped submitting new buffers:
211 * in this case, the refcount after decrement will eventually reach 1
212 * so here it is 2.
213 * We also trigger polling periodically after each 16 packets
214 * (the value 16 here is more or less arbitrary, it's tuned to trigger
215 * less than 10% of times).
216 */
217 if (cnt <= 2 || !(cnt % 16))
218 vhost_poll_queue(&vq->poll);
219 /* set len to mark this desc buffers done DMA */
220 vq->heads[ubuf->desc].len = success ?
221 VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
222 vhost_ubuf_put(ubufs);
223}
224
129/* Expects to be always run from workqueue - which acts as 225/* Expects to be always run from workqueue - which acts as
130 * read-size critical section for our kind of RCU. */ 226 * read-size critical section for our kind of RCU. */
131static void handle_tx(struct vhost_net *net) 227static void handle_tx(struct vhost_net *net)
@@ -146,7 +242,7 @@ static void handle_tx(struct vhost_net *net)
146 size_t hdr_size; 242 size_t hdr_size;
147 struct socket *sock; 243 struct socket *sock;
148 struct vhost_ubuf_ref *uninitialized_var(ubufs); 244 struct vhost_ubuf_ref *uninitialized_var(ubufs);
149 bool zcopy; 245 bool zcopy, zcopy_used;
150 246
151 /* TODO: check that we are running from vhost_worker? */ 247 /* TODO: check that we are running from vhost_worker? */
152 sock = rcu_dereference_check(vq->private_data, 1); 248 sock = rcu_dereference_check(vq->private_data, 1);
@@ -172,7 +268,7 @@ static void handle_tx(struct vhost_net *net)
172 for (;;) { 268 for (;;) {
173 /* Release DMAs done buffers first */ 269 /* Release DMAs done buffers first */
174 if (zcopy) 270 if (zcopy)
175 vhost_zerocopy_signal_used(vq); 271 vhost_zerocopy_signal_used(net, vq);
176 272
177 head = vhost_get_vq_desc(&net->dev, vq, vq->iov, 273 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
178 ARRAY_SIZE(vq->iov), 274 ARRAY_SIZE(vq->iov),
@@ -224,10 +320,14 @@ static void handle_tx(struct vhost_net *net)
224 iov_length(vq->hdr, s), hdr_size); 320 iov_length(vq->hdr, s), hdr_size);
225 break; 321 break;
226 } 322 }
323 zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
324 vq->upend_idx != vq->done_idx);
325
227 /* use msg_control to pass vhost zerocopy ubuf info to skb */ 326 /* use msg_control to pass vhost zerocopy ubuf info to skb */
228 if (zcopy) { 327 if (zcopy_used) {
229 vq->heads[vq->upend_idx].id = head; 328 vq->heads[vq->upend_idx].id = head;
230 if (len < VHOST_GOODCOPY_LEN) { 329 if (!vhost_net_tx_select_zcopy(net) ||
330 len < VHOST_GOODCOPY_LEN) {
231 /* copy don't need to wait for DMA done */ 331 /* copy don't need to wait for DMA done */
232 vq->heads[vq->upend_idx].len = 332 vq->heads[vq->upend_idx].len =
233 VHOST_DMA_DONE_LEN; 333 VHOST_DMA_DONE_LEN;
@@ -237,7 +337,8 @@ static void handle_tx(struct vhost_net *net)
237 } else { 337 } else {
238 struct ubuf_info *ubuf = &vq->ubuf_info[head]; 338 struct ubuf_info *ubuf = &vq->ubuf_info[head];
239 339
240 vq->heads[vq->upend_idx].len = len; 340 vq->heads[vq->upend_idx].len =
341 VHOST_DMA_IN_PROGRESS;
241 ubuf->callback = vhost_zerocopy_callback; 342 ubuf->callback = vhost_zerocopy_callback;
242 ubuf->ctx = vq->ubufs; 343 ubuf->ctx = vq->ubufs;
243 ubuf->desc = vq->upend_idx; 344 ubuf->desc = vq->upend_idx;
@@ -251,7 +352,7 @@ static void handle_tx(struct vhost_net *net)
251 /* TODO: Check specific error and bomb out unless ENOBUFS? */ 352 /* TODO: Check specific error and bomb out unless ENOBUFS? */
252 err = sock->ops->sendmsg(NULL, sock, &msg, len); 353 err = sock->ops->sendmsg(NULL, sock, &msg, len);
253 if (unlikely(err < 0)) { 354 if (unlikely(err < 0)) {
254 if (zcopy) { 355 if (zcopy_used) {
255 if (ubufs) 356 if (ubufs)
256 vhost_ubuf_put(ubufs); 357 vhost_ubuf_put(ubufs);
257 vq->upend_idx = ((unsigned)vq->upend_idx - 1) % 358 vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
@@ -265,11 +366,12 @@ static void handle_tx(struct vhost_net *net)
265 if (err != len) 366 if (err != len)
266 pr_debug("Truncated TX packet: " 367 pr_debug("Truncated TX packet: "
267 " len %d != %zd\n", err, len); 368 " len %d != %zd\n", err, len);
268 if (!zcopy) 369 if (!zcopy_used)
269 vhost_add_used_and_signal(&net->dev, vq, head, 0); 370 vhost_add_used_and_signal(&net->dev, vq, head, 0);
270 else 371 else
271 vhost_zerocopy_signal_used(vq); 372 vhost_zerocopy_signal_used(net, vq);
272 total_len += len; 373 total_len += len;
374 vhost_net_tx_packet(net);
273 if (unlikely(total_len >= VHOST_NET_WEIGHT)) { 375 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
274 vhost_poll_queue(&vq->poll); 376 vhost_poll_queue(&vq->poll);
275 break; 377 break;
@@ -587,6 +689,17 @@ static void vhost_net_flush(struct vhost_net *n)
587{ 689{
588 vhost_net_flush_vq(n, VHOST_NET_VQ_TX); 690 vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
589 vhost_net_flush_vq(n, VHOST_NET_VQ_RX); 691 vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
692 if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
693 mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
694 n->tx_flush = true;
695 mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
696 /* Wait for all lower device DMAs done. */
697 vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
698 mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
699 n->tx_flush = false;
700 kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
701 mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
702 }
590} 703}
591 704
592static int vhost_net_release(struct inode *inode, struct file *f) 705static int vhost_net_release(struct inode *inode, struct file *f)
@@ -597,6 +710,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
597 710
598 vhost_net_stop(n, &tx_sock, &rx_sock); 711 vhost_net_stop(n, &tx_sock, &rx_sock);
599 vhost_net_flush(n); 712 vhost_net_flush(n);
713 vhost_dev_stop(&n->dev);
600 vhost_dev_cleanup(&n->dev, false); 714 vhost_dev_cleanup(&n->dev, false);
601 if (tx_sock) 715 if (tx_sock)
602 fput(tx_sock->file); 716 fput(tx_sock->file);
@@ -722,6 +836,10 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
722 r = vhost_init_used(vq); 836 r = vhost_init_used(vq);
723 if (r) 837 if (r)
724 goto err_vq; 838 goto err_vq;
839
840 n->tx_packets = 0;
841 n->tx_zcopy_err = 0;
842 n->tx_flush = false;
725 } 843 }
726 844
727 mutex_unlock(&vq->mutex); 845 mutex_unlock(&vq->mutex);
@@ -729,7 +847,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
729 if (oldubufs) { 847 if (oldubufs) {
730 vhost_ubuf_put_and_wait(oldubufs); 848 vhost_ubuf_put_and_wait(oldubufs);
731 mutex_lock(&vq->mutex); 849 mutex_lock(&vq->mutex);
732 vhost_zerocopy_signal_used(vq); 850 vhost_zerocopy_signal_used(n, vq);
733 mutex_unlock(&vq->mutex); 851 mutex_unlock(&vq->mutex);
734 } 852 }
735 853
@@ -838,8 +956,11 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
838 return vhost_net_reset_owner(n); 956 return vhost_net_reset_owner(n);
839 default: 957 default:
840 mutex_lock(&n->dev.mutex); 958 mutex_lock(&n->dev.mutex);
841 r = vhost_dev_ioctl(&n->dev, ioctl, arg); 959 r = vhost_dev_ioctl(&n->dev, ioctl, argp);
842 vhost_net_flush(n); 960 if (r == -ENOIOCTLCMD)
961 r = vhost_vring_ioctl(&n->dev, ioctl, argp);
962 else
963 vhost_net_flush(n);
843 mutex_unlock(&n->dev.mutex); 964 mutex_unlock(&n->dev.mutex);
844 return r; 965 return r;
845 } 966 }
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index aa31692064dd..79e7e4d45eb2 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -34,7 +34,6 @@
34#include <linux/ctype.h> 34#include <linux/ctype.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/eventfd.h> 36#include <linux/eventfd.h>
37#include <linux/vhost.h>
38#include <linux/fs.h> 37#include <linux/fs.h>
39#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
40#include <asm/unaligned.h> 39#include <asm/unaligned.h>
@@ -415,14 +414,12 @@ static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd(
415{ 414{
416 struct tcm_vhost_cmd *tv_cmd; 415 struct tcm_vhost_cmd *tv_cmd;
417 struct tcm_vhost_nexus *tv_nexus; 416 struct tcm_vhost_nexus *tv_nexus;
418 struct se_session *se_sess;
419 417
420 tv_nexus = tv_tpg->tpg_nexus; 418 tv_nexus = tv_tpg->tpg_nexus;
421 if (!tv_nexus) { 419 if (!tv_nexus) {
422 pr_err("Unable to locate active struct tcm_vhost_nexus\n"); 420 pr_err("Unable to locate active struct tcm_vhost_nexus\n");
423 return ERR_PTR(-EIO); 421 return ERR_PTR(-EIO);
424 } 422 }
425 se_sess = tv_nexus->tvn_se_sess;
426 423
427 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); 424 tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC);
428 if (!tv_cmd) { 425 if (!tv_cmd) {
@@ -895,6 +892,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
895 vhost_scsi_clear_endpoint(s, &backend); 892 vhost_scsi_clear_endpoint(s, &backend);
896 } 893 }
897 894
895 vhost_dev_stop(&s->dev);
898 vhost_dev_cleanup(&s->dev, false); 896 vhost_dev_cleanup(&s->dev, false);
899 kfree(s); 897 kfree(s);
900 return 0; 898 return 0;
@@ -970,7 +968,10 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
970 return vhost_scsi_set_features(vs, features); 968 return vhost_scsi_set_features(vs, features);
971 default: 969 default:
972 mutex_lock(&vs->dev.mutex); 970 mutex_lock(&vs->dev.mutex);
973 r = vhost_dev_ioctl(&vs->dev, ioctl, arg); 971 r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
972 /* TODO: flush backend after dev ioctl. */
973 if (r == -ENOIOCTLCMD)
974 r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
974 mutex_unlock(&vs->dev.mutex); 975 mutex_unlock(&vs->dev.mutex);
975 return r; 976 return r;
976 } 977 }
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dedaf81d8f36..34389f75fe65 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -26,10 +26,6 @@
26#include <linux/kthread.h> 26#include <linux/kthread.h>
27#include <linux/cgroup.h> 27#include <linux/cgroup.h>
28 28
29#include <linux/net.h>
30#include <linux/if_packet.h>
31#include <linux/if_arp.h>
32
33#include "vhost.h" 29#include "vhost.h"
34 30
35enum { 31enum {
@@ -414,28 +410,16 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
414 return 0; 410 return 0;
415} 411}
416 412
417/* In case of DMA done not in order in lower device driver for some reason. 413void vhost_dev_stop(struct vhost_dev *dev)
418 * upend_idx is used to track end of used idx, done_idx is used to track head
419 * of used idx. Once lower device DMA done contiguously, we will signal KVM
420 * guest used idx.
421 */
422int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq)
423{ 414{
424 int i; 415 int i;
425 int j = 0; 416
426 417 for (i = 0; i < dev->nvqs; ++i) {
427 for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) { 418 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
428 if ((vq->heads[i].len == VHOST_DMA_DONE_LEN)) { 419 vhost_poll_stop(&dev->vqs[i].poll);
429 vq->heads[i].len = VHOST_DMA_CLEAR_LEN; 420 vhost_poll_flush(&dev->vqs[i].poll);
430 vhost_add_used_and_signal(vq->dev, vq, 421 }
431 vq->heads[i].id, 0);
432 ++j;
433 } else
434 break;
435 } 422 }
436 if (j)
437 vq->done_idx = i;
438 return j;
439} 423}
440 424
441/* Caller should have device mutex if and only if locked is set */ 425/* Caller should have device mutex if and only if locked is set */
@@ -444,17 +428,6 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
444 int i; 428 int i;
445 429
446 for (i = 0; i < dev->nvqs; ++i) { 430 for (i = 0; i < dev->nvqs; ++i) {
447 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
448 vhost_poll_stop(&dev->vqs[i].poll);
449 vhost_poll_flush(&dev->vqs[i].poll);
450 }
451 /* Wait for all lower device DMAs done. */
452 if (dev->vqs[i].ubufs)
453 vhost_ubuf_put_and_wait(dev->vqs[i].ubufs);
454
455 /* Signal guest as appropriate. */
456 vhost_zerocopy_signal_used(&dev->vqs[i]);
457
458 if (dev->vqs[i].error_ctx) 431 if (dev->vqs[i].error_ctx)
459 eventfd_ctx_put(dev->vqs[i].error_ctx); 432 eventfd_ctx_put(dev->vqs[i].error_ctx);
460 if (dev->vqs[i].error) 433 if (dev->vqs[i].error)
@@ -634,7 +607,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
634 return 0; 607 return 0;
635} 608}
636 609
637static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) 610long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
638{ 611{
639 struct file *eventfp, *filep = NULL; 612 struct file *eventfp, *filep = NULL;
640 bool pollstart = false, pollstop = false; 613 bool pollstart = false, pollstop = false;
@@ -829,9 +802,8 @@ static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
829} 802}
830 803
831/* Caller must have device mutex */ 804/* Caller must have device mutex */
832long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg) 805long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
833{ 806{
834 void __user *argp = (void __user *)arg;
835 struct file *eventfp, *filep = NULL; 807 struct file *eventfp, *filep = NULL;
836 struct eventfd_ctx *ctx = NULL; 808 struct eventfd_ctx *ctx = NULL;
837 u64 p; 809 u64 p;
@@ -902,7 +874,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
902 fput(filep); 874 fput(filep);
903 break; 875 break;
904 default: 876 default:
905 r = vhost_set_vring(d, ioctl, argp); 877 r = -ENOIOCTLCMD;
906 break; 878 break;
907 } 879 }
908done: 880done:
@@ -1599,14 +1571,3 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
1599 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount)); 1571 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
1600 kfree(ubufs); 1572 kfree(ubufs);
1601} 1573}
1602
1603void vhost_zerocopy_callback(struct ubuf_info *ubuf)
1604{
1605 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
1606 struct vhost_virtqueue *vq = ubufs->vq;
1607
1608 vhost_poll_queue(&vq->poll);
1609 /* set len = 1 to mark this desc buffers done DMA */
1610 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1611 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
1612}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 1125af3d27d1..2639c58b23ab 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -7,17 +7,11 @@
7#include <linux/mutex.h> 7#include <linux/mutex.h>
8#include <linux/poll.h> 8#include <linux/poll.h>
9#include <linux/file.h> 9#include <linux/file.h>
10#include <linux/skbuff.h>
11#include <linux/uio.h> 10#include <linux/uio.h>
12#include <linux/virtio_config.h> 11#include <linux/virtio_config.h>
13#include <linux/virtio_ring.h> 12#include <linux/virtio_ring.h>
14#include <linux/atomic.h> 13#include <linux/atomic.h>
15 14
16/* This is for zerocopy, used buffer len is set to 1 when lower device DMA
17 * done */
18#define VHOST_DMA_DONE_LEN 1
19#define VHOST_DMA_CLEAR_LEN 0
20
21struct vhost_device; 15struct vhost_device;
22 16
23struct vhost_work; 17struct vhost_work;
@@ -70,6 +64,8 @@ struct vhost_ubuf_ref *vhost_ubuf_alloc(struct vhost_virtqueue *, bool zcopy);
70void vhost_ubuf_put(struct vhost_ubuf_ref *); 64void vhost_ubuf_put(struct vhost_ubuf_ref *);
71void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *); 65void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *);
72 66
67struct ubuf_info;
68
73/* The virtqueue structure describes a queue attached to a device. */ 69/* The virtqueue structure describes a queue attached to a device. */
74struct vhost_virtqueue { 70struct vhost_virtqueue {
75 struct vhost_dev *dev; 71 struct vhost_dev *dev;
@@ -167,7 +163,9 @@ long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
167long vhost_dev_check_owner(struct vhost_dev *); 163long vhost_dev_check_owner(struct vhost_dev *);
168long vhost_dev_reset_owner(struct vhost_dev *); 164long vhost_dev_reset_owner(struct vhost_dev *);
169void vhost_dev_cleanup(struct vhost_dev *, bool locked); 165void vhost_dev_cleanup(struct vhost_dev *, bool locked);
170long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg); 166void vhost_dev_stop(struct vhost_dev *);
167long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
168long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
171int vhost_vq_access_ok(struct vhost_virtqueue *vq); 169int vhost_vq_access_ok(struct vhost_virtqueue *vq);
172int vhost_log_access_ok(struct vhost_dev *); 170int vhost_log_access_ok(struct vhost_dev *);
173 171
@@ -191,8 +189,6 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
191 189
192int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 190int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
193 unsigned int log_num, u64 len); 191 unsigned int log_num, u64 len);
194void vhost_zerocopy_callback(struct ubuf_info *);
195int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
196 192
197#define vq_err(vq, fmt, ...) do { \ 193#define vq_err(vq, fmt, ...) do { \
198 pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ 194 pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \